mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git synced 2026-01-11 17:10:13 +00:00

Compare commits


No commits in common. "372800cb95a35a7c40a07e2e0f7de4ce6786d230" and "623fb9912f6af600cda3b6bd166ac738c1115ef4" have entirely different histories.

77 changed files with 1137 additions and 1344 deletions

View File

@ -416,7 +416,6 @@ lm_change yes no no
lm_breaker_owns_lease: yes no no
lm_lock_expirable yes no no
lm_expire_lock no no yes
lm_open_conflict yes no no
====================== ============= ================= =========
buffer_head

View File

@ -2159,7 +2159,7 @@ M: Alice Ryhl <aliceryhl@google.com>
L: dri-devel@lists.freedesktop.org
S: Supported
W: https://rust-for-linux.com/tyr-gpu-driver
W: https://drm.pages.freedesktop.org/maintainer-tools/drm-rust.html
W https://drm.pages.freedesktop.org/maintainer-tools/drm-rust.html
B: https://gitlab.freedesktop.org/panfrost/linux/-/issues
T: git https://gitlab.freedesktop.org/drm/rust/kernel.git
F: Documentation/devicetree/bindings/gpu/arm,mali-valhall-csf.yaml
@ -8068,7 +8068,7 @@ W: https://rust-for-linux.com/nova-gpu-driver
Q: https://patchwork.freedesktop.org/project/nouveau/
B: https://gitlab.freedesktop.org/drm/nova/-/issues
C: irc://irc.oftc.net/nouveau
T: git https://gitlab.freedesktop.org/drm/rust/kernel.git drm-rust-next
T: git https://gitlab.freedesktop.org/drm/nova.git nova-next
F: Documentation/gpu/nova/
F: drivers/gpu/nova-core/
@ -8080,7 +8080,7 @@ W: https://rust-for-linux.com/nova-gpu-driver
Q: https://patchwork.freedesktop.org/project/nouveau/
B: https://gitlab.freedesktop.org/drm/nova/-/issues
C: irc://irc.oftc.net/nouveau
T: git https://gitlab.freedesktop.org/drm/rust/kernel.git drm-rust-next
T: git https://gitlab.freedesktop.org/drm/nova.git nova-next
F: Documentation/gpu/nova/
F: drivers/gpu/drm/nova/
F: include/uapi/drm/nova_drm.h
@ -8358,7 +8358,6 @@ X: drivers/gpu/drm/msm/
X: drivers/gpu/drm/nova/
X: drivers/gpu/drm/radeon/
X: drivers/gpu/drm/tegra/
X: drivers/gpu/drm/tyr/
X: drivers/gpu/drm/xe/
DRM DRIVERS AND COMMON INFRASTRUCTURE [RUST]

View File

@ -181,28 +181,6 @@ static int __init ofpci_debug(char *str)
__setup("ofpci_debug=", ofpci_debug);
static void of_fixup_pci_pref(struct pci_dev *dev, int index,
struct resource *res)
{
struct pci_bus_region region;
if (!(res->flags & IORESOURCE_MEM_64))
return;
if (!resource_size(res))
return;
pcibios_resource_to_bus(dev->bus, &region, res);
if (region.end <= ~((u32)0))
return;
if (!(res->flags & IORESOURCE_PREFETCH)) {
res->flags |= IORESOURCE_PREFETCH;
pci_info(dev, "reg 0x%x: fixup: pref added to 64-bit resource\n",
index);
}
}
static unsigned long pci_parse_of_flags(u32 addr0)
{
unsigned long flags = 0;
@ -266,7 +244,6 @@ static void pci_parse_of_addrs(struct platform_device *op,
res->end = op_res->end;
res->flags = flags;
res->name = pci_name(dev);
of_fixup_pci_pref(dev, i, res);
pci_info(dev, "reg 0x%x: %pR\n", i, res);
}

View File

@ -188,7 +188,7 @@ static int acpi_pci_irq_check_entry(acpi_handle handle, struct pci_dev *dev,
* the IRQ value, which is hardwired to specific interrupt inputs on
* the interrupt controller.
*/
pr_debug("%04x:%02x:%02x[%c] -> %s[%u]\n",
pr_debug("%04x:%02x:%02x[%c] -> %s[%d]\n",
entry->id.segment, entry->id.bus, entry->id.device,
pin_name(entry->pin), prt->source, entry->index);
@ -384,7 +384,7 @@ static inline bool acpi_pci_irq_valid(struct pci_dev *dev, u8 pin)
int acpi_pci_irq_enable(struct pci_dev *dev)
{
struct acpi_prt_entry *entry;
u32 gsi;
int gsi;
u8 pin;
int triggering = ACPI_LEVEL_SENSITIVE;
/*
@ -422,21 +422,18 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
return 0;
}
rc = -ENODEV;
if (entry) {
if (entry->link)
rc = acpi_pci_link_allocate_irq(entry->link,
gsi = acpi_pci_link_allocate_irq(entry->link,
entry->index,
&triggering, &polarity,
&link, &gsi);
else {
&link);
else
gsi = entry->index;
rc = 0;
}
}
} else
gsi = -1;
if (rc < 0) {
if (gsi < 0) {
/*
* No IRQ known to the ACPI subsystem - maybe the BIOS /
* driver reported one, then use it. Exit in any case.

View File

@ -448,7 +448,7 @@ static int acpi_isa_irq_penalty[ACPI_MAX_ISA_IRQS] = {
/* >IRQ15 */
};
static int acpi_irq_pci_sharing_penalty(u32 irq)
static int acpi_irq_pci_sharing_penalty(int irq)
{
struct acpi_pci_link *link;
int penalty = 0;
@ -474,7 +474,7 @@ static int acpi_irq_pci_sharing_penalty(u32 irq)
return penalty;
}
static int acpi_irq_get_penalty(u32 irq)
static int acpi_irq_get_penalty(int irq)
{
int penalty = 0;
@ -528,7 +528,7 @@ static int acpi_irq_balance = -1; /* 0: static, 1: balance */
static int acpi_pci_link_allocate(struct acpi_pci_link *link)
{
acpi_handle handle = link->device->handle;
u32 irq;
int irq;
int i;
if (link->irq.initialized) {
@ -598,53 +598,44 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
return 0;
}
/**
* acpi_pci_link_allocate_irq(): Retrieve a link device GSI
*
* @handle: Handle for the link device
* @index: GSI index
* @triggering: pointer to store the GSI trigger
* @polarity: pointer to store GSI polarity
* @name: pointer to store link device name
* @gsi: pointer to store GSI number
*
* Returns:
* 0 on success with @triggering, @polarity, @name, @gsi initialized.
* -ENODEV on failure
/*
* acpi_pci_link_allocate_irq
* success: return IRQ >= 0
* failure: return -1
*/
int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering,
int *polarity, char **name, u32 *gsi)
int *polarity, char **name)
{
struct acpi_device *device = acpi_fetch_acpi_dev(handle);
struct acpi_pci_link *link;
if (!device) {
acpi_handle_err(handle, "Invalid link device\n");
return -ENODEV;
return -1;
}
link = acpi_driver_data(device);
if (!link) {
acpi_handle_err(handle, "Invalid link context\n");
return -ENODEV;
return -1;
}
/* TBD: Support multiple index (IRQ) entries per Link Device */
if (index) {
acpi_handle_err(handle, "Invalid index %d\n", index);
return -ENODEV;
return -1;
}
mutex_lock(&acpi_link_lock);
if (acpi_pci_link_allocate(link)) {
mutex_unlock(&acpi_link_lock);
return -ENODEV;
return -1;
}
if (!link->irq.active) {
mutex_unlock(&acpi_link_lock);
acpi_handle_err(handle, "Link active IRQ is 0!\n");
return -ENODEV;
return -1;
}
link->refcnt++;
mutex_unlock(&acpi_link_lock);
@ -656,9 +647,7 @@ int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering,
if (name)
*name = acpi_device_bid(link->device);
acpi_handle_debug(handle, "Link is referenced\n");
*gsi = link->irq.active;
return 0;
return link->irq.active;
}
/*

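For orientation, a minimal sketch (not part of the diff; `handle`, `triggering`, `polarity` and `name` stand in for whatever the caller already holds, and only the single index-0 entry case is shown) contrasting the two acpi_pci_link_allocate_irq() calling conventions that this hunk switches between:

	/* On one side of this comparison: errno-style return, GSI via out-parameter. */
	u32 gsi;
	int rc = acpi_pci_link_allocate_irq(handle, 0, &triggering, &polarity,
					    &name, &gsi);
	if (rc < 0)
		return rc;		/* -ENODEV on failure; gsi valid only when rc == 0 */

	/* On the other side: the GSI is returned directly, negative on failure. */
	int irq = acpi_pci_link_allocate_irq(handle, 0, &triggering, &polarity,
					     &name);
	if (irq < 0)
		return irq;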
View File

@ -41,6 +41,8 @@ static pci_ers_result_t adf_error_detected(struct pci_dev *pdev,
adf_error_notifier(accel_dev);
adf_pf2vf_notify_fatal_error(accel_dev);
adf_dev_restarting_notify(accel_dev);
adf_pf2vf_notify_restarting(accel_dev);
adf_pf2vf_wait_for_restarting_complete(accel_dev);
pci_clear_master(pdev);
adf_dev_down(accel_dev);

View File

@ -12,7 +12,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cleanup.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
@ -242,17 +241,23 @@ static int it87_gpio_direction_out(struct gpio_chip *chip,
mask = 1 << (gpio_num % 8);
group = (gpio_num / 8);
guard(spinlock)(&it87_gpio->lock);
spin_lock(&it87_gpio->lock);
rc = superio_enter();
if (rc)
return rc;
goto exit;
/* set the output enable bit */
superio_set_mask(mask, group + it87_gpio->output_base);
rc = it87_gpio_set(chip, gpio_num, val);
if (rc)
goto exit;
superio_exit();
exit:
spin_unlock(&it87_gpio->lock);
return rc;
}

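For context, a minimal sketch of the scope-based locking pattern from <linux/cleanup.h> that this hunk converts between; the helper name and the work it does are hypothetical, only guard(spinlock)() and the superio calls mirror the code above. The guard releases the lock automatically when it goes out of scope, which is why the "goto exit" / spin_unlock() path disappears:

	static int example_locked_op(struct it87_gpio *it87_gpio)	/* hypothetical helper */
	{
		int rc;

		guard(spinlock)(&it87_gpio->lock);	/* lock taken here */

		rc = superio_enter();
		if (rc)
			return rc;	/* lock dropped automatically, no exit label needed */

		/* ... register writes go here ... */

		superio_exit();
		return 0;		/* lock dropped automatically here as well */
	}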
View File

@ -548,13 +548,6 @@ static void gpio_mpsse_ida_remove(void *data)
ida_free(&gpio_mpsse_ida, priv->id);
}
static void gpio_mpsse_usb_put_dev(void *data)
{
struct mpsse_priv *priv = data;
usb_put_dev(priv->udev);
}
static int mpsse_init_valid_mask(struct gpio_chip *chip,
unsigned long *valid_mask,
unsigned int ngpios)
@ -599,10 +592,6 @@ static int gpio_mpsse_probe(struct usb_interface *interface,
INIT_LIST_HEAD(&priv->workers);
priv->udev = usb_get_dev(interface_to_usbdev(interface));
err = devm_add_action_or_reset(dev, gpio_mpsse_usb_put_dev, priv);
if (err)
return err;
priv->intf = interface;
priv->intf_id = interface->cur_altsetting->desc.bInterfaceNumber;
@ -724,6 +713,7 @@ static void gpio_mpsse_disconnect(struct usb_interface *intf)
priv->intf = NULL;
usb_set_intfdata(intf, NULL);
usb_put_dev(priv->udev);
}
static struct usb_driver gpio_mpsse_driver = {

View File

@ -943,35 +943,14 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pendin
DECLARE_BITMAP(old_stat, MAX_LINE);
DECLARE_BITMAP(cur_stat, MAX_LINE);
DECLARE_BITMAP(new_stat, MAX_LINE);
DECLARE_BITMAP(int_stat, MAX_LINE);
DECLARE_BITMAP(trigger, MAX_LINE);
DECLARE_BITMAP(edges, MAX_LINE);
int ret;
if (chip->driver_data & PCA_PCAL) {
/* Read INT_STAT before it is cleared by the input-port read. */
ret = pca953x_read_regs(chip, PCAL953X_INT_STAT, int_stat);
if (ret)
return false;
}
ret = pca953x_read_regs(chip, chip->regs->input, cur_stat);
if (ret)
return false;
if (chip->driver_data & PCA_PCAL) {
/* Detect short pulses via INT_STAT. */
bitmap_and(trigger, int_stat, chip->irq_mask, gc->ngpio);
/* Apply filter for rising/falling edge selection. */
bitmap_replace(new_stat, chip->irq_trig_fall, chip->irq_trig_raise,
cur_stat, gc->ngpio);
bitmap_and(int_stat, new_stat, trigger, gc->ngpio);
} else {
bitmap_zero(int_stat, gc->ngpio);
}
/* Remove output pins from the equation */
pca953x_read_regs(chip, chip->regs->direction, reg_direction);
@ -985,8 +964,7 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pendin
if (bitmap_empty(chip->irq_trig_level_high, gc->ngpio) &&
bitmap_empty(chip->irq_trig_level_low, gc->ngpio)) {
if (bitmap_empty(trigger, gc->ngpio) &&
bitmap_empty(int_stat, gc->ngpio))
if (bitmap_empty(trigger, gc->ngpio))
return false;
}
@ -994,7 +972,6 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pendin
bitmap_and(old_stat, chip->irq_trig_raise, new_stat, gc->ngpio);
bitmap_or(edges, old_stat, cur_stat, gc->ngpio);
bitmap_and(pending, edges, trigger, gc->ngpio);
bitmap_or(pending, pending, int_stat, gc->ngpio);
bitmap_and(cur_stat, new_stat, chip->irq_trig_level_high, gc->ngpio);
bitmap_and(cur_stat, cur_stat, chip->irq_mask, gc->ngpio);

View File

@ -593,7 +593,6 @@ static int rockchip_gpiolib_register(struct rockchip_pin_bank *bank)
gc->ngpio = bank->nr_pins;
gc->label = bank->name;
gc->parent = bank->dev;
gc->can_sleep = true;
ret = gpiochip_add_data(gc, bank);
if (ret) {

View File

@ -38,10 +38,8 @@ struct gpio_shared_ref {
int dev_id;
/* Protects the auxiliary device struct and the lookup table. */
struct mutex lock;
struct lock_class_key lock_key;
struct auxiliary_device adev;
struct gpiod_lookup_table *lookup;
bool is_reset_gpio;
};
/* Represents a single GPIO pin. */
@ -78,60 +76,6 @@ gpio_shared_find_entry(struct fwnode_handle *controller_node,
return NULL;
}
static struct gpio_shared_ref *gpio_shared_make_ref(struct fwnode_handle *fwnode,
const char *con_id,
enum gpiod_flags flags)
{
char *con_id_cpy __free(kfree) = NULL;
struct gpio_shared_ref *ref __free(kfree) = kzalloc(sizeof(*ref), GFP_KERNEL);
if (!ref)
return NULL;
if (con_id) {
con_id_cpy = kstrdup(con_id, GFP_KERNEL);
if (!con_id_cpy)
return NULL;
}
ref->dev_id = ida_alloc(&gpio_shared_ida, GFP_KERNEL);
if (ref->dev_id < 0)
return NULL;
ref->flags = flags;
ref->con_id = no_free_ptr(con_id_cpy);
ref->fwnode = fwnode;
lockdep_register_key(&ref->lock_key);
mutex_init_with_key(&ref->lock, &ref->lock_key);
return no_free_ptr(ref);
}
static int gpio_shared_setup_reset_proxy(struct gpio_shared_entry *entry,
enum gpiod_flags flags)
{
struct gpio_shared_ref *ref;
list_for_each_entry(ref, &entry->refs, list) {
if (ref->is_reset_gpio)
/* Already set-up. */
return 0;
}
ref = gpio_shared_make_ref(NULL, "reset", flags);
if (!ref)
return -ENOMEM;
ref->is_reset_gpio = true;
list_add_tail(&ref->list, &entry->refs);
pr_debug("Created a secondary shared GPIO reference for potential reset-gpio device for GPIO %u at %s\n",
entry->offset, fwnode_get_name(entry->fwnode));
return 0;
}
/* Handle all special nodes that we should ignore. */
static bool gpio_shared_of_node_ignore(struct device_node *node)
{
@ -162,7 +106,6 @@ static int gpio_shared_of_traverse(struct device_node *curr)
size_t con_id_len, suffix_len;
struct fwnode_handle *fwnode;
struct of_phandle_args args;
struct gpio_shared_ref *ref;
struct property *prop;
unsigned int offset;
const char *suffix;
@ -195,7 +138,6 @@ static int gpio_shared_of_traverse(struct device_node *curr)
for (i = 0; i < count; i++) {
struct device_node *np __free(device_node) = NULL;
char *con_id __free(kfree) = NULL;
ret = of_parse_phandle_with_args(curr, prop->name,
"#gpio-cells", i,
@ -240,6 +182,15 @@ static int gpio_shared_of_traverse(struct device_node *curr)
list_add_tail(&entry->list, &gpio_shared_list);
}
struct gpio_shared_ref *ref __free(kfree) =
kzalloc(sizeof(*ref), GFP_KERNEL);
if (!ref)
return -ENOMEM;
ref->fwnode = fwnode_handle_get(of_fwnode_handle(curr));
ref->flags = args.args[1];
mutex_init(&ref->lock);
if (strends(prop->name, "gpios"))
suffix = "-gpios";
else if (strends(prop->name, "gpio"))
@ -251,32 +202,27 @@ static int gpio_shared_of_traverse(struct device_node *curr)
/* We only set con_id if there's actually one. */
if (strcmp(prop->name, "gpios") && strcmp(prop->name, "gpio")) {
con_id = kstrdup(prop->name, GFP_KERNEL);
if (!con_id)
ref->con_id = kstrdup(prop->name, GFP_KERNEL);
if (!ref->con_id)
return -ENOMEM;
con_id_len = strlen(con_id);
con_id_len = strlen(ref->con_id);
suffix_len = strlen(suffix);
con_id[con_id_len - suffix_len] = '\0';
ref->con_id[con_id_len - suffix_len] = '\0';
}
ref = gpio_shared_make_ref(fwnode_handle_get(of_fwnode_handle(curr)),
con_id, args.args[1]);
if (!ref)
ref->dev_id = ida_alloc(&gpio_shared_ida, GFP_KERNEL);
if (ref->dev_id < 0) {
kfree(ref->con_id);
return -ENOMEM;
}
if (!list_empty(&entry->refs))
pr_debug("GPIO %u at %s is shared by multiple firmware nodes\n",
entry->offset, fwnode_get_name(entry->fwnode));
list_add_tail(&ref->list, &entry->refs);
if (strcmp(prop->name, "reset-gpios") == 0) {
ret = gpio_shared_setup_reset_proxy(entry, args.args[1]);
if (ret)
return ret;
}
list_add_tail(&no_free_ptr(ref)->list, &entry->refs);
}
}
@ -360,16 +306,20 @@ static bool gpio_shared_dev_is_reset_gpio(struct device *consumer,
struct fwnode_handle *reset_fwnode = dev_fwnode(consumer);
struct fwnode_reference_args ref_args, aux_args;
struct device *parent = consumer->parent;
struct gpio_shared_ref *real_ref;
bool match;
int ret;
lockdep_assert_held(&ref->lock);
/* The reset-gpio device must have a parent AND a firmware node. */
if (!parent || !reset_fwnode)
return false;
/*
* FIXME: use device_is_compatible() once the reset-gpio drivers gains
* a compatible string which it currently does not have.
*/
if (!strstarts(dev_name(consumer), "reset.gpio."))
return false;
/*
* Parent of the reset-gpio auxiliary device is the GPIO chip whose
* fwnode we stored in the entry structure.
@ -378,61 +328,33 @@ static bool gpio_shared_dev_is_reset_gpio(struct device *consumer,
return false;
/*
* Now we need to find the actual pin we want to assign to this
* reset-gpio device. To that end: iterate over the list of references
* of this entry and see if there's one, whose reset-gpios property's
* arguments match the ones from this consumer's node.
* The device associated with the shared reference's firmware node is
* the consumer of the reset control exposed by the reset-gpio device.
* It must have a "reset-gpios" property that's referencing the entry's
* firmware node.
*
* The reference args must agree between the real consumer and the
* auxiliary reset-gpio device.
*/
list_for_each_entry(real_ref, &entry->refs, list) {
if (real_ref == ref)
continue;
guard(mutex)(&real_ref->lock);
if (!real_ref->fwnode)
continue;
/*
* The device associated with the shared reference's firmware
* node is the consumer of the reset control exposed by the
* reset-gpio device. It must have a "reset-gpios" property
* that's referencing the entry's firmware node.
*
* The reference args must agree between the real consumer and
* the auxiliary reset-gpio device.
*/
ret = fwnode_property_get_reference_args(real_ref->fwnode,
"reset-gpios",
NULL, 2, 0, &ref_args);
if (ret)
continue;
ret = fwnode_property_get_reference_args(reset_fwnode, "reset-gpios",
NULL, 2, 0, &aux_args);
if (ret) {
fwnode_handle_put(ref_args.fwnode);
continue;
}
match = ((ref_args.fwnode == entry->fwnode) &&
(aux_args.fwnode == entry->fwnode) &&
(ref_args.args[0] == aux_args.args[0]));
ret = fwnode_property_get_reference_args(ref->fwnode, "reset-gpios",
NULL, 2, 0, &ref_args);
if (ret)
return false;
ret = fwnode_property_get_reference_args(reset_fwnode, "reset-gpios",
NULL, 2, 0, &aux_args);
if (ret) {
fwnode_handle_put(ref_args.fwnode);
fwnode_handle_put(aux_args.fwnode);
if (!match)
continue;
/*
* Reuse the fwnode of the real device, next time we'll use it
* in the normal path.
*/
ref->fwnode = fwnode_handle_get(reset_fwnode);
return true;
return false;
}
return false;
match = ((ref_args.fwnode == entry->fwnode) &&
(aux_args.fwnode == entry->fwnode) &&
(ref_args.args[0] == aux_args.args[0]));
fwnode_handle_put(ref_args.fwnode);
fwnode_handle_put(aux_args.fwnode);
return match;
}
#else
static bool gpio_shared_dev_is_reset_gpio(struct device *consumer,
@ -443,34 +365,25 @@ static bool gpio_shared_dev_is_reset_gpio(struct device *consumer,
}
#endif /* CONFIG_RESET_GPIO */
int gpio_shared_add_proxy_lookup(struct device *consumer, const char *con_id,
unsigned long lflags)
int gpio_shared_add_proxy_lookup(struct device *consumer, unsigned long lflags)
{
const char *dev_id = dev_name(consumer);
struct gpiod_lookup_table *lookup;
struct gpio_shared_entry *entry;
struct gpio_shared_ref *ref;
struct gpiod_lookup_table *lookup __free(kfree) =
kzalloc(struct_size(lookup, table, 2), GFP_KERNEL);
if (!lookup)
return -ENOMEM;
list_for_each_entry(entry, &gpio_shared_list, list) {
list_for_each_entry(ref, &entry->refs, list) {
if (!device_match_fwnode(consumer, ref->fwnode) &&
!gpio_shared_dev_is_reset_gpio(consumer, entry, ref))
continue;
guard(mutex)(&ref->lock);
/*
* FIXME: use device_is_compatible() once the reset-gpio
* drivers gains a compatible string which it currently
* does not have.
*/
if (!ref->fwnode && strstarts(dev_name(consumer), "reset.gpio.")) {
if (!gpio_shared_dev_is_reset_gpio(consumer, entry, ref))
continue;
} else if (!device_match_fwnode(consumer, ref->fwnode)) {
continue;
}
if ((!con_id && ref->con_id) || (con_id && !ref->con_id) ||
(con_id && ref->con_id && strcmp(con_id, ref->con_id) != 0))
continue;
/* We've already done that on a previous request. */
if (ref->lookup)
return 0;
@ -482,10 +395,6 @@ int gpio_shared_add_proxy_lookup(struct device *consumer, const char *con_id,
if (!key)
return -ENOMEM;
lookup = kzalloc(struct_size(lookup, table, 2), GFP_KERNEL);
if (!lookup)
return -ENOMEM;
pr_debug("Adding machine lookup entry for a shared GPIO for consumer %s, with key '%s' and con_id '%s'\n",
dev_id, key, ref->con_id ?: "none");
@ -493,7 +402,7 @@ int gpio_shared_add_proxy_lookup(struct device *consumer, const char *con_id,
lookup->table[0] = GPIO_LOOKUP(no_free_ptr(key), 0,
ref->con_id, lflags);
ref->lookup = lookup;
ref->lookup = no_free_ptr(lookup);
gpiod_add_lookup_table(ref->lookup);
return 0;
@ -557,9 +466,8 @@ int gpio_device_setup_shared(struct gpio_device *gdev)
entry->offset, gpio_device_get_label(gdev));
list_for_each_entry(ref, &entry->refs, list) {
pr_debug("Setting up a shared GPIO entry for %s (con_id: '%s')\n",
fwnode_get_name(ref->fwnode) ?: "(no fwnode)",
ref->con_id ?: "(none)");
pr_debug("Setting up a shared GPIO entry for %s\n",
fwnode_get_name(ref->fwnode));
ret = gpio_shared_make_adev(gdev, entry, ref);
if (ret)
@ -579,6 +487,15 @@ void gpio_device_teardown_shared(struct gpio_device *gdev)
if (!device_match_fwnode(&gdev->dev, entry->fwnode))
continue;
/*
* For some reason if we call synchronize_srcu() in GPIO core,
* descent here and take this mutex and then recursively call
* synchronize_srcu() again from gpiochip_remove() (which is
* totally fine) called after gpio_shared_remove_adev(),
* lockdep prints a false positive deadlock splat. Disable
* lockdep here.
*/
lockdep_off();
list_for_each_entry(ref, &entry->refs, list) {
guard(mutex)(&ref->lock);
@ -591,6 +508,7 @@ void gpio_device_teardown_shared(struct gpio_device *gdev)
gpio_shared_remove_adev(&ref->adev);
}
lockdep_on();
}
}
@ -686,7 +604,6 @@ static void gpio_shared_drop_ref(struct gpio_shared_ref *ref)
{
list_del(&ref->list);
mutex_destroy(&ref->lock);
lockdep_unregister_key(&ref->lock_key);
kfree(ref->con_id);
ida_free(&gpio_shared_ida, ref->dev_id);
fwnode_handle_put(ref->fwnode);
@ -718,38 +635,12 @@ static void __init gpio_shared_teardown(void)
}
}
static bool gpio_shared_entry_is_really_shared(struct gpio_shared_entry *entry)
{
size_t num_nodes = list_count_nodes(&entry->refs);
struct gpio_shared_ref *ref;
if (num_nodes <= 1)
return false;
if (num_nodes > 2)
return true;
/* Exactly two references: */
list_for_each_entry(ref, &entry->refs, list) {
/*
* Corner-case: the second reference comes from the potential
* reset-gpio instance. However, this pin is not really shared
* as it would have three references in this case. Avoid
* creating unnecessary proxies.
*/
if (ref->is_reset_gpio)
return false;
}
return true;
}
static void gpio_shared_free_exclusive(void)
{
struct gpio_shared_entry *entry, *epos;
list_for_each_entry_safe(entry, epos, &gpio_shared_list, list) {
if (gpio_shared_entry_is_really_shared(entry))
if (list_count_nodes(&entry->refs) > 1)
continue;
gpio_shared_drop_ref(list_first_entry(&entry->refs,

View File

@ -16,8 +16,7 @@ struct device;
int gpio_device_setup_shared(struct gpio_device *gdev);
void gpio_device_teardown_shared(struct gpio_device *gdev);
int gpio_shared_add_proxy_lookup(struct device *consumer, const char *con_id,
unsigned long lflags);
int gpio_shared_add_proxy_lookup(struct device *consumer, unsigned long lflags);
#else
@ -29,7 +28,6 @@ static inline int gpio_device_setup_shared(struct gpio_device *gdev)
static inline void gpio_device_teardown_shared(struct gpio_device *gdev) { }
static inline int gpio_shared_add_proxy_lookup(struct device *consumer,
const char *con_id,
unsigned long lflags)
{
return 0;

View File

@ -1105,18 +1105,6 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
gdev->ngpio = gc->ngpio;
gdev->can_sleep = gc->can_sleep;
rwlock_init(&gdev->line_state_lock);
RAW_INIT_NOTIFIER_HEAD(&gdev->line_state_notifier);
BLOCKING_INIT_NOTIFIER_HEAD(&gdev->device_notifier);
ret = init_srcu_struct(&gdev->srcu);
if (ret)
goto err_free_label;
ret = init_srcu_struct(&gdev->desc_srcu);
if (ret)
goto err_cleanup_gdev_srcu;
scoped_guard(mutex, &gpio_devices_lock) {
/*
* TODO: this allocates a Linux GPIO number base in the global
@ -1131,7 +1119,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
if (base < 0) {
ret = base;
base = 0;
goto err_cleanup_desc_srcu;
goto err_free_label;
}
/*
@ -1151,10 +1139,22 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
ret = gpiodev_add_to_list_unlocked(gdev);
if (ret) {
gpiochip_err(gc, "GPIO integer space overlap, cannot add chip\n");
goto err_cleanup_desc_srcu;
goto err_free_label;
}
}
rwlock_init(&gdev->line_state_lock);
RAW_INIT_NOTIFIER_HEAD(&gdev->line_state_notifier);
BLOCKING_INIT_NOTIFIER_HEAD(&gdev->device_notifier);
ret = init_srcu_struct(&gdev->srcu);
if (ret)
goto err_remove_from_list;
ret = init_srcu_struct(&gdev->desc_srcu);
if (ret)
goto err_cleanup_gdev_srcu;
#ifdef CONFIG_PINCTRL
INIT_LIST_HEAD(&gdev->pin_ranges);
#endif
@ -1164,11 +1164,11 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
ret = gpiochip_set_names(gc);
if (ret)
goto err_remove_from_list;
goto err_cleanup_desc_srcu;
ret = gpiochip_init_valid_mask(gc);
if (ret)
goto err_remove_from_list;
goto err_cleanup_desc_srcu;
for (desc_index = 0; desc_index < gc->ngpio; desc_index++) {
struct gpio_desc *desc = &gdev->descs[desc_index];
@ -1248,6 +1248,10 @@ err_remove_of_chip:
of_gpiochip_remove(gc);
err_free_valid_mask:
gpiochip_free_valid_mask(gc);
err_cleanup_desc_srcu:
cleanup_srcu_struct(&gdev->desc_srcu);
err_cleanup_gdev_srcu:
cleanup_srcu_struct(&gdev->srcu);
err_remove_from_list:
scoped_guard(mutex, &gpio_devices_lock)
list_del_rcu(&gdev->list);
@ -1257,10 +1261,6 @@ err_remove_from_list:
gpio_device_put(gdev);
goto err_print_message;
}
err_cleanup_desc_srcu:
cleanup_srcu_struct(&gdev->desc_srcu);
err_cleanup_gdev_srcu:
cleanup_srcu_struct(&gdev->srcu);
err_free_label:
kfree_const(gdev->label);
err_free_descs:
@ -4508,41 +4508,45 @@ void gpiod_remove_hogs(struct gpiod_hog *hogs)
}
EXPORT_SYMBOL_GPL(gpiod_remove_hogs);
static bool gpiod_match_lookup_table(struct device *dev,
const struct gpiod_lookup_table *table)
static struct gpiod_lookup_table *gpiod_find_lookup_table(struct device *dev)
{
const char *dev_id = dev ? dev_name(dev) : NULL;
struct gpiod_lookup_table *table;
lockdep_assert_held(&gpio_lookup_lock);
if (table->dev_id && dev_id) {
/*
* Valid strings on both ends, must be identical to have
* a match
*/
if (!strcmp(table->dev_id, dev_id))
return true;
} else {
/*
* One of the pointers is NULL, so both must be to have
* a match
*/
if (dev_id == table->dev_id)
return true;
list_for_each_entry(table, &gpio_lookup_list, list) {
if (table->dev_id && dev_id) {
/*
* Valid strings on both ends, must be identical to have
* a match
*/
if (!strcmp(table->dev_id, dev_id))
return table;
} else {
/*
* One of the pointers is NULL, so both must be to have
* a match
*/
if (dev_id == table->dev_id)
return table;
}
}
return false;
return NULL;
}
static struct gpio_desc *gpio_desc_table_match(struct device *dev, const char *con_id,
unsigned int idx, unsigned long *flags,
struct gpiod_lookup_table *table)
static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
unsigned int idx, unsigned long *flags)
{
struct gpio_desc *desc;
struct gpio_desc *desc = ERR_PTR(-ENOENT);
struct gpiod_lookup_table *table;
struct gpiod_lookup *p;
struct gpio_chip *gc;
lockdep_assert_held(&gpio_lookup_lock);
guard(mutex)(&gpio_lookup_lock);
table = gpiod_find_lookup_table(dev);
if (!table)
return desc;
for (p = &table->table[0]; p->key; p++) {
/* idx must always match exactly */
@ -4596,30 +4600,7 @@ static struct gpio_desc *gpio_desc_table_match(struct device *dev, const char *c
return desc;
}
return NULL;
}
static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
unsigned int idx, unsigned long *flags)
{
struct gpiod_lookup_table *table;
struct gpio_desc *desc;
guard(mutex)(&gpio_lookup_lock);
list_for_each_entry(table, &gpio_lookup_list, list) {
if (!gpiod_match_lookup_table(dev, table))
continue;
desc = gpio_desc_table_match(dev, con_id, idx, flags, table);
if (!desc)
continue;
/* On IS_ERR() or match. */
return desc;
}
return ERR_PTR(-ENOENT);
return desc;
}
static int platform_gpio_count(struct device *dev, const char *con_id)
@ -4629,16 +4610,14 @@ static int platform_gpio_count(struct device *dev, const char *con_id)
unsigned int count = 0;
scoped_guard(mutex, &gpio_lookup_lock) {
list_for_each_entry(table, &gpio_lookup_list, list) {
if (!gpiod_match_lookup_table(dev, table))
continue;
table = gpiod_find_lookup_table(dev);
if (!table)
return -ENOENT;
for (p = &table->table[0]; p->key; p++) {
if ((con_id && p->con_id &&
!strcmp(con_id, p->con_id)) ||
(!con_id && !p->con_id))
count++;
}
for (p = &table->table[0]; p->key; p++) {
if ((con_id && p->con_id && !strcmp(con_id, p->con_id)) ||
(!con_id && !p->con_id))
count++;
}
}
@ -4717,8 +4696,7 @@ struct gpio_desc *gpiod_find_and_request(struct device *consumer,
* lookup table for the proxy device as previously
* we only knew the consumer's fwnode.
*/
ret = gpio_shared_add_proxy_lookup(consumer, con_id,
lookupflags);
ret = gpio_shared_add_proxy_lookup(consumer, lookupflags);
if (ret)
return ERR_PTR(ret);

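For reference, a minimal sketch (device name, chip label, offset and con_id are illustrative) of the machine lookup table that gpiod_find() walks in the hunks above: the table is selected by comparing table->dev_id against dev_name(dev), and individual entries are then matched by con_id and index until the NULL-key sentinel ends the walk:

	static struct gpiod_lookup_table example_lookup = {
		.dev_id = "foo-device.0",	/* compared with dev_name(consumer) */
		.table = {
			/* key, hardware offset on that chip, con_id, lookup flags */
			GPIO_LOOKUP("gpiochip0", 16, "reset", GPIO_ACTIVE_LOW),
			{ }			/* sentinel: p->key == NULL ends the walk */
		},
	};

	gpiod_add_lookup_table(&example_lookup);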
View File

@ -3445,10 +3445,11 @@ int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
continue;
/* skip CG for VCE/UVD, it's handled specially */
/* skip CG for VCE/UVD/VPE, it's handled specially */
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VPE &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
adev->ip_blocks[i].version->funcs->set_powergating_state) {
/* enable powergating to save power */
@ -5866,9 +5867,6 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
if (ret)
goto mode1_reset_failed;
/* enable mmio access after mode 1 reset completed */
adev->no_hw_access = false;
amdgpu_device_load_pci_state(adev->pdev);
ret = amdgpu_psp_wait_for_bootloader(adev);
if (ret)

View File

@ -89,16 +89,6 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
return seq;
}
static void amdgpu_fence_save_fence_wptr_start(struct amdgpu_fence *af)
{
af->fence_wptr_start = af->ring->wptr;
}
static void amdgpu_fence_save_fence_wptr_end(struct amdgpu_fence *af)
{
af->fence_wptr_end = af->ring->wptr;
}
/**
* amdgpu_fence_emit - emit a fence on the requested ring
*
@ -126,10 +116,8 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af,
&ring->fence_drv.lock,
adev->fence_context + ring->idx, seq);
amdgpu_fence_save_fence_wptr_start(af);
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, flags | AMDGPU_FENCE_FLAG_INT);
amdgpu_fence_save_fence_wptr_end(af);
amdgpu_fence_save_wptr(af);
pm_runtime_get_noresume(adev_to_drm(adev)->dev);
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
@ -721,7 +709,6 @@ void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af)
struct amdgpu_ring *ring = af->ring;
unsigned long flags;
u32 seq, last_seq;
bool reemitted = false;
last_seq = amdgpu_fence_read(ring) & ring->fence_drv.num_fences_mask;
seq = ring->fence_drv.sync_seq & ring->fence_drv.num_fences_mask;
@ -739,9 +726,7 @@ void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af)
if (unprocessed && !dma_fence_is_signaled_locked(unprocessed)) {
fence = container_of(unprocessed, struct amdgpu_fence, base);
if (fence->reemitted > 1)
reemitted = true;
else if (fence == af)
if (fence == af)
dma_fence_set_error(&fence->base, -ETIME);
else if (fence->context == af->context)
dma_fence_set_error(&fence->base, -ECANCELED);
@ -749,12 +734,9 @@ void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af)
rcu_read_unlock();
} while (last_seq != seq);
spin_unlock_irqrestore(&ring->fence_drv.lock, flags);
if (reemitted) {
/* if we've already reemitted once then just cancel everything */
amdgpu_fence_driver_force_completion(af->ring);
af->ring->ring_backup_entries_to_copy = 0;
}
/* signal the guilty fence */
amdgpu_fence_write(ring, (u32)af->base.seqno);
amdgpu_fence_process(ring);
}
void amdgpu_fence_save_wptr(struct amdgpu_fence *af)
@ -802,18 +784,10 @@ void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
/* save everything if the ring is not guilty, otherwise
* just save the content from other contexts.
*/
if (!fence->reemitted &&
(!guilty_fence || (fence->context != guilty_fence->context))) {
if (!guilty_fence || (fence->context != guilty_fence->context))
amdgpu_ring_backup_unprocessed_command(ring, wptr,
fence->wptr);
} else if (!fence->reemitted) {
/* always save the fence */
amdgpu_ring_backup_unprocessed_command(ring,
fence->fence_wptr_start,
fence->fence_wptr_end);
}
wptr = fence->wptr;
fence->reemitted++;
}
rcu_read_unlock();
} while (last_seq != seq);

View File

@ -318,36 +318,12 @@ void isp_kernel_buffer_free(void **buf_obj, u64 *gpu_addr, void **cpu_addr)
}
EXPORT_SYMBOL(isp_kernel_buffer_free);
static int isp_resume(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_isp *isp = &adev->isp;
if (isp->funcs->hw_resume)
return isp->funcs->hw_resume(isp);
return -ENODEV;
}
static int isp_suspend(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_isp *isp = &adev->isp;
if (isp->funcs->hw_suspend)
return isp->funcs->hw_suspend(isp);
return -ENODEV;
}
static const struct amd_ip_funcs isp_ip_funcs = {
.name = "isp_ip",
.early_init = isp_early_init,
.hw_init = isp_hw_init,
.hw_fini = isp_hw_fini,
.is_idle = isp_is_idle,
.suspend = isp_suspend,
.resume = isp_resume,
.set_clockgating_state = isp_set_clockgating_state,
.set_powergating_state = isp_set_powergating_state,
};

View File

@ -38,8 +38,6 @@ struct amdgpu_isp;
struct isp_funcs {
int (*hw_init)(struct amdgpu_isp *isp);
int (*hw_fini)(struct amdgpu_isp *isp);
int (*hw_suspend)(struct amdgpu_isp *isp);
int (*hw_resume)(struct amdgpu_isp *isp);
};
struct amdgpu_isp {

View File

@ -201,9 +201,6 @@ static enum amd_ip_block_type
type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;
break;
case AMDGPU_HW_IP_VPE:
type = AMD_IP_BLOCK_TYPE_VPE;
break;
default:
type = AMD_IP_BLOCK_TYPE_NUM;
break;
@ -724,9 +721,6 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
case AMD_IP_BLOCK_TYPE_UVD:
count = adev->uvd.num_uvd_inst;
break;
case AMD_IP_BLOCK_TYPE_VPE:
count = adev->vpe.num_instances;
break;
/* For all other IP block types not listed in the switch statement
* the ip status is valid here and the instance count is one.
*/

View File

@ -144,15 +144,10 @@ struct amdgpu_fence {
struct amdgpu_ring *ring;
ktime_t start_timestamp;
/* wptr for the total submission for resets */
/* wptr for the fence for resets */
u64 wptr;
/* fence context for resets */
u64 context;
/* has this fence been reemitted */
unsigned int reemitted;
/* wptr for the fence for the submission */
u64 fence_wptr_start;
u64 fence_wptr_end;
};
extern const struct drm_sched_backend_ops amdgpu_sched_ops;

View File

@ -26,7 +26,6 @@
*/
#include <linux/gpio/machine.h>
#include <linux/pm_runtime.h>
#include "amdgpu.h"
#include "isp_v4_1_1.h"
@ -146,9 +145,6 @@ static int isp_genpd_add_device(struct device *dev, void *data)
return -ENODEV;
}
/* The devices will be managed by the pm ops from the parent */
dev_pm_syscore_device(dev, true);
exit:
/* Continue to add */
return 0;
@ -181,47 +177,12 @@ static int isp_genpd_remove_device(struct device *dev, void *data)
drm_err(&adev->ddev, "Failed to remove dev from genpd %d\n", ret);
return -ENODEV;
}
dev_pm_syscore_device(dev, false);
exit:
/* Continue to remove */
return 0;
}
static int isp_suspend_device(struct device *dev, void *data)
{
return pm_runtime_force_suspend(dev);
}
static int isp_resume_device(struct device *dev, void *data)
{
return pm_runtime_force_resume(dev);
}
static int isp_v4_1_1_hw_suspend(struct amdgpu_isp *isp)
{
int r;
r = device_for_each_child(isp->parent, NULL,
isp_suspend_device);
if (r)
dev_err(isp->parent, "failed to suspend hw devices (%d)\n", r);
return r;
}
static int isp_v4_1_1_hw_resume(struct amdgpu_isp *isp)
{
int r;
r = device_for_each_child(isp->parent, NULL,
isp_resume_device);
if (r)
dev_err(isp->parent, "failed to resume hw device (%d)\n", r);
return r;
}
static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
{
const struct software_node *amd_camera_node, *isp4_node;
@ -408,8 +369,6 @@ static int isp_v4_1_1_hw_fini(struct amdgpu_isp *isp)
static const struct isp_funcs isp_v4_1_1_funcs = {
.hw_init = isp_v4_1_1_hw_init,
.hw_fini = isp_v4_1_1_hw_fini,
.hw_suspend = isp_v4_1_1_hw_suspend,
.hw_resume = isp_v4_1_1_hw_resume,
};
void isp_v4_1_1_set_isp_funcs(struct amdgpu_isp *isp)

View File

@ -763,14 +763,14 @@ static enum bp_result bios_parser_encoder_control(
return BP_RESULT_FAILURE;
return bp->cmd_tbl.dac1_encoder_control(
bp, cntl->action,
bp, cntl->action == ENCODER_CONTROL_ENABLE,
cntl->pixel_clock, ATOM_DAC1_PS2);
} else if (cntl->engine_id == ENGINE_ID_DACB) {
if (!bp->cmd_tbl.dac2_encoder_control)
return BP_RESULT_FAILURE;
return bp->cmd_tbl.dac2_encoder_control(
bp, cntl->action,
bp, cntl->action == ENCODER_CONTROL_ENABLE,
cntl->pixel_clock, ATOM_DAC1_PS2);
}

View File

@ -1797,30 +1797,7 @@ static enum bp_result select_crtc_source_v3(
&params.ucEncodeMode))
return BP_RESULT_BADINPUT;
switch (bp_params->color_depth) {
case COLOR_DEPTH_UNDEFINED:
params.ucDstBpc = PANEL_BPC_UNDEFINE;
break;
case COLOR_DEPTH_666:
params.ucDstBpc = PANEL_6BIT_PER_COLOR;
break;
default:
case COLOR_DEPTH_888:
params.ucDstBpc = PANEL_8BIT_PER_COLOR;
break;
case COLOR_DEPTH_101010:
params.ucDstBpc = PANEL_10BIT_PER_COLOR;
break;
case COLOR_DEPTH_121212:
params.ucDstBpc = PANEL_12BIT_PER_COLOR;
break;
case COLOR_DEPTH_141414:
dm_error("14-bit color not supported by SelectCRTC_Source v3\n");
break;
case COLOR_DEPTH_161616:
params.ucDstBpc = PANEL_16BIT_PER_COLOR;
break;
}
params.ucDstBpc = bp_params->bit_depth;
if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params))
result = BP_RESULT_OK;
@ -1838,12 +1815,12 @@ static enum bp_result select_crtc_source_v3(
static enum bp_result dac1_encoder_control_v1(
struct bios_parser *bp,
enum bp_encoder_control_action action,
bool enable,
uint32_t pixel_clock,
uint8_t dac_standard);
static enum bp_result dac2_encoder_control_v1(
struct bios_parser *bp,
enum bp_encoder_control_action action,
bool enable,
uint32_t pixel_clock,
uint8_t dac_standard);
@ -1869,15 +1846,12 @@ static void init_dac_encoder_control(struct bios_parser *bp)
static void dac_encoder_control_prepare_params(
DAC_ENCODER_CONTROL_PS_ALLOCATION *params,
enum bp_encoder_control_action action,
bool enable,
uint32_t pixel_clock,
uint8_t dac_standard)
{
params->ucDacStandard = dac_standard;
if (action == ENCODER_CONTROL_SETUP ||
action == ENCODER_CONTROL_INIT)
params->ucAction = ATOM_ENCODER_INIT;
else if (action == ENCODER_CONTROL_ENABLE)
if (enable)
params->ucAction = ATOM_ENABLE;
else
params->ucAction = ATOM_DISABLE;
@ -1890,7 +1864,7 @@ static void dac_encoder_control_prepare_params(
static enum bp_result dac1_encoder_control_v1(
struct bios_parser *bp,
enum bp_encoder_control_action action,
bool enable,
uint32_t pixel_clock,
uint8_t dac_standard)
{
@ -1899,7 +1873,7 @@ static enum bp_result dac1_encoder_control_v1(
dac_encoder_control_prepare_params(
&params,
action,
enable,
pixel_clock,
dac_standard);
@ -1911,7 +1885,7 @@ static enum bp_result dac1_encoder_control_v1(
static enum bp_result dac2_encoder_control_v1(
struct bios_parser *bp,
enum bp_encoder_control_action action,
bool enable,
uint32_t pixel_clock,
uint8_t dac_standard)
{
@ -1920,7 +1894,7 @@ static enum bp_result dac2_encoder_control_v1(
dac_encoder_control_prepare_params(
&params,
action,
enable,
pixel_clock,
dac_standard);

View File

@ -57,12 +57,12 @@ struct cmd_tbl {
struct bp_crtc_source_select *bp_params);
enum bp_result (*dac1_encoder_control)(
struct bios_parser *bp,
enum bp_encoder_control_action action,
bool enable,
uint32_t pixel_clock,
uint8_t dac_standard);
enum bp_result (*dac2_encoder_control)(
struct bios_parser *bp,
enum bp_encoder_control_action action,
bool enable,
uint32_t pixel_clock,
uint8_t dac_standard);
enum bp_result (*dac1_output_control)(

View File

@ -30,11 +30,7 @@ dml_rcflags := $(CC_FLAGS_NO_FPU)
ifneq ($(CONFIG_FRAME_WARN),0)
ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_COMPILE_TEST),yy)
frame_warn_limit := 4096
else
frame_warn_limit := 3072
endif
frame_warn_limit := 3072
else
frame_warn_limit := 2048
endif

View File

@ -77,14 +77,32 @@ static unsigned int dscceComputeDelay(
static unsigned int dscComputeDelay(
enum output_format_class pixelFormat,
enum output_encoder_class Output);
// Super monster function with some 45 argument
static bool CalculatePrefetchSchedule(
struct display_mode_lib *mode_lib,
unsigned int k,
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
Pipe *myPipe,
unsigned int DSCDelay,
double DPPCLKDelaySubtotalPlusCNVCFormater,
double DPPCLKDelaySCL,
double DPPCLKDelaySCLLBOnly,
double DPPCLKDelayCNVCCursor,
double DISPCLKDelaySubtotal,
unsigned int DPP_RECOUT_WIDTH,
enum output_format_class OutputFormat,
unsigned int MaxInterDCNTileRepeaters,
unsigned int VStartup,
unsigned int MaxVStartup,
unsigned int GPUVMPageTableLevels,
bool GPUVMEnable,
bool HostVMEnable,
unsigned int HostVMMaxNonCachedPageTableLevels,
double HostVMMinPageSize,
bool DynamicMetadataEnable,
bool DynamicMetadataVMEnabled,
int DynamicMetadataLinesBeforeActiveRequired,
unsigned int DynamicMetadataTransmittedBytes,
double UrgentLatency,
double UrgentExtraLatency,
double TCalc,
@ -98,6 +116,7 @@ static bool CalculatePrefetchSchedule(
unsigned int MaxNumSwathY,
double PrefetchSourceLinesC,
unsigned int SwathWidthC,
int BytePerPixelC,
double VInitPreFillC,
unsigned int MaxNumSwathC,
long swath_width_luma_ub,
@ -105,6 +124,9 @@ static bool CalculatePrefetchSchedule(
unsigned int SwathHeightY,
unsigned int SwathHeightC,
double TWait,
bool ProgressiveToInterlaceUnitInOPP,
double *DSTXAfterScaler,
double *DSTYAfterScaler,
double *DestinationLinesForPrefetch,
double *PrefetchBandwidth,
double *DestinationLinesToRequestVMInVBlank,
@ -113,7 +135,14 @@ static bool CalculatePrefetchSchedule(
double *VRatioPrefetchC,
double *RequiredPrefetchPixDataBWLuma,
double *RequiredPrefetchPixDataBWChroma,
bool *NotEnoughTimeForDynamicMetadata);
bool *NotEnoughTimeForDynamicMetadata,
double *Tno_bw,
double *prefetch_vmrow_bw,
double *Tdmdl_vm,
double *Tdmdl,
unsigned int *VUpdateOffsetPix,
double *VUpdateWidthPix,
double *VReadyOffsetPix);
static double RoundToDFSGranularityUp(double Clock, double VCOSpeed);
static double RoundToDFSGranularityDown(double Clock, double VCOSpeed);
static void CalculateDCCConfiguration(
@ -265,23 +294,62 @@ static void CalculateDynamicMetadataParameters(
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
unsigned int NumberOfActivePlanes,
unsigned int MaxLineBufferLines,
unsigned int LineBufferSize,
unsigned int DPPOutputBufferPixels,
unsigned int DETBufferSizeInKByte,
unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
bool GPUVMEnable,
unsigned int dpte_group_bytes[],
unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
double WritebackLatency,
double WritebackChunkSize,
double SOCCLK,
double DRAMClockChangeLatency,
double SRExitTime,
double SREnterPlusExitTime,
double DCFCLKDeepSleep,
unsigned int DPPPerPlane[],
bool DCCEnable[],
double DPPCLK[],
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
double HRatio[],
double HRatioChroma[],
unsigned int vtaps[],
unsigned int VTAPsChroma[],
double VRatio[],
double VRatioChroma[],
unsigned int HTotal[],
double PixelClock[],
unsigned int BlendingAndTiming[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
enum clock_change_support *DRAMClockChangeSupport);
double DSTXAfterScaler[],
double DSTYAfterScaler[],
bool WritebackEnable[],
enum source_format_class WritebackPixelFormat[],
double WritebackDestinationWidth[],
double WritebackDestinationHeight[],
double WritebackSourceHeight[],
enum clock_change_support *DRAMClockChangeSupport,
double *UrgentWatermark,
double *WritebackUrgentWatermark,
double *DRAMClockChangeWatermark,
double *WritebackDRAMClockChangeWatermark,
double *StutterExitWatermark,
double *StutterEnterPlusExitWatermark,
double *MinActiveDRAMClockChangeLatencySupported);
static void CalculateDCFCLKDeepSleep(
struct display_mode_lib *mode_lib,
unsigned int NumberOfActivePlanes,
@ -742,12 +810,29 @@ static unsigned int dscComputeDelay(enum output_format_class pixelFormat, enum o
static bool CalculatePrefetchSchedule(
struct display_mode_lib *mode_lib,
unsigned int k,
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
Pipe *myPipe,
unsigned int DSCDelay,
double DPPCLKDelaySubtotalPlusCNVCFormater,
double DPPCLKDelaySCL,
double DPPCLKDelaySCLLBOnly,
double DPPCLKDelayCNVCCursor,
double DISPCLKDelaySubtotal,
unsigned int DPP_RECOUT_WIDTH,
enum output_format_class OutputFormat,
unsigned int MaxInterDCNTileRepeaters,
unsigned int VStartup,
unsigned int MaxVStartup,
unsigned int GPUVMPageTableLevels,
bool GPUVMEnable,
bool HostVMEnable,
unsigned int HostVMMaxNonCachedPageTableLevels,
double HostVMMinPageSize,
bool DynamicMetadataEnable,
bool DynamicMetadataVMEnabled,
int DynamicMetadataLinesBeforeActiveRequired,
unsigned int DynamicMetadataTransmittedBytes,
double UrgentLatency,
double UrgentExtraLatency,
double TCalc,
@ -761,6 +846,7 @@ static bool CalculatePrefetchSchedule(
unsigned int MaxNumSwathY,
double PrefetchSourceLinesC,
unsigned int SwathWidthC,
int BytePerPixelC,
double VInitPreFillC,
unsigned int MaxNumSwathC,
long swath_width_luma_ub,
@ -768,6 +854,9 @@ static bool CalculatePrefetchSchedule(
unsigned int SwathHeightY,
unsigned int SwathHeightC,
double TWait,
bool ProgressiveToInterlaceUnitInOPP,
double *DSTXAfterScaler,
double *DSTYAfterScaler,
double *DestinationLinesForPrefetch,
double *PrefetchBandwidth,
double *DestinationLinesToRequestVMInVBlank,
@ -776,10 +865,15 @@ static bool CalculatePrefetchSchedule(
double *VRatioPrefetchC,
double *RequiredPrefetchPixDataBWLuma,
double *RequiredPrefetchPixDataBWChroma,
bool *NotEnoughTimeForDynamicMetadata)
bool *NotEnoughTimeForDynamicMetadata,
double *Tno_bw,
double *prefetch_vmrow_bw,
double *Tdmdl_vm,
double *Tdmdl,
unsigned int *VUpdateOffsetPix,
double *VUpdateWidthPix,
double *VReadyOffsetPix)
{
struct vba_vars_st *v = &mode_lib->vba;
double DPPCLKDelaySubtotalPlusCNVCFormater = v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater;
bool MyError = false;
unsigned int DPPCycles = 0, DISPCLKCycles = 0;
double DSTTotalPixelsAfterScaler = 0;
@ -811,26 +905,26 @@ static bool CalculatePrefetchSchedule(
double Tdmec = 0;
double Tdmsks = 0;
if (v->GPUVMEnable == true && v->HostVMEnable == true) {
HostVMInefficiencyFactor = v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData / v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly;
HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels;
if (GPUVMEnable == true && HostVMEnable == true) {
HostVMInefficiencyFactor = PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData / PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly;
HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
} else {
HostVMInefficiencyFactor = 1;
HostVMDynamicLevelsTrips = 0;
}
CalculateDynamicMetadataParameters(
v->MaxInterDCNTileRepeaters,
MaxInterDCNTileRepeaters,
myPipe->DPPCLK,
myPipe->DISPCLK,
myPipe->DCFCLKDeepSleep,
myPipe->PixelClock,
myPipe->HTotal,
myPipe->VBlank,
v->DynamicMetadataTransmittedBytes[k],
v->DynamicMetadataLinesBeforeActiveRequired[k],
DynamicMetadataTransmittedBytes,
DynamicMetadataLinesBeforeActiveRequired,
myPipe->InterlaceEnable,
v->ProgressiveToInterlaceUnitInOPP,
ProgressiveToInterlaceUnitInOPP,
&Tsetup,
&Tdmbf,
&Tdmec,
@ -838,16 +932,16 @@ static bool CalculatePrefetchSchedule(
LineTime = myPipe->HTotal / myPipe->PixelClock;
trip_to_mem = UrgentLatency;
Tvm_trips = UrgentExtraLatency + trip_to_mem * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1);
Tvm_trips = UrgentExtraLatency + trip_to_mem * (GPUVMPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1);
if (v->DynamicMetadataVMEnabled == true && v->GPUVMEnable == true) {
v->Tdmdl[k] = TWait + Tvm_trips + trip_to_mem;
if (DynamicMetadataVMEnabled == true && GPUVMEnable == true) {
*Tdmdl = TWait + Tvm_trips + trip_to_mem;
} else {
v->Tdmdl[k] = TWait + UrgentExtraLatency;
*Tdmdl = TWait + UrgentExtraLatency;
}
if (v->DynamicMetadataEnable[k] == true) {
if (VStartup * LineTime < Tsetup + v->Tdmdl[k] + Tdmbf + Tdmec + Tdmsks) {
if (DynamicMetadataEnable == true) {
if (VStartup * LineTime < Tsetup + *Tdmdl + Tdmbf + Tdmec + Tdmsks) {
*NotEnoughTimeForDynamicMetadata = true;
} else {
*NotEnoughTimeForDynamicMetadata = false;
@ -855,39 +949,39 @@ static bool CalculatePrefetchSchedule(
dml_print("DML: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", Tdmbf);
dml_print("DML: Tdmec: %fus - time dio takes to transfer dmd\n", Tdmec);
dml_print("DML: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", Tdmsks);
dml_print("DML: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", v->Tdmdl[k]);
dml_print("DML: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", *Tdmdl);
}
} else {
*NotEnoughTimeForDynamicMetadata = false;
}
v->Tdmdl_vm[k] = (v->DynamicMetadataEnable[k] == true && v->DynamicMetadataVMEnabled == true && v->GPUVMEnable == true ? TWait + Tvm_trips : 0);
*Tdmdl_vm = (DynamicMetadataEnable == true && DynamicMetadataVMEnabled == true && GPUVMEnable == true ? TWait + Tvm_trips : 0);
if (myPipe->ScalerEnabled)
DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + v->DPPCLKDelaySCL;
DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCL;
else
DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + v->DPPCLKDelaySCLLBOnly;
DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCLLBOnly;
DPPCycles = DPPCycles + myPipe->NumberOfCursors * v->DPPCLKDelayCNVCCursor;
DPPCycles = DPPCycles + myPipe->NumberOfCursors * DPPCLKDelayCNVCCursor;
DISPCLKCycles = v->DISPCLKDelaySubtotal;
DISPCLKCycles = DISPCLKDelaySubtotal;
if (myPipe->DPPCLK == 0.0 || myPipe->DISPCLK == 0.0)
return true;
v->DSTXAfterScaler[k] = DPPCycles * myPipe->PixelClock / myPipe->DPPCLK + DISPCLKCycles * myPipe->PixelClock / myPipe->DISPCLK
*DSTXAfterScaler = DPPCycles * myPipe->PixelClock / myPipe->DPPCLK + DISPCLKCycles * myPipe->PixelClock / myPipe->DISPCLK
+ DSCDelay;
v->DSTXAfterScaler[k] = v->DSTXAfterScaler[k] + ((myPipe->ODMCombineEnabled)?18:0) + (myPipe->DPPPerPlane - 1) * DPP_RECOUT_WIDTH;
*DSTXAfterScaler = *DSTXAfterScaler + ((myPipe->ODMCombineEnabled)?18:0) + (myPipe->DPPPerPlane - 1) * DPP_RECOUT_WIDTH;
if (v->OutputFormat[k] == dm_420 || (myPipe->InterlaceEnable && v->ProgressiveToInterlaceUnitInOPP))
v->DSTYAfterScaler[k] = 1;
if (OutputFormat == dm_420 || (myPipe->InterlaceEnable && ProgressiveToInterlaceUnitInOPP))
*DSTYAfterScaler = 1;
else
v->DSTYAfterScaler[k] = 0;
*DSTYAfterScaler = 0;
DSTTotalPixelsAfterScaler = v->DSTYAfterScaler[k] * myPipe->HTotal + v->DSTXAfterScaler[k];
v->DSTYAfterScaler[k] = dml_floor(DSTTotalPixelsAfterScaler / myPipe->HTotal, 1);
v->DSTXAfterScaler[k] = DSTTotalPixelsAfterScaler - ((double) (v->DSTYAfterScaler[k] * myPipe->HTotal));
DSTTotalPixelsAfterScaler = *DSTYAfterScaler * myPipe->HTotal + *DSTXAfterScaler;
*DSTYAfterScaler = dml_floor(DSTTotalPixelsAfterScaler / myPipe->HTotal, 1);
*DSTXAfterScaler = DSTTotalPixelsAfterScaler - ((double) (*DSTYAfterScaler * myPipe->HTotal));
MyError = false;
@ -896,33 +990,33 @@ static bool CalculatePrefetchSchedule(
Tvm_trips_rounded = dml_ceil(4.0 * Tvm_trips / LineTime, 1) / 4 * LineTime;
Tr0_trips_rounded = dml_ceil(4.0 * Tr0_trips / LineTime, 1) / 4 * LineTime;
if (v->GPUVMEnable) {
if (v->GPUVMMaxPageTableLevels >= 3) {
v->Tno_bw[k] = UrgentExtraLatency + trip_to_mem * ((v->GPUVMMaxPageTableLevels - 2) - 1);
if (GPUVMEnable) {
if (GPUVMPageTableLevels >= 3) {
*Tno_bw = UrgentExtraLatency + trip_to_mem * ((GPUVMPageTableLevels - 2) - 1);
} else
v->Tno_bw[k] = 0;
*Tno_bw = 0;
} else if (!myPipe->DCCEnable)
v->Tno_bw[k] = LineTime;
*Tno_bw = LineTime;
else
v->Tno_bw[k] = LineTime / 4;
*Tno_bw = LineTime / 4;
dst_y_prefetch_equ = VStartup - (Tsetup + dml_max(TWait + TCalc, v->Tdmdl[k])) / LineTime
- (v->DSTYAfterScaler[k] + v->DSTXAfterScaler[k] / myPipe->HTotal);
dst_y_prefetch_equ = VStartup - (Tsetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime
- (*DSTYAfterScaler + *DSTXAfterScaler / myPipe->HTotal);
dst_y_prefetch_equ = dml_min(dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH
Lsw_oto = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC);
Tsw_oto = Lsw_oto * LineTime;
prefetch_bw_oto = (PrefetchSourceLinesY * swath_width_luma_ub * BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * v->BytePerPixelC[k]) / Tsw_oto;
prefetch_bw_oto = (PrefetchSourceLinesY * swath_width_luma_ub * BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * BytePerPixelC) / Tsw_oto;
if (v->GPUVMEnable == true) {
Tvm_oto = dml_max3(v->Tno_bw[k] + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_oto,
if (GPUVMEnable == true) {
Tvm_oto = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_oto,
Tvm_trips,
LineTime / 4.0);
} else
Tvm_oto = LineTime / 4.0;
if ((v->GPUVMEnable == true || myPipe->DCCEnable == true)) {
if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
Tr0_oto = dml_max3(
(MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_oto,
LineTime - Tvm_oto, LineTime / 4);
@ -948,10 +1042,10 @@ static bool CalculatePrefetchSchedule(
dml_print("DML: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", Tdmbf);
dml_print("DML: Tdmec: %fus - time dio takes to transfer dmd\n", Tdmec);
dml_print("DML: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", Tdmsks);
dml_print("DML: Tdmdl_vm: %fus - time for vm stages of dmd \n", v->Tdmdl_vm[k]);
dml_print("DML: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", v->Tdmdl[k]);
dml_print("DML: dst_x_after_scl: %f pixels - number of pixel clocks pipeline and buffer delay after scaler \n", v->DSTXAfterScaler[k]);
dml_print("DML: dst_y_after_scl: %d lines - number of lines of pipeline and buffer delay after scaler \n", (int)v->DSTYAfterScaler[k]);
dml_print("DML: Tdmdl_vm: %fus - time for vm stages of dmd \n", *Tdmdl_vm);
dml_print("DML: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", *Tdmdl);
dml_print("DML: dst_x_after_scl: %f pixels - number of pixel clocks pipeline and buffer delay after scaler \n", *DSTXAfterScaler);
dml_print("DML: dst_y_after_scl: %d lines - number of lines of pipeline and buffer delay after scaler \n", (int)*DSTYAfterScaler);
*PrefetchBandwidth = 0;
*DestinationLinesToRequestVMInVBlank = 0;
@ -965,26 +1059,26 @@ static bool CalculatePrefetchSchedule(
double PrefetchBandwidth3 = 0;
double PrefetchBandwidth4 = 0;
if (Tpre_rounded - v->Tno_bw[k] > 0)
if (Tpre_rounded - *Tno_bw > 0)
PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte
+ 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor
+ PrefetchSourceLinesY * swath_width_luma_ub * BytePerPixelY
+ PrefetchSourceLinesC * swath_width_chroma_ub * v->BytePerPixelC[k])
/ (Tpre_rounded - v->Tno_bw[k]);
+ PrefetchSourceLinesC * swath_width_chroma_ub * BytePerPixelC)
/ (Tpre_rounded - *Tno_bw);
else
PrefetchBandwidth1 = 0;
if (VStartup == MaxVStartup && (PrefetchBandwidth1 > 4 * prefetch_bw_oto) && (Tpre_rounded - Tsw_oto / 4 - 0.75 * LineTime - v->Tno_bw[k]) > 0) {
PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor) / (Tpre_rounded - Tsw_oto / 4 - 0.75 * LineTime - v->Tno_bw[k]);
if (VStartup == MaxVStartup && (PrefetchBandwidth1 > 4 * prefetch_bw_oto) && (Tpre_rounded - Tsw_oto / 4 - 0.75 * LineTime - *Tno_bw) > 0) {
PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor) / (Tpre_rounded - Tsw_oto / 4 - 0.75 * LineTime - *Tno_bw);
}
if (Tpre_rounded - v->Tno_bw[k] - 2 * Tr0_trips_rounded > 0)
if (Tpre_rounded - *Tno_bw - 2 * Tr0_trips_rounded > 0)
PrefetchBandwidth2 = (PDEAndMetaPTEBytesFrame *
HostVMInefficiencyFactor + PrefetchSourceLinesY *
swath_width_luma_ub * BytePerPixelY +
PrefetchSourceLinesC * swath_width_chroma_ub *
v->BytePerPixelC[k]) /
(Tpre_rounded - v->Tno_bw[k] - 2 * Tr0_trips_rounded);
BytePerPixelC) /
(Tpre_rounded - *Tno_bw - 2 * Tr0_trips_rounded);
else
PrefetchBandwidth2 = 0;
@ -992,7 +1086,7 @@ static bool CalculatePrefetchSchedule(
PrefetchBandwidth3 = (2 * MetaRowByte + 2 * PixelPTEBytesPerRow *
HostVMInefficiencyFactor + PrefetchSourceLinesY *
swath_width_luma_ub * BytePerPixelY + PrefetchSourceLinesC *
swath_width_chroma_ub * v->BytePerPixelC[k]) / (Tpre_rounded -
swath_width_chroma_ub * BytePerPixelC) / (Tpre_rounded -
Tvm_trips_rounded);
else
PrefetchBandwidth3 = 0;
@ -1002,7 +1096,7 @@ static bool CalculatePrefetchSchedule(
}
if (Tpre_rounded - Tvm_trips_rounded - 2 * Tr0_trips_rounded > 0)
PrefetchBandwidth4 = (PrefetchSourceLinesY * swath_width_luma_ub * BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * v->BytePerPixelC[k])
PrefetchBandwidth4 = (PrefetchSourceLinesY * swath_width_luma_ub * BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * BytePerPixelC)
/ (Tpre_rounded - Tvm_trips_rounded - 2 * Tr0_trips_rounded);
else
PrefetchBandwidth4 = 0;
@ -1013,7 +1107,7 @@ static bool CalculatePrefetchSchedule(
bool Case3OK;
if (PrefetchBandwidth1 > 0) {
if (v->Tno_bw[k] + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth1
if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth1
>= Tvm_trips_rounded && (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / PrefetchBandwidth1 >= Tr0_trips_rounded) {
Case1OK = true;
} else {
@ -1024,7 +1118,7 @@ static bool CalculatePrefetchSchedule(
}
if (PrefetchBandwidth2 > 0) {
if (v->Tno_bw[k] + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth2
if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth2
>= Tvm_trips_rounded && (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / PrefetchBandwidth2 < Tr0_trips_rounded) {
Case2OK = true;
} else {
@ -1035,7 +1129,7 @@ static bool CalculatePrefetchSchedule(
}
if (PrefetchBandwidth3 > 0) {
if (v->Tno_bw[k] + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth3
if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth3
< Tvm_trips_rounded && (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / PrefetchBandwidth3 >= Tr0_trips_rounded) {
Case3OK = true;
} else {
@ -1058,13 +1152,13 @@ static bool CalculatePrefetchSchedule(
dml_print("DML: prefetch_bw_equ: %f\n", prefetch_bw_equ);
if (prefetch_bw_equ > 0) {
if (v->GPUVMEnable) {
Tvm_equ = dml_max3(v->Tno_bw[k] + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_equ, Tvm_trips, LineTime / 4);
if (GPUVMEnable) {
Tvm_equ = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_equ, Tvm_trips, LineTime / 4);
} else {
Tvm_equ = LineTime / 4;
}
if ((v->GPUVMEnable || myPipe->DCCEnable)) {
if ((GPUVMEnable || myPipe->DCCEnable)) {
Tr0_equ = dml_max4(
(MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_equ,
Tr0_trips,
@ -1133,7 +1227,7 @@ static bool CalculatePrefetchSchedule(
}
*RequiredPrefetchPixDataBWLuma = (double) PrefetchSourceLinesY / LinesToRequestPrefetchPixelData * BytePerPixelY * swath_width_luma_ub / LineTime;
*RequiredPrefetchPixDataBWChroma = (double) PrefetchSourceLinesC / LinesToRequestPrefetchPixelData * v->BytePerPixelC[k] * swath_width_chroma_ub / LineTime;
*RequiredPrefetchPixDataBWChroma = (double) PrefetchSourceLinesC / LinesToRequestPrefetchPixelData * BytePerPixelC * swath_width_chroma_ub / LineTime;
} else {
MyError = true;
dml_print("DML: MyErr set %s:%d\n", __FILE__, __LINE__);
@ -1149,9 +1243,9 @@ static bool CalculatePrefetchSchedule(
dml_print("DML: Tr0: %fus - time to fetch first row of data pagetables and first row of meta data (done in parallel)\n", TimeForFetchingRowInVBlank);
dml_print("DML: Tr1: %fus - time to fetch second row of data pagetables and second row of meta data (done in parallel)\n", TimeForFetchingRowInVBlank);
dml_print("DML: Tsw: %fus = time to fetch enough pixel data and cursor data to feed the scalers init position and detile\n", (double)LinesToRequestPrefetchPixelData * LineTime);
dml_print("DML: To: %fus - time for propagation from scaler to optc\n", (v->DSTYAfterScaler[k] + ((v->DSTXAfterScaler[k]) / (double) myPipe->HTotal)) * LineTime);
dml_print("DML: To: %fus - time for propagation from scaler to optc\n", (*DSTYAfterScaler + ((*DSTXAfterScaler) / (double) myPipe->HTotal)) * LineTime);
dml_print("DML: Tvstartup - Tsetup - Tcalc - Twait - Tpre - To > 0\n");
dml_print("DML: Tslack(pre): %fus - time left over in schedule\n", VStartup * LineTime - TimeForFetchingMetaPTE - 2 * TimeForFetchingRowInVBlank - (v->DSTYAfterScaler[k] + ((v->DSTXAfterScaler[k]) / (double) myPipe->HTotal)) * LineTime - TWait - TCalc - Tsetup);
dml_print("DML: Tslack(pre): %fus - time left over in schedule\n", VStartup * LineTime - TimeForFetchingMetaPTE - 2 * TimeForFetchingRowInVBlank - (*DSTYAfterScaler + ((*DSTXAfterScaler) / (double) myPipe->HTotal)) * LineTime - TWait - TCalc - Tsetup);
dml_print("DML: row_bytes = dpte_row_bytes (per_pipe) = PixelPTEBytesPerRow = : %d\n", PixelPTEBytesPerRow);
} else {
@ -1182,7 +1276,7 @@ static bool CalculatePrefetchSchedule(
dml_print("DML: MyErr set %s:%d\n", __FILE__, __LINE__);
}
v->prefetch_vmrow_bw[k] = dml_max(prefetch_vm_bw, prefetch_row_bw);
*prefetch_vmrow_bw = dml_max(prefetch_vm_bw, prefetch_row_bw);
}
if (MyError) {
@ -2343,12 +2437,30 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->ErrorResult[k] = CalculatePrefetchSchedule(
mode_lib,
k,
v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
&myPipe,
v->DSCDelay[k],
v->DPPCLKDelaySubtotal
+ v->DPPCLKDelayCNVCFormater,
v->DPPCLKDelaySCL,
v->DPPCLKDelaySCLLBOnly,
v->DPPCLKDelayCNVCCursor,
v->DISPCLKDelaySubtotal,
(unsigned int) (v->SwathWidthY[k] / v->HRatio[k]),
v->OutputFormat[k],
v->MaxInterDCNTileRepeaters,
dml_min(v->VStartupLines, v->MaxVStartupLines[k]),
v->MaxVStartupLines[k],
v->GPUVMMaxPageTableLevels,
v->GPUVMEnable,
v->HostVMEnable,
v->HostVMMaxNonCachedPageTableLevels,
v->HostVMMinPageSize,
v->DynamicMetadataEnable[k],
v->DynamicMetadataVMEnabled,
v->DynamicMetadataLinesBeforeActiveRequired[k],
v->DynamicMetadataTransmittedBytes[k],
v->UrgentLatency,
v->UrgentExtraLatency,
v->TCalc,
@ -2362,6 +2474,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->MaxNumSwathY[k],
v->PrefetchSourceLinesC[k],
v->SwathWidthC[k],
v->BytePerPixelC[k],
v->VInitPreFillC[k],
v->MaxNumSwathC[k],
v->swath_width_luma_ub[k],
@ -2369,6 +2482,9 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->SwathHeightY[k],
v->SwathHeightC[k],
TWait,
v->ProgressiveToInterlaceUnitInOPP,
&v->DSTXAfterScaler[k],
&v->DSTYAfterScaler[k],
&v->DestinationLinesForPrefetch[k],
&v->PrefetchBandwidth[k],
&v->DestinationLinesToRequestVMInVBlank[k],
@ -2377,7 +2493,14 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
&v->VRatioPrefetchC[k],
&v->RequiredPrefetchPixDataBWLuma[k],
&v->RequiredPrefetchPixDataBWChroma[k],
&v->NotEnoughTimeForDynamicMetadata[k]);
&v->NotEnoughTimeForDynamicMetadata[k],
&v->Tno_bw[k],
&v->prefetch_vmrow_bw[k],
&v->Tdmdl_vm[k],
&v->Tdmdl[k],
&v->VUpdateOffsetPix[k],
&v->VUpdateWidthPix[k],
&v->VReadyOffsetPix[k]);
if (v->BlendingAndTiming[k] == k) {
double TotalRepeaterDelayTime = v->MaxInterDCNTileRepeaters * (2 / v->DPPCLK[k] + 3 / v->DISPCLK);
v->VUpdateWidthPix[k] = (14 / v->DCFCLKDeepSleep + 12 / v->DPPCLK[k] + TotalRepeaterDelayTime) * v->PixelClock[k];
@ -2607,23 +2730,62 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
PrefetchMode,
v->NumberOfActivePlanes,
v->MaxLineBufferLines,
v->LineBufferSize,
v->DPPOutputBufferPixels,
v->DETBufferSizeInKByte[0],
v->WritebackInterfaceBufferSize,
v->DCFCLK,
v->ReturnBW,
v->GPUVMEnable,
v->dpte_group_bytes,
v->MetaChunkSize,
v->UrgentLatency,
v->UrgentExtraLatency,
v->WritebackLatency,
v->WritebackChunkSize,
v->SOCCLK,
v->FinalDRAMClockChangeLatency,
v->SRExitTime,
v->SREnterPlusExitTime,
v->DCFCLKDeepSleep,
v->DPPPerPlane,
v->DCCEnable,
v->DPPCLK,
v->DETBufferSizeY,
v->DETBufferSizeC,
v->SwathHeightY,
v->SwathHeightC,
v->LBBitPerPixel,
v->SwathWidthY,
v->SwathWidthC,
v->HRatio,
v->HRatioChroma,
v->vtaps,
v->VTAPsChroma,
v->VRatio,
v->VRatioChroma,
v->HTotal,
v->PixelClock,
v->BlendingAndTiming,
v->BytePerPixelDETY,
v->BytePerPixelDETC,
&DRAMClockChangeSupport);
v->DSTXAfterScaler,
v->DSTYAfterScaler,
v->WritebackEnable,
v->WritebackPixelFormat,
v->WritebackDestinationWidth,
v->WritebackDestinationHeight,
v->WritebackSourceHeight,
&DRAMClockChangeSupport,
&v->UrgentWatermark,
&v->WritebackUrgentWatermark,
&v->DRAMClockChangeWatermark,
&v->WritebackDRAMClockChangeWatermark,
&v->StutterExitWatermark,
&v->StutterEnterPlusExitWatermark,
&v->MinActiveDRAMClockChangeLatencySupported);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->WritebackEnable[k] == true) {
@ -4608,12 +4770,29 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->NoTimeForPrefetch[i][j][k] = CalculatePrefetchSchedule(
mode_lib,
k,
v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
&myPipe,
v->DSCDelayPerState[i][k],
v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater,
v->DPPCLKDelaySCL,
v->DPPCLKDelaySCLLBOnly,
v->DPPCLKDelayCNVCCursor,
v->DISPCLKDelaySubtotal,
v->SwathWidthYThisState[k] / v->HRatio[k],
v->OutputFormat[k],
v->MaxInterDCNTileRepeaters,
dml_min(v->MaxVStartup, v->MaximumVStartup[i][j][k]),
v->MaximumVStartup[i][j][k],
v->GPUVMMaxPageTableLevels,
v->GPUVMEnable,
v->HostVMEnable,
v->HostVMMaxNonCachedPageTableLevels,
v->HostVMMinPageSize,
v->DynamicMetadataEnable[k],
v->DynamicMetadataVMEnabled,
v->DynamicMetadataLinesBeforeActiveRequired[k],
v->DynamicMetadataTransmittedBytes[k],
v->UrgLatency[i],
v->ExtraLatency,
v->TimeCalc,
@ -4627,6 +4806,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->MaxNumSwY[k],
v->PrefetchLinesC[i][j][k],
v->SwathWidthCThisState[k],
v->BytePerPixelC[k],
v->PrefillC[k],
v->MaxNumSwC[k],
v->swath_width_luma_ub_this_state[k],
@ -4634,6 +4814,9 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->SwathHeightYThisState[k],
v->SwathHeightCThisState[k],
v->TWait,
v->ProgressiveToInterlaceUnitInOPP,
&v->DSTXAfterScaler[k],
&v->DSTYAfterScaler[k],
&v->LineTimesForPrefetch[k],
&v->PrefetchBW[k],
&v->LinesForMetaPTE[k],
@ -4642,7 +4825,14 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
&v->VRatioPreC[i][j][k],
&v->RequiredPrefetchPixelDataBWLuma[i][j][k],
&v->RequiredPrefetchPixelDataBWChroma[i][j][k],
&v->NoTimeForDynamicMetadata[i][j][k]);
&v->NoTimeForDynamicMetadata[i][j][k],
&v->Tno_bw[k],
&v->prefetch_vmrow_bw[k],
&v->Tdmdl_vm[k],
&v->Tdmdl[k],
&v->VUpdateOffsetPix[k],
&v->VUpdateWidthPix[k],
&v->VReadyOffsetPix[k]);
}
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
@ -4817,23 +5007,62 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
v->PrefetchModePerState[i][j],
v->NumberOfActivePlanes,
v->MaxLineBufferLines,
v->LineBufferSize,
v->DPPOutputBufferPixels,
v->DETBufferSizeInKByte[0],
v->WritebackInterfaceBufferSize,
v->DCFCLKState[i][j],
v->ReturnBWPerState[i][j],
v->GPUVMEnable,
v->dpte_group_bytes,
v->MetaChunkSize,
v->UrgLatency[i],
v->ExtraLatency,
v->WritebackLatency,
v->WritebackChunkSize,
v->SOCCLKPerState[i],
v->FinalDRAMClockChangeLatency,
v->SRExitTime,
v->SREnterPlusExitTime,
v->ProjectedDCFCLKDeepSleep[i][j],
v->NoOfDPPThisState,
v->DCCEnable,
v->RequiredDPPCLKThisState,
v->DETBufferSizeYThisState,
v->DETBufferSizeCThisState,
v->SwathHeightYThisState,
v->SwathHeightCThisState,
v->LBBitPerPixel,
v->SwathWidthYThisState,
v->SwathWidthCThisState,
v->HRatio,
v->HRatioChroma,
v->vtaps,
v->VTAPsChroma,
v->VRatio,
v->VRatioChroma,
v->HTotal,
v->PixelClock,
v->BlendingAndTiming,
v->BytePerPixelInDETY,
v->BytePerPixelInDETC,
&v->DRAMClockChangeSupport[i][j]);
v->DSTXAfterScaler,
v->DSTYAfterScaler,
v->WritebackEnable,
v->WritebackPixelFormat,
v->WritebackDestinationWidth,
v->WritebackDestinationHeight,
v->WritebackSourceHeight,
&v->DRAMClockChangeSupport[i][j],
&v->UrgentWatermark,
&v->WritebackUrgentWatermark,
&v->DRAMClockChangeWatermark,
&v->WritebackDRAMClockChangeWatermark,
&v->StutterExitWatermark,
&v->StutterEnterPlusExitWatermark,
&v->MinActiveDRAMClockChangeLatencySupported);
}
}
@ -4950,25 +5179,63 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
unsigned int NumberOfActivePlanes,
unsigned int MaxLineBufferLines,
unsigned int LineBufferSize,
unsigned int DPPOutputBufferPixels,
unsigned int DETBufferSizeInKByte,
unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
bool GPUVMEnable,
unsigned int dpte_group_bytes[],
unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
double WritebackLatency,
double WritebackChunkSize,
double SOCCLK,
double DRAMClockChangeLatency,
double SRExitTime,
double SREnterPlusExitTime,
double DCFCLKDeepSleep,
unsigned int DPPPerPlane[],
bool DCCEnable[],
double DPPCLK[],
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
double HRatio[],
double HRatioChroma[],
unsigned int vtaps[],
unsigned int VTAPsChroma[],
double VRatio[],
double VRatioChroma[],
unsigned int HTotal[],
double PixelClock[],
unsigned int BlendingAndTiming[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
enum clock_change_support *DRAMClockChangeSupport)
double DSTXAfterScaler[],
double DSTYAfterScaler[],
bool WritebackEnable[],
enum source_format_class WritebackPixelFormat[],
double WritebackDestinationWidth[],
double WritebackDestinationHeight[],
double WritebackSourceHeight[],
enum clock_change_support *DRAMClockChangeSupport,
double *UrgentWatermark,
double *WritebackUrgentWatermark,
double *DRAMClockChangeWatermark,
double *WritebackDRAMClockChangeWatermark,
double *StutterExitWatermark,
double *StutterEnterPlusExitWatermark,
double *MinActiveDRAMClockChangeLatencySupported)
{
struct vba_vars_st *v = &mode_lib->vba;
double EffectiveLBLatencyHidingY = 0;
double EffectiveLBLatencyHidingC = 0;
double LinesInDETY[DC__NUM_DPP__MAX] = { 0 };
@ -4987,101 +5254,101 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
double WritebackDRAMClockChangeLatencyHiding = 0;
unsigned int k, j;
v->TotalActiveDPP = 0;
v->TotalDCCActiveDPP = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->TotalActiveDPP = v->TotalActiveDPP + DPPPerPlane[k];
if (v->DCCEnable[k] == true) {
v->TotalDCCActiveDPP = v->TotalDCCActiveDPP + DPPPerPlane[k];
mode_lib->vba.TotalActiveDPP = 0;
mode_lib->vba.TotalDCCActiveDPP = 0;
for (k = 0; k < NumberOfActivePlanes; ++k) {
mode_lib->vba.TotalActiveDPP = mode_lib->vba.TotalActiveDPP + DPPPerPlane[k];
if (DCCEnable[k] == true) {
mode_lib->vba.TotalDCCActiveDPP = mode_lib->vba.TotalDCCActiveDPP + DPPPerPlane[k];
}
}
v->UrgentWatermark = UrgentLatency + ExtraLatency;
*UrgentWatermark = UrgentLatency + ExtraLatency;
v->DRAMClockChangeWatermark = v->FinalDRAMClockChangeLatency + v->UrgentWatermark;
*DRAMClockChangeWatermark = DRAMClockChangeLatency + *UrgentWatermark;
v->TotalActiveWriteback = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->WritebackEnable[k] == true) {
v->TotalActiveWriteback = v->TotalActiveWriteback + 1;
mode_lib->vba.TotalActiveWriteback = 0;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (WritebackEnable[k] == true) {
mode_lib->vba.TotalActiveWriteback = mode_lib->vba.TotalActiveWriteback + 1;
}
}
if (v->TotalActiveWriteback <= 1) {
v->WritebackUrgentWatermark = v->WritebackLatency;
if (mode_lib->vba.TotalActiveWriteback <= 1) {
*WritebackUrgentWatermark = WritebackLatency;
} else {
v->WritebackUrgentWatermark = v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
*WritebackUrgentWatermark = WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
if (v->TotalActiveWriteback <= 1) {
v->WritebackDRAMClockChangeWatermark = v->FinalDRAMClockChangeLatency + v->WritebackLatency;
if (mode_lib->vba.TotalActiveWriteback <= 1) {
*WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency;
} else {
v->WritebackDRAMClockChangeWatermark = v->FinalDRAMClockChangeLatency + v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
*WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
for (k = 0; k < NumberOfActivePlanes; ++k) {
v->LBLatencyHidingSourceLinesY = dml_min((double) v->MaxLineBufferLines, dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1);
mode_lib->vba.LBLatencyHidingSourceLinesY = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (vtaps[k] - 1);
v->LBLatencyHidingSourceLinesC = dml_min((double) v->MaxLineBufferLines, dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1);
mode_lib->vba.LBLatencyHidingSourceLinesC = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTAPsChroma[k] - 1);
EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]);
EffectiveLBLatencyHidingY = mode_lib->vba.LBLatencyHidingSourceLinesY / VRatio[k] * (HTotal[k] / PixelClock[k]);
EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]);
EffectiveLBLatencyHidingC = mode_lib->vba.LBLatencyHidingSourceLinesC / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
LinesInDETY[k] = (double) DETBufferSizeY[k] / BytePerPixelDETY[k] / SwathWidthY[k];
LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
FullDETBufferingTimeY[k] = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
FullDETBufferingTimeY[k] = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
if (BytePerPixelDETC[k] > 0) {
LinesInDETC = v->DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
LinesInDETC = mode_lib->vba.DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
LinesInDETCRoundedDownToSwath = dml_floor(LinesInDETC, SwathHeightC[k]);
FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (v->HTotal[k] / v->PixelClock[k]) / v->VRatioChroma[k];
FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (HTotal[k] / PixelClock[k]) / VRatioChroma[k];
} else {
LinesInDETC = 0;
FullDETBufferingTimeC = 999999;
}
ActiveDRAMClockChangeLatencyMarginY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY[k] - v->UrgentWatermark - (v->HTotal[k] / v->PixelClock[k]) * (v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) - v->DRAMClockChangeWatermark;
ActiveDRAMClockChangeLatencyMarginY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY[k] - *UrgentWatermark - (HTotal[k] / PixelClock[k]) * (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) - *DRAMClockChangeWatermark;
if (v->NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightY[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatio[k];
if (NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightY[k] * HTotal[k] / PixelClock[k] / VRatio[k];
}
if (BytePerPixelDETC[k] > 0) {
ActiveDRAMClockChangeLatencyMarginC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC - v->UrgentWatermark - (v->HTotal[k] / v->PixelClock[k]) * (v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) - v->DRAMClockChangeWatermark;
ActiveDRAMClockChangeLatencyMarginC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC - *UrgentWatermark - (HTotal[k] / PixelClock[k]) * (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) - *DRAMClockChangeWatermark;
if (v->NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightC[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatioChroma[k];
if (NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightC[k] * HTotal[k] / PixelClock[k] / VRatioChroma[k];
}
v->ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMarginY, ActiveDRAMClockChangeLatencyMarginC);
mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMarginY, ActiveDRAMClockChangeLatencyMarginC);
} else {
v->ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY;
mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY;
}
if (v->WritebackEnable[k] == true) {
if (WritebackEnable[k] == true) {
WritebackDRAMClockChangeLatencyHiding = v->WritebackInterfaceBufferSize * 1024 / (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k] / (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4);
if (v->WritebackPixelFormat[k] == dm_444_64) {
WritebackDRAMClockChangeLatencyHiding = WritebackInterfaceBufferSize * 1024 / (WritebackDestinationWidth[k] * WritebackDestinationHeight[k] / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4);
if (WritebackPixelFormat[k] == dm_444_64) {
WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding / 2;
}
if (v->WritebackConfiguration == dm_whole_buffer_for_single_stream_interleave) {
if (mode_lib->vba.WritebackConfiguration == dm_whole_buffer_for_single_stream_interleave) {
WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding * 2;
}
WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - v->WritebackDRAMClockChangeWatermark;
v->ActiveDRAMClockChangeLatencyMargin[k] = dml_min(v->ActiveDRAMClockChangeLatencyMargin[k], WritebackDRAMClockChangeLatencyMargin);
WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - mode_lib->vba.WritebackDRAMClockChangeWatermark;
mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min(mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k], WritebackDRAMClockChangeLatencyMargin);
}
}
v->MinActiveDRAMClockChangeMargin = 999999;
mode_lib->vba.MinActiveDRAMClockChangeMargin = 999999;
PlaneWithMinActiveDRAMClockChangeMargin = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->ActiveDRAMClockChangeLatencyMargin[k] < v->MinActiveDRAMClockChangeMargin) {
v->MinActiveDRAMClockChangeMargin = v->ActiveDRAMClockChangeLatencyMargin[k];
if (v->BlendingAndTiming[k] == k) {
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] < mode_lib->vba.MinActiveDRAMClockChangeMargin) {
mode_lib->vba.MinActiveDRAMClockChangeMargin = mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
if (BlendingAndTiming[k] == k) {
PlaneWithMinActiveDRAMClockChangeMargin = k;
} else {
for (j = 0; j < v->NumberOfActivePlanes; ++j) {
if (v->BlendingAndTiming[k] == j) {
for (j = 0; j < NumberOfActivePlanes; ++j) {
if (BlendingAndTiming[k] == j) {
PlaneWithMinActiveDRAMClockChangeMargin = j;
}
}
@ -5089,40 +5356,40 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
}
}
v->MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + v->FinalDRAMClockChangeLatency;
*MinActiveDRAMClockChangeLatencySupported = mode_lib->vba.MinActiveDRAMClockChangeMargin + DRAMClockChangeLatency;
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (v->BlendingAndTiming[k] == k)) && !(v->BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin) && v->ActiveDRAMClockChangeLatencyMargin[k] < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = v->ActiveDRAMClockChangeLatencyMargin[k];
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (BlendingAndTiming[k] == k)) && !(BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin) && mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
}
}
v->TotalNumberOfActiveOTG = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->BlendingAndTiming[k] == k) {
v->TotalNumberOfActiveOTG = v->TotalNumberOfActiveOTG + 1;
mode_lib->vba.TotalNumberOfActiveOTG = 0;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (BlendingAndTiming[k] == k) {
mode_lib->vba.TotalNumberOfActiveOTG = mode_lib->vba.TotalNumberOfActiveOTG + 1;
}
}
if (v->MinActiveDRAMClockChangeMargin > 0) {
if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vactive;
} else if (((v->SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1 || SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0)) {
} else if (((mode_lib->vba.SynchronizedVBlank == true || mode_lib->vba.TotalNumberOfActiveOTG == 1 || SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0)) {
*DRAMClockChangeSupport = dm_dram_clock_change_vblank;
} else {
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
}
FullDETBufferingTimeYStutterCriticalPlane = FullDETBufferingTimeY[0];
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (FullDETBufferingTimeY[k] <= FullDETBufferingTimeYStutterCriticalPlane) {
FullDETBufferingTimeYStutterCriticalPlane = FullDETBufferingTimeY[k];
TimeToFinishSwathTransferStutterCriticalPlane = (SwathHeightY[k] - (LinesInDETY[k] - LinesInDETYRoundedDownToSwath[k])) * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
TimeToFinishSwathTransferStutterCriticalPlane = (SwathHeightY[k] - (LinesInDETY[k] - LinesInDETYRoundedDownToSwath[k])) * (HTotal[k] / PixelClock[k]) / VRatio[k];
}
}
v->StutterExitWatermark = v->SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
v->StutterEnterPlusExitWatermark = dml_max(v->SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep, TimeToFinishSwathTransferStutterCriticalPlane);
*StutterExitWatermark = SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
*StutterEnterPlusExitWatermark = dml_max(SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep, TimeToFinishSwathTransferStutterCriticalPlane);
}

View File

@ -1610,12 +1610,38 @@ dce110_select_crtc_source(struct pipe_ctx *pipe_ctx)
struct dc_bios *bios = link->ctx->dc_bios;
struct bp_crtc_source_select crtc_source_select = {0};
enum engine_id engine_id = link->link_enc->preferred_engine;
uint8_t bit_depth;
if (dc_is_rgb_signal(pipe_ctx->stream->signal))
engine_id = link->link_enc->analog_engine;
switch (pipe_ctx->stream->timing.display_color_depth) {
case COLOR_DEPTH_UNDEFINED:
bit_depth = 0;
break;
case COLOR_DEPTH_666:
bit_depth = 6;
break;
default:
case COLOR_DEPTH_888:
bit_depth = 8;
break;
case COLOR_DEPTH_101010:
bit_depth = 10;
break;
case COLOR_DEPTH_121212:
bit_depth = 12;
break;
case COLOR_DEPTH_141414:
bit_depth = 14;
break;
case COLOR_DEPTH_161616:
bit_depth = 16;
break;
}
crtc_source_select.controller_id = CONTROLLER_ID_D0 + pipe_ctx->stream_res.tg->inst;
crtc_source_select.color_depth = pipe_ctx->stream->timing.display_color_depth;
crtc_source_select.bit_depth = bit_depth;
crtc_source_select.engine_id = engine_id;
crtc_source_select.sink_signal = pipe_ctx->stream->signal;

View File

@ -932,7 +932,7 @@ static bool link_detect_dac_load_detect(struct dc_link *link)
struct link_encoder *link_enc = link->link_enc;
enum engine_id engine_id = link_enc->preferred_engine;
enum dal_device_type device_type = DEVICE_TYPE_CRT;
enum bp_result bp_result = BP_RESULT_UNSUPPORTED;
enum bp_result bp_result;
uint32_t enum_id;
switch (engine_id) {
@ -946,9 +946,7 @@ static bool link_detect_dac_load_detect(struct dc_link *link)
break;
}
if (bios->funcs->dac_load_detection)
bp_result = bios->funcs->dac_load_detection(bios, engine_id, device_type, enum_id);
bp_result = bios->funcs->dac_load_detection(bios, engine_id, device_type, enum_id);
return bp_result == BP_RESULT_OK;
}

View File

@ -136,7 +136,7 @@ struct bp_crtc_source_select {
enum engine_id engine_id;
enum controller_id controller_id;
enum signal_type sink_signal;
enum dc_color_depth color_depth;
uint8_t bit_depth;
};
struct bp_transmitter_control {

View File

@ -2455,21 +2455,24 @@ static int navi10_update_pcie_parameters(struct smu_context *smu,
}
for (i = 0; i < NUM_LINK_LEVELS; i++) {
dpm_context->dpm_tables.pcie_table.pcie_gen[i] =
pptable->PcieGenSpeed[i] > pcie_gen_cap ?
pcie_gen_cap : pptable->PcieGenSpeed[i];
dpm_context->dpm_tables.pcie_table.pcie_lane[i] =
pptable->PcieLaneCount[i] > pcie_width_cap ?
pcie_width_cap : pptable->PcieLaneCount[i];
smu_pcie_arg = i << 16;
smu_pcie_arg |= dpm_context->dpm_tables.pcie_table.pcie_gen[i] << 8;
smu_pcie_arg |= dpm_context->dpm_tables.pcie_table.pcie_lane[i];
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_OverridePcieParameters,
smu_pcie_arg,
NULL);
if (ret)
return ret;
if (pptable->PcieGenSpeed[i] > pcie_gen_cap ||
pptable->PcieLaneCount[i] > pcie_width_cap) {
dpm_context->dpm_tables.pcie_table.pcie_gen[i] =
pptable->PcieGenSpeed[i] > pcie_gen_cap ?
pcie_gen_cap : pptable->PcieGenSpeed[i];
dpm_context->dpm_tables.pcie_table.pcie_lane[i] =
pptable->PcieLaneCount[i] > pcie_width_cap ?
pcie_width_cap : pptable->PcieLaneCount[i];
smu_pcie_arg = i << 16;
smu_pcie_arg |= pcie_gen_cap << 8;
smu_pcie_arg |= pcie_width_cap;
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_OverridePcieParameters,
smu_pcie_arg,
NULL);
if (ret)
break;
}
}
return ret;

View File

@ -2923,13 +2923,8 @@ static int smu_v13_0_0_mode1_reset(struct smu_context *smu)
break;
}
if (!ret) {
/* disable mmio access while doing mode 1 reset*/
smu->adev->no_hw_access = true;
/* ensure no_hw_access is globally visible before any MMIO */
smp_mb();
if (!ret)
msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);
}
return ret;
}

View File

@ -2143,15 +2143,10 @@ static int smu_v14_0_2_mode1_reset(struct smu_context *smu)
ret = smu_cmn_send_debug_smc_msg(smu, DEBUGSMC_MSG_Mode1Reset);
if (!ret) {
if (amdgpu_emu_mode == 1) {
if (amdgpu_emu_mode == 1)
msleep(50000);
} else {
/* disable mmio access while doing mode 1 reset*/
smu->adev->no_hw_access = true;
/* ensure no_hw_access is globally visible before any MMIO */
smp_mb();
else
msleep(1000);
}
}
return ret;

View File

@ -1162,18 +1162,8 @@ crtc_needs_disable(struct drm_crtc_state *old_state,
new_state->self_refresh_active;
}
/**
* drm_atomic_helper_commit_encoder_bridge_disable - disable bridges and encoder
* @dev: DRM device
* @state: the driver state object
*
* Loops over all connectors in the current state and if the CRTC needs
* it, disables the bridge chain all the way, then disables the encoder
* afterwards.
*/
void
drm_atomic_helper_commit_encoder_bridge_disable(struct drm_device *dev,
struct drm_atomic_state *state)
static void
encoder_bridge_disable(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *old_conn_state, *new_conn_state;
@ -1239,18 +1229,9 @@ drm_atomic_helper_commit_encoder_bridge_disable(struct drm_device *dev,
}
}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_encoder_bridge_disable);
/**
 * drm_atomic_helper_commit_crtc_disable - disable CRTCs
* @dev: DRM device
* @state: the driver state object
*
* Loops over all CRTCs in the current state and if the CRTC needs
* it, disables it.
*/
void
drm_atomic_helper_commit_crtc_disable(struct drm_device *dev, struct drm_atomic_state *state)
static void
crtc_disable(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
@ -1301,18 +1282,9 @@ drm_atomic_helper_commit_crtc_disable(struct drm_device *dev, struct drm_atomic_
drm_crtc_vblank_put(crtc);
}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_crtc_disable);
/**
* drm_atomic_helper_commit_encoder_bridge_post_disable - post-disable encoder bridges
* @dev: DRM device
* @state: the driver state object
*
* Loops over all connectors in the current state and if the CRTC needs
* it, post-disables all encoder bridges.
*/
void
drm_atomic_helper_commit_encoder_bridge_post_disable(struct drm_device *dev, struct drm_atomic_state *state)
static void
encoder_bridge_post_disable(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *old_conn_state, *new_conn_state;
@ -1363,16 +1335,15 @@ drm_atomic_helper_commit_encoder_bridge_post_disable(struct drm_device *dev, str
drm_bridge_put(bridge);
}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_encoder_bridge_post_disable);
static void
disable_outputs(struct drm_device *dev, struct drm_atomic_state *state)
{
drm_atomic_helper_commit_encoder_bridge_disable(dev, state);
encoder_bridge_disable(dev, state);
drm_atomic_helper_commit_encoder_bridge_post_disable(dev, state);
crtc_disable(dev, state);
drm_atomic_helper_commit_crtc_disable(dev, state);
encoder_bridge_post_disable(dev, state);
}
/**
@ -1475,17 +1446,8 @@ void drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *stat
}
EXPORT_SYMBOL(drm_atomic_helper_calc_timestamping_constants);
/**
* drm_atomic_helper_commit_crtc_set_mode - set the new mode
* @dev: DRM device
* @state: the driver state object
*
 * Loops over all connectors in the current state and if the mode has
 * changed, changes the mode of the CRTC, then calls down the bridge
 * chain and changes the mode in all bridges as well.
*/
void
drm_atomic_helper_commit_crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *state)
static void
crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
@ -1546,7 +1508,6 @@ drm_atomic_helper_commit_crtc_set_mode(struct drm_device *dev, struct drm_atomic
drm_bridge_put(bridge);
}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_crtc_set_mode);
/**
* drm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
@ -1570,21 +1531,12 @@ void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
drm_atomic_helper_update_legacy_modeset_state(dev, state);
drm_atomic_helper_calc_timestamping_constants(state);
drm_atomic_helper_commit_crtc_set_mode(dev, state);
crtc_set_mode(dev, state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
/**
* drm_atomic_helper_commit_writebacks - issue writebacks
* @dev: DRM device
* @state: atomic state object being committed
*
* This loops over the connectors, checks if the new state requires
* a writeback job to be issued and in that case issues an atomic
* commit on each connector.
*/
void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
struct drm_atomic_state *state)
static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *new_conn_state;
@ -1603,18 +1555,9 @@ void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
}
}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_writebacks);
/**
* drm_atomic_helper_commit_encoder_bridge_pre_enable - pre-enable bridges
* @dev: DRM device
* @state: atomic state object being committed
*
* This loops over the connectors and if the CRTC needs it, pre-enables
* the entire bridge chain.
*/
void
drm_atomic_helper_commit_encoder_bridge_pre_enable(struct drm_device *dev, struct drm_atomic_state *state)
static void
encoder_bridge_pre_enable(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *new_conn_state;
@ -1645,18 +1588,9 @@ drm_atomic_helper_commit_encoder_bridge_pre_enable(struct drm_device *dev, struc
drm_bridge_put(bridge);
}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_encoder_bridge_pre_enable);
/**
* drm_atomic_helper_commit_crtc_enable - enables the CRTCs
* @dev: DRM device
* @state: atomic state object being committed
*
 * This loops over CRTCs in the new state, and if the CRTC needs
* it, enables it.
*/
void
drm_atomic_helper_commit_crtc_enable(struct drm_device *dev, struct drm_atomic_state *state)
static void
crtc_enable(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
@ -1685,18 +1619,9 @@ drm_atomic_helper_commit_crtc_enable(struct drm_device *dev, struct drm_atomic_s
}
}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_crtc_enable);
/**
* drm_atomic_helper_commit_encoder_bridge_enable - enables the bridges
* @dev: DRM device
* @state: atomic state object being committed
*
 * This loops over all connectors in the new state, and if the CRTC needs
* it, enables the entire bridge chain.
*/
void
drm_atomic_helper_commit_encoder_bridge_enable(struct drm_device *dev, struct drm_atomic_state *state)
static void
encoder_bridge_enable(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *new_conn_state;
@ -1739,7 +1664,6 @@ drm_atomic_helper_commit_encoder_bridge_enable(struct drm_device *dev, struct dr
drm_bridge_put(bridge);
}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_encoder_bridge_enable);
/**
* drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
@ -1758,11 +1682,11 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_encoder_bridge_enable);
void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
struct drm_atomic_state *state)
{
drm_atomic_helper_commit_crtc_enable(dev, state);
encoder_bridge_pre_enable(dev, state);
drm_atomic_helper_commit_encoder_bridge_pre_enable(dev, state);
crtc_enable(dev, state);
drm_atomic_helper_commit_encoder_bridge_enable(dev, state);
encoder_bridge_enable(dev, state);
drm_atomic_helper_commit_writebacks(dev, state);
}

View File

@ -366,9 +366,6 @@ static void drm_fb_helper_damage_work(struct work_struct *work)
{
struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper, damage_work);
if (helper->info->state != FBINFO_STATE_RUNNING)
return;
drm_fb_helper_fb_dirty(helper);
}
@ -735,13 +732,6 @@ void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper,
if (fb_helper->info->state != FBINFO_STATE_RUNNING)
return;
/*
* Cancel pending damage work. During GPU reset, VBlank
* interrupts are disabled and drm_fb_helper_fb_dirty()
* would wait for VBlank timeout otherwise.
*/
cancel_work_sync(&fb_helper->damage_work);
console_lock();
} else {

View File

@ -1692,7 +1692,7 @@ static irqreturn_t hdmi_irq_thread(int irq, void *arg)
{
struct hdmi_context *hdata = arg;
mod_delayed_work(system_percpu_wq, &hdata->hotplug_work,
mod_delayed_work(system_wq, &hdata->hotplug_work,
msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS));
return IRQ_HANDLED;

View File

@ -1002,6 +1002,12 @@ static int mtk_dsi_host_attach(struct mipi_dsi_host *host,
return PTR_ERR(dsi->next_bridge);
}
/*
* set flag to request the DSI host bridge be pre-enabled before device bridge
* in the chain, so the DSI host is ready when the device bridge is pre-enabled
*/
dsi->next_bridge->pre_enable_prev_first = true;
drm_bridge_add(&dsi->bridge);
ret = component_add(host->dev, &mtk_dsi_component_ops);

View File

@ -30,9 +30,6 @@ ad102_gsp = {
.booter.ctor = ga102_gsp_booter_ctor,
.fwsec_sb.ctor = tu102_gsp_fwsec_sb_ctor,
.fwsec_sb.dtor = tu102_gsp_fwsec_sb_dtor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
.init = tu102_gsp_init,

View File

@ -337,12 +337,18 @@ nvkm_gsp_fwsec_sb(struct nvkm_gsp *gsp)
}
int
nvkm_gsp_fwsec_sb_init(struct nvkm_gsp *gsp)
nvkm_gsp_fwsec_sb_ctor(struct nvkm_gsp *gsp)
{
return nvkm_gsp_fwsec_init(gsp, &gsp->fws.falcon.sb, "fwsec-sb",
NVFW_FALCON_APPIF_DMEMMAPPER_CMD_SB);
}
void
nvkm_gsp_fwsec_sb_dtor(struct nvkm_gsp *gsp)
{
nvkm_falcon_fw_dtor(&gsp->fws.falcon.sb);
}
int
nvkm_gsp_fwsec_frts(struct nvkm_gsp *gsp)
{

View File

@ -47,9 +47,6 @@ ga100_gsp = {
.booter.ctor = tu102_gsp_booter_ctor,
.fwsec_sb.ctor = tu102_gsp_fwsec_sb_ctor,
.fwsec_sb.dtor = tu102_gsp_fwsec_sb_dtor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
.init = tu102_gsp_init,

View File

@ -158,9 +158,6 @@ ga102_gsp_r535 = {
.booter.ctor = ga102_gsp_booter_ctor,
.fwsec_sb.ctor = tu102_gsp_fwsec_sb_ctor,
.fwsec_sb.dtor = tu102_gsp_fwsec_sb_dtor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
.init = tu102_gsp_init,

View File

@ -7,8 +7,9 @@ enum nvkm_acr_lsf_id;
int nvkm_gsp_fwsec_frts(struct nvkm_gsp *);
int nvkm_gsp_fwsec_sb_ctor(struct nvkm_gsp *);
int nvkm_gsp_fwsec_sb(struct nvkm_gsp *);
int nvkm_gsp_fwsec_sb_init(struct nvkm_gsp *gsp);
void nvkm_gsp_fwsec_sb_dtor(struct nvkm_gsp *);
struct nvkm_gsp_fwif {
int version;
@ -51,11 +52,6 @@ struct nvkm_gsp_func {
struct nvkm_falcon *, struct nvkm_falcon_fw *);
} booter;
struct {
int (*ctor)(struct nvkm_gsp *);
void (*dtor)(struct nvkm_gsp *);
} fwsec_sb;
void (*dtor)(struct nvkm_gsp *);
int (*oneinit)(struct nvkm_gsp *);
int (*init)(struct nvkm_gsp *);
@ -71,8 +67,6 @@ extern const struct nvkm_falcon_func tu102_gsp_flcn;
extern const struct nvkm_falcon_fw_func tu102_gsp_fwsec;
int tu102_gsp_booter_ctor(struct nvkm_gsp *, const char *, const struct firmware *,
struct nvkm_falcon *, struct nvkm_falcon_fw *);
int tu102_gsp_fwsec_sb_ctor(struct nvkm_gsp *);
void tu102_gsp_fwsec_sb_dtor(struct nvkm_gsp *);
int tu102_gsp_oneinit(struct nvkm_gsp *);
int tu102_gsp_init(struct nvkm_gsp *);
int tu102_gsp_fini(struct nvkm_gsp *, bool suspend);
@ -97,18 +91,5 @@ int r535_gsp_fini(struct nvkm_gsp *, bool suspend);
int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_gsp **);
static inline int nvkm_gsp_fwsec_sb_ctor(struct nvkm_gsp *gsp)
{
if (gsp->func->fwsec_sb.ctor)
return gsp->func->fwsec_sb.ctor(gsp);
return 0;
}
static inline void nvkm_gsp_fwsec_sb_dtor(struct nvkm_gsp *gsp)
{
if (gsp->func->fwsec_sb.dtor)
gsp->func->fwsec_sb.dtor(gsp);
}
extern const struct nvkm_gsp_func gv100_gsp;
#endif

View File

@ -30,18 +30,6 @@
#include <nvfw/fw.h>
#include <nvfw/hs.h>
int
tu102_gsp_fwsec_sb_ctor(struct nvkm_gsp *gsp)
{
return nvkm_gsp_fwsec_sb_init(gsp);
}
void
tu102_gsp_fwsec_sb_dtor(struct nvkm_gsp *gsp)
{
nvkm_falcon_fw_dtor(&gsp->fws.falcon.sb);
}
static int
tu102_gsp_booter_unload(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
{
@ -382,9 +370,6 @@ tu102_gsp = {
.booter.ctor = tu102_gsp_booter_ctor,
.fwsec_sb.ctor = tu102_gsp_fwsec_sb_ctor,
.fwsec_sb.dtor = tu102_gsp_fwsec_sb_dtor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
.init = tu102_gsp_init,

View File

@ -30,9 +30,6 @@ tu116_gsp = {
.booter.ctor = tu102_gsp_booter_ctor,
.fwsec_sb.ctor = tu102_gsp_fwsec_sb_ctor,
.fwsec_sb.dtor = tu102_gsp_fwsec_sb_dtor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
.init = tu102_gsp_init,

View File

@ -295,7 +295,7 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
variant->name, priv);
if (ret != 0) {
dev_err(dev, "%s failed irq %d\n", __func__, ret);
goto dev_put;
return ret;
}
ret = pl111_modeset_init(drm);

View File

@ -450,7 +450,7 @@ typedef struct _ClockInfoArray{
//sizeof(ATOM_PPLIB_CLOCK_INFO)
UCHAR ucEntrySize;
UCHAR clockInfo[] /*__counted_by(ucNumEntries)*/;
UCHAR clockInfo[] __counted_by(ucNumEntries);
}ClockInfoArray;
typedef struct _NonClockInfoArray{

View File

@ -26,33 +26,9 @@ static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state)
tidss_runtime_get(tidss);
/*
* TI's OLDI and DSI encoders need to be set up before the crtc is
* enabled. Thus drm_atomic_helper_commit_modeset_enables() and
* drm_atomic_helper_commit_modeset_disables() cannot be used here, as
* they enable the crtc before bridges' pre-enable, and disable the crtc
* after bridges' post-disable.
*
* Open code the functions here and first call the bridges' pre-enables,
* then crtc enable, then bridges' post-enable (and vice versa for
* disable).
*/
drm_atomic_helper_commit_encoder_bridge_disable(ddev, old_state);
drm_atomic_helper_commit_crtc_disable(ddev, old_state);
drm_atomic_helper_commit_encoder_bridge_post_disable(ddev, old_state);
drm_atomic_helper_update_legacy_modeset_state(ddev, old_state);
drm_atomic_helper_calc_timestamping_constants(old_state);
drm_atomic_helper_commit_crtc_set_mode(ddev, old_state);
drm_atomic_helper_commit_planes(ddev, old_state,
DRM_PLANE_COMMIT_ACTIVE_ONLY);
drm_atomic_helper_commit_encoder_bridge_pre_enable(ddev, old_state);
drm_atomic_helper_commit_crtc_enable(ddev, old_state);
drm_atomic_helper_commit_encoder_bridge_enable(ddev, old_state);
drm_atomic_helper_commit_writebacks(ddev, old_state);
drm_atomic_helper_commit_modeset_disables(ddev, old_state);
drm_atomic_helper_commit_planes(ddev, old_state, DRM_PLANE_COMMIT_ACTIVE_ONLY);
drm_atomic_helper_commit_modeset_enables(ddev, old_state);
drm_atomic_helper_commit_hw_done(old_state);
drm_atomic_helper_wait_for_flip_done(ddev, old_state);

View File

@ -3,7 +3,7 @@ config NOVA_CORE
depends on 64BIT
depends on PCI
depends on RUST
select RUST_FW_LOADER_ABSTRACTIONS
depends on RUST_FW_LOADER_ABSTRACTIONS
select AUXILIARY_BUS
default n
help

View File

@ -588,23 +588,21 @@ impl Cmdq {
header.length(),
);
let payload_length = header.payload_length();
// Check that the driver read area is large enough for the message.
if slice_1.len() + slice_2.len() < payload_length {
if slice_1.len() + slice_2.len() < header.length() {
return Err(EIO);
}
// Cut the message slices down to the actual length of the message.
let (slice_1, slice_2) = if slice_1.len() > payload_length {
// PANIC: we checked above that `slice_1` is at least as long as `payload_length`.
(slice_1.split_at(payload_length).0, &slice_2[0..0])
let (slice_1, slice_2) = if slice_1.len() > header.length() {
// PANIC: we checked above that `slice_1` is at least as long as `msg_header.length()`.
(slice_1.split_at(header.length()).0, &slice_2[0..0])
} else {
(
slice_1,
// PANIC: we checked above that `slice_1.len() + slice_2.len()` is at least as
// large as `payload_length`.
slice_2.split_at(payload_length - slice_1.len()).0,
// large as `msg_header.length()`.
slice_2.split_at(header.length() - slice_1.len()).0,
)
};
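For reference, a minimal standalone sketch of the slice-trimming check shown above, assuming a generic pair of byte slices and a precomputed payload length (the function name and signature are illustrative, not part of the driver):

// Trim two consecutive read areas down to a payload of `payload_len` bytes.
// Returns None if the two slices together cannot cover the payload.
fn trim_to_payload<'a>(s1: &'a [u8], s2: &'a [u8], payload_len: usize) -> Option<(&'a [u8], &'a [u8])> {
    if s1.len() + s2.len() < payload_len {
        return None;
    }
    if s1.len() >= payload_len {
        // The payload fits entirely in the first slice; the second is unused.
        Some((&s1[..payload_len], &s2[..0]))
    } else {
        // The payload spills over into the second slice.
        Some((s1, &s2[..payload_len - s1.len()]))
    }
}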

View File

@ -141,8 +141,8 @@ unsafe impl AsBytes for GspFwWprMeta {}
// are valid.
unsafe impl FromBytes for GspFwWprMeta {}
type GspFwWprMetaBootResumeInfo = bindings::GspFwWprMeta__bindgen_ty_1;
type GspFwWprMetaBootInfo = bindings::GspFwWprMeta__bindgen_ty_1__bindgen_ty_1;
type GspFwWprMetaBootResumeInfo = r570_144::GspFwWprMeta__bindgen_ty_1;
type GspFwWprMetaBootInfo = r570_144::GspFwWprMeta__bindgen_ty_1__bindgen_ty_1;
impl GspFwWprMeta {
/// Fill in and return a `GspFwWprMeta` suitable for booting `gsp_firmware` using the
@ -150,8 +150,8 @@ impl GspFwWprMeta {
pub(crate) fn new(gsp_firmware: &GspFirmware, fb_layout: &FbLayout) -> Self {
Self(bindings::GspFwWprMeta {
// CAST: we want to store the bits of `GSP_FW_WPR_META_MAGIC` unmodified.
magic: bindings::GSP_FW_WPR_META_MAGIC as u64,
revision: u64::from(bindings::GSP_FW_WPR_META_REVISION),
magic: r570_144::GSP_FW_WPR_META_MAGIC as u64,
revision: u64::from(r570_144::GSP_FW_WPR_META_REVISION),
sysmemAddrOfRadix3Elf: gsp_firmware.radix3_dma_handle(),
sizeOfRadix3Elf: u64::from_safe_cast(gsp_firmware.size),
sysmemAddrOfBootloader: gsp_firmware.bootloader.ucode.dma_handle(),
@ -315,19 +315,19 @@ impl From<MsgFunction> for u32 {
#[repr(u32)]
pub(crate) enum SeqBufOpcode {
// Core operation opcodes
CoreReset = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESET,
CoreResume = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME,
CoreStart = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_START,
CoreWaitForHalt = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT,
CoreReset = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESET,
CoreResume = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME,
CoreStart = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_START,
CoreWaitForHalt = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT,
// Delay opcode
DelayUs = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_DELAY_US,
DelayUs = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_DELAY_US,
// Register operation opcodes
RegModify = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_MODIFY,
RegPoll = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_POLL,
RegStore = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_STORE,
RegWrite = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_WRITE,
RegModify = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_MODIFY,
RegPoll = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_POLL,
RegStore = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_STORE,
RegWrite = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_WRITE,
}
impl fmt::Display for SeqBufOpcode {
@ -351,25 +351,25 @@ impl TryFrom<u32> for SeqBufOpcode {
fn try_from(value: u32) -> Result<SeqBufOpcode> {
match value {
bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESET => {
r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESET => {
Ok(SeqBufOpcode::CoreReset)
}
bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME => {
r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME => {
Ok(SeqBufOpcode::CoreResume)
}
bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_START => {
r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_START => {
Ok(SeqBufOpcode::CoreStart)
}
bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT => {
r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT => {
Ok(SeqBufOpcode::CoreWaitForHalt)
}
bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_DELAY_US => Ok(SeqBufOpcode::DelayUs),
bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_MODIFY => {
r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_DELAY_US => Ok(SeqBufOpcode::DelayUs),
r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_MODIFY => {
Ok(SeqBufOpcode::RegModify)
}
bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_POLL => Ok(SeqBufOpcode::RegPoll),
bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_STORE => Ok(SeqBufOpcode::RegStore),
bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_WRITE => Ok(SeqBufOpcode::RegWrite),
r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_POLL => Ok(SeqBufOpcode::RegPoll),
r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_STORE => Ok(SeqBufOpcode::RegStore),
r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_WRITE => Ok(SeqBufOpcode::RegWrite),
_ => Err(EINVAL),
}
}
@ -385,7 +385,7 @@ impl From<SeqBufOpcode> for u32 {
/// Wrapper for GSP sequencer register write payload.
#[repr(transparent)]
#[derive(Copy, Clone)]
pub(crate) struct RegWritePayload(bindings::GSP_SEQ_BUF_PAYLOAD_REG_WRITE);
pub(crate) struct RegWritePayload(r570_144::GSP_SEQ_BUF_PAYLOAD_REG_WRITE);
impl RegWritePayload {
/// Returns the register address.
@ -408,7 +408,7 @@ unsafe impl AsBytes for RegWritePayload {}
/// Wrapper for GSP sequencer register modify payload.
#[repr(transparent)]
#[derive(Copy, Clone)]
pub(crate) struct RegModifyPayload(bindings::GSP_SEQ_BUF_PAYLOAD_REG_MODIFY);
pub(crate) struct RegModifyPayload(r570_144::GSP_SEQ_BUF_PAYLOAD_REG_MODIFY);
impl RegModifyPayload {
/// Returns the register address.
@ -436,7 +436,7 @@ unsafe impl AsBytes for RegModifyPayload {}
/// Wrapper for GSP sequencer register poll payload.
#[repr(transparent)]
#[derive(Copy, Clone)]
pub(crate) struct RegPollPayload(bindings::GSP_SEQ_BUF_PAYLOAD_REG_POLL);
pub(crate) struct RegPollPayload(r570_144::GSP_SEQ_BUF_PAYLOAD_REG_POLL);
impl RegPollPayload {
/// Returns the register address.
@ -469,7 +469,7 @@ unsafe impl AsBytes for RegPollPayload {}
/// Wrapper for GSP sequencer delay payload.
#[repr(transparent)]
#[derive(Copy, Clone)]
pub(crate) struct DelayUsPayload(bindings::GSP_SEQ_BUF_PAYLOAD_DELAY_US);
pub(crate) struct DelayUsPayload(r570_144::GSP_SEQ_BUF_PAYLOAD_DELAY_US);
impl DelayUsPayload {
/// Returns the delay value in microseconds.
@ -487,7 +487,7 @@ unsafe impl AsBytes for DelayUsPayload {}
/// Wrapper for GSP sequencer register store payload.
#[repr(transparent)]
#[derive(Copy, Clone)]
pub(crate) struct RegStorePayload(bindings::GSP_SEQ_BUF_PAYLOAD_REG_STORE);
pub(crate) struct RegStorePayload(r570_144::GSP_SEQ_BUF_PAYLOAD_REG_STORE);
impl RegStorePayload {
/// Returns the register address.
@ -510,7 +510,7 @@ unsafe impl AsBytes for RegStorePayload {}
/// Wrapper for GSP sequencer buffer command.
#[repr(transparent)]
pub(crate) struct SequencerBufferCmd(bindings::GSP_SEQUENCER_BUFFER_CMD);
pub(crate) struct SequencerBufferCmd(r570_144::GSP_SEQUENCER_BUFFER_CMD);
impl SequencerBufferCmd {
/// Returns the opcode as a `SeqBufOpcode` enum, or error if invalid.
@ -612,7 +612,7 @@ unsafe impl AsBytes for SequencerBufferCmd {}
/// Wrapper for GSP run CPU sequencer RPC.
#[repr(transparent)]
pub(crate) struct RunCpuSequencer(bindings::rpc_run_cpu_sequencer_v17_00);
pub(crate) struct RunCpuSequencer(r570_144::rpc_run_cpu_sequencer_v17_00);
impl RunCpuSequencer {
/// Returns the command index.
@ -797,6 +797,13 @@ impl bindings::rpc_message_header_v {
}
}
// SAFETY: We can't derive the Zeroable trait for this binding because the
// procedural macro doesn't support the syntax used by bindgen to create the
// __IncompleteArrayField types. So instead we implement it here, which is safe
// because these are explicitly padded structures only containing types for
// which any bit pattern, including all zeros, is valid.
unsafe impl Zeroable for bindings::rpc_message_header_v {}
/// GSP Message Element.
///
/// This is essentially a message header expected to be followed by the message data.
@ -846,16 +853,11 @@ impl GspMsgElement {
self.inner.checkSum = checksum;
}
/// Returns the length of the message's payload.
pub(crate) fn payload_length(&self) -> usize {
// `rpc.length` includes the length of the RPC message header.
num::u32_as_usize(self.inner.rpc.length)
.saturating_sub(size_of::<bindings::rpc_message_header_v>())
}
/// Returns the total length of the message, message and RPC headers included.
/// Returns the total length of the message.
pub(crate) fn length(&self) -> usize {
size_of::<Self>() + self.payload_length()
// `rpc.length` includes the length of the GspRpcHeader but not the message header.
size_of::<Self>() - size_of::<bindings::rpc_message_header_v>()
+ num::u32_as_usize(self.inner.rpc.length)
}
// Returns the sequence number of the message.
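As a worked example of the length arithmetic described in the comments above, a hypothetical standalone sketch (the header sizes are placeholder assumptions, not the real struct sizes):

// Assumed size of the RPC message header (placeholder value).
const RPC_HEADER_SIZE: usize = 32;
// Assumed size of the full message element header (placeholder value).
const ELEMENT_HEADER_SIZE: usize = 64;

// The RPC length field already counts the RPC header, so strip it to get the payload.
fn payload_length(rpc_length: usize) -> usize {
    rpc_length.saturating_sub(RPC_HEADER_SIZE)
}

// Total element length is the element header plus the payload.
fn element_length(rpc_length: usize) -> usize {
    ELEMENT_HEADER_SIZE + payload_length(rpc_length)
}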

View File

@ -24,11 +24,8 @@
unreachable_pub,
unsafe_op_in_unsafe_fn
)]
use kernel::ffi;
use pin_init::MaybeZeroable;
use kernel::{
ffi,
prelude::Zeroable, //
};
include!("r570_144/bindings.rs");
// SAFETY: This type has a size of zero, so its inclusion into another type should not affect their
// ability to implement `Zeroable`.
unsafe impl<T> kernel::prelude::Zeroable for __IncompleteArrayField<T> {}

View File

@ -320,12 +320,11 @@ pub const NV_VGPU_MSG_EVENT_RECOVERY_ACTION: _bindgen_ty_3 = 4130;
pub const NV_VGPU_MSG_EVENT_NUM_EVENTS: _bindgen_ty_3 = 4131;
pub type _bindgen_ty_3 = ffi::c_uint;
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
#[derive(Debug, Default, Copy, Clone)]
pub struct NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS {
pub totalVFs: u32_,
pub firstVfOffset: u32_,
pub vfFeatureMask: u32_,
pub __bindgen_padding_0: [u8; 4usize],
pub FirstVFBar0Address: u64_,
pub FirstVFBar1Address: u64_,
pub FirstVFBar2Address: u64_,
@ -341,26 +340,23 @@ pub struct NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS {
pub bClientRmAllocatedCtxBuffer: u8_,
pub bNonPowerOf2ChannelCountSupported: u8_,
pub bVfResizableBAR1Supported: u8_,
pub __bindgen_padding_1: [u8; 7usize],
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
#[derive(Debug, Default, Copy, Clone)]
pub struct NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS {
pub BoardID: u32_,
pub chipSKU: [ffi::c_char; 9usize],
pub chipSKUMod: [ffi::c_char; 5usize],
pub __bindgen_padding_0: [u8; 2usize],
pub skuConfigVersion: u32_,
pub project: [ffi::c_char; 5usize],
pub projectSKU: [ffi::c_char; 5usize],
pub CDP: [ffi::c_char; 6usize],
pub projectSKUMod: [ffi::c_char; 2usize],
pub __bindgen_padding_1: [u8; 2usize],
pub businessCycle: u32_,
}
pub type NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG = [u8_; 17usize];
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
#[derive(Debug, Default, Copy, Clone)]
pub struct NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO {
pub base: u64_,
pub limit: u64_,
@ -372,14 +368,13 @@ pub struct NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO {
pub blackList: NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
#[derive(Debug, Default, Copy, Clone)]
pub struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS {
pub numFBRegions: u32_,
pub __bindgen_padding_0: [u8; 4usize],
pub fbRegion: [NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO; 16usize],
}
#[repr(C)]
#[derive(Debug, Copy, Clone, MaybeZeroable)]
#[derive(Debug, Copy, Clone)]
pub struct NV2080_CTRL_GPU_GET_GID_INFO_PARAMS {
pub index: u32_,
pub flags: u32_,
@ -396,14 +391,14 @@ impl Default for NV2080_CTRL_GPU_GET_GID_INFO_PARAMS {
}
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
pub struct DOD_METHOD_DATA {
pub status: u32_,
pub acpiIdListLen: u32_,
pub acpiIdList: [u32_; 16usize],
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
pub struct JT_METHOD_DATA {
pub status: u32_,
pub jtCaps: u32_,
@ -412,14 +407,14 @@ pub struct JT_METHOD_DATA {
pub __bindgen_padding_0: u8,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
pub struct MUX_METHOD_DATA_ELEMENT {
pub acpiId: u32_,
pub mode: u32_,
pub status: u32_,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
pub struct MUX_METHOD_DATA {
pub tableLen: u32_,
pub acpiIdMuxModeTable: [MUX_METHOD_DATA_ELEMENT; 16usize],
@ -427,13 +422,13 @@ pub struct MUX_METHOD_DATA {
pub acpiIdMuxStateTable: [MUX_METHOD_DATA_ELEMENT; 16usize],
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
pub struct CAPS_METHOD_DATA {
pub status: u32_,
pub optimusCaps: u32_,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
pub struct ACPI_METHOD_DATA {
pub bValid: u8_,
pub __bindgen_padding_0: [u8; 3usize],
@ -443,20 +438,20 @@ pub struct ACPI_METHOD_DATA {
pub capsMethodData: CAPS_METHOD_DATA,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
#[derive(Debug, Default, Copy, Clone)]
pub struct VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS {
pub headIndex: u32_,
pub maxHResolution: u32_,
pub maxVResolution: u32_,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
#[derive(Debug, Default, Copy, Clone)]
pub struct VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS {
pub numHeads: u32_,
pub maxNumHeads: u32_,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
pub struct BUSINFO {
pub deviceID: u16_,
pub vendorID: u16_,
@ -466,7 +461,7 @@ pub struct BUSINFO {
pub __bindgen_padding_0: u8,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
pub struct GSP_VF_INFO {
pub totalVFs: u32_,
pub firstVFOffset: u32_,
@ -479,37 +474,34 @@ pub struct GSP_VF_INFO {
pub __bindgen_padding_0: [u8; 5usize],
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
pub struct GSP_PCIE_CONFIG_REG {
pub linkCap: u32_,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
#[derive(Debug, Default, Copy, Clone)]
pub struct EcidManufacturingInfo {
pub ecidLow: u32_,
pub ecidHigh: u32_,
pub ecidExtended: u32_,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
#[derive(Debug, Default, Copy, Clone)]
pub struct FW_WPR_LAYOUT_OFFSET {
pub nonWprHeapOffset: u64_,
pub frtsOffset: u64_,
}
#[repr(C)]
#[derive(Debug, Copy, Clone, MaybeZeroable)]
#[derive(Debug, Copy, Clone)]
pub struct GspStaticConfigInfo_t {
pub grCapsBits: [u8_; 23usize],
pub __bindgen_padding_0: u8,
pub gidInfo: NV2080_CTRL_GPU_GET_GID_INFO_PARAMS,
pub SKUInfo: NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS,
pub __bindgen_padding_1: [u8; 4usize],
pub fbRegionInfoParams: NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS,
pub sriovCaps: NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS,
pub sriovMaxGfid: u32_,
pub engineCaps: [u32_; 3usize],
pub poisonFuseEnabled: u8_,
pub __bindgen_padding_2: [u8; 7usize],
pub fb_length: u64_,
pub fbio_mask: u64_,
pub fb_bus_width: u32_,
@ -535,20 +527,16 @@ pub struct GspStaticConfigInfo_t {
pub bIsMigSupported: u8_,
pub RTD3GC6TotalBoardPower: u16_,
pub RTD3GC6PerstDelay: u16_,
pub __bindgen_padding_3: [u8; 2usize],
pub bar1PdeBase: u64_,
pub bar2PdeBase: u64_,
pub bVbiosValid: u8_,
pub __bindgen_padding_4: [u8; 3usize],
pub vbiosSubVendor: u32_,
pub vbiosSubDevice: u32_,
pub bPageRetirementSupported: u8_,
pub bSplitVasBetweenServerClientRm: u8_,
pub bClRootportNeedsNosnoopWAR: u8_,
pub __bindgen_padding_5: u8,
pub displaylessMaxHeads: VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS,
pub displaylessMaxResolution: VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS,
pub __bindgen_padding_6: [u8; 4usize],
pub displaylessMaxPixels: u64_,
pub hInternalClient: u32_,
pub hInternalDevice: u32_,
@ -570,7 +558,7 @@ impl Default for GspStaticConfigInfo_t {
}
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
pub struct GspSystemInfo {
pub gpuPhysAddr: u64_,
pub gpuPhysFbAddr: u64_,
@ -627,7 +615,7 @@ pub struct GspSystemInfo {
pub hostPageSize: u64_,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
pub struct MESSAGE_QUEUE_INIT_ARGUMENTS {
pub sharedMemPhysAddr: u64_,
pub pageTableEntryCount: u32_,
@ -636,7 +624,7 @@ pub struct MESSAGE_QUEUE_INIT_ARGUMENTS {
pub statQueueOffset: u64_,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
pub struct GSP_SR_INIT_ARGUMENTS {
pub oldLevel: u32_,
pub flags: u32_,
@ -644,7 +632,7 @@ pub struct GSP_SR_INIT_ARGUMENTS {
pub __bindgen_padding_0: [u8; 3usize],
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
pub struct GSP_ARGUMENTS_CACHED {
pub messageQueueInitArguments: MESSAGE_QUEUE_INIT_ARGUMENTS,
pub srInitArguments: GSP_SR_INIT_ARGUMENTS,
@ -654,13 +642,13 @@ pub struct GSP_ARGUMENTS_CACHED {
pub profilerArgs: GSP_ARGUMENTS_CACHED__bindgen_ty_1,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
pub struct GSP_ARGUMENTS_CACHED__bindgen_ty_1 {
pub pa: u64_,
pub size: u64_,
}
#[repr(C)]
#[derive(Copy, Clone, MaybeZeroable)]
#[derive(Copy, Clone, Zeroable)]
pub union rpc_message_rpc_union_field_v03_00 {
pub spare: u32_,
pub cpuRmGfid: u32_,
@ -676,7 +664,6 @@ impl Default for rpc_message_rpc_union_field_v03_00 {
}
pub type rpc_message_rpc_union_field_v = rpc_message_rpc_union_field_v03_00;
#[repr(C)]
#[derive(MaybeZeroable)]
pub struct rpc_message_header_v03_00 {
pub header_version: u32_,
pub signature: u32_,
@ -699,7 +686,7 @@ impl Default for rpc_message_header_v03_00 {
}
pub type rpc_message_header_v = rpc_message_header_v03_00;
#[repr(C)]
#[derive(Copy, Clone, MaybeZeroable)]
#[derive(Copy, Clone, Zeroable)]
pub struct GspFwWprMeta {
pub magic: u64_,
pub revision: u64_,
@ -734,19 +721,19 @@ pub struct GspFwWprMeta {
pub verified: u64_,
}
#[repr(C)]
#[derive(Copy, Clone, MaybeZeroable)]
#[derive(Copy, Clone, Zeroable)]
pub union GspFwWprMeta__bindgen_ty_1 {
pub __bindgen_anon_1: GspFwWprMeta__bindgen_ty_1__bindgen_ty_1,
pub __bindgen_anon_2: GspFwWprMeta__bindgen_ty_1__bindgen_ty_2,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
pub struct GspFwWprMeta__bindgen_ty_1__bindgen_ty_1 {
pub sysmemAddrOfSignature: u64_,
pub sizeOfSignature: u64_,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
pub struct GspFwWprMeta__bindgen_ty_1__bindgen_ty_2 {
pub gspFwHeapFreeListWprOffset: u32_,
pub unused0: u32_,
@ -762,13 +749,13 @@ impl Default for GspFwWprMeta__bindgen_ty_1 {
}
}
#[repr(C)]
#[derive(Copy, Clone, MaybeZeroable)]
#[derive(Copy, Clone, Zeroable)]
pub union GspFwWprMeta__bindgen_ty_2 {
pub __bindgen_anon_1: GspFwWprMeta__bindgen_ty_2__bindgen_ty_1,
pub __bindgen_anon_2: GspFwWprMeta__bindgen_ty_2__bindgen_ty_2,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
pub struct GspFwWprMeta__bindgen_ty_2__bindgen_ty_1 {
pub partitionRpcAddr: u64_,
pub partitionRpcRequestOffset: u16_,
@ -780,7 +767,7 @@ pub struct GspFwWprMeta__bindgen_ty_2__bindgen_ty_1 {
pub lsUcodeVersion: u32_,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
pub struct GspFwWprMeta__bindgen_ty_2__bindgen_ty_2 {
pub partitionRpcPadding: [u32_; 4usize],
pub sysmemAddrOfCrashReportQueue: u64_,
@ -815,7 +802,7 @@ pub const LibosMemoryRegionLoc_LIBOS_MEMORY_REGION_LOC_SYSMEM: LibosMemoryRegion
pub const LibosMemoryRegionLoc_LIBOS_MEMORY_REGION_LOC_FB: LibosMemoryRegionLoc = 2;
pub type LibosMemoryRegionLoc = ffi::c_uint;
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
pub struct LibosMemoryRegionInitArgument {
pub id8: LibosAddress,
pub pa: LibosAddress,
@ -825,7 +812,7 @@ pub struct LibosMemoryRegionInitArgument {
pub __bindgen_padding_0: [u8; 6usize],
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
#[derive(Debug, Default, Copy, Clone)]
pub struct PACKED_REGISTRY_ENTRY {
pub nameOffset: u32_,
pub type_: u8_,
@ -834,14 +821,14 @@ pub struct PACKED_REGISTRY_ENTRY {
pub length: u32_,
}
#[repr(C)]
#[derive(Debug, Default, MaybeZeroable)]
#[derive(Debug, Default)]
pub struct PACKED_REGISTRY_TABLE {
pub size: u32_,
pub numEntries: u32_,
pub entries: __IncompleteArrayField<PACKED_REGISTRY_ENTRY>,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
pub struct msgqTxHeader {
pub version: u32_,
pub size: u32_,
@ -853,13 +840,13 @@ pub struct msgqTxHeader {
pub entryOff: u32_,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
pub struct msgqRxHeader {
pub readPtr: u32_,
}
#[repr(C)]
#[repr(align(8))]
#[derive(MaybeZeroable)]
#[derive(Zeroable)]
pub struct GSP_MSG_QUEUE_ELEMENT {
pub authTagBuffer: [u8_; 16usize],
pub aadBuffer: [u8_; 16usize],
@ -879,7 +866,7 @@ impl Default for GSP_MSG_QUEUE_ELEMENT {
}
}
#[repr(C)]
#[derive(Debug, Default, MaybeZeroable)]
#[derive(Debug, Default)]
pub struct rpc_run_cpu_sequencer_v17_00 {
pub bufferSizeDWord: u32_,
pub cmdIndex: u32_,
@ -897,20 +884,20 @@ pub const GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT: GSP_SEQ_BUF_
pub const GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME: GSP_SEQ_BUF_OPCODE = 8;
pub type GSP_SEQ_BUF_OPCODE = ffi::c_uint;
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
#[derive(Debug, Default, Copy, Clone)]
pub struct GSP_SEQ_BUF_PAYLOAD_REG_WRITE {
pub addr: u32_,
pub val: u32_,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
#[derive(Debug, Default, Copy, Clone)]
pub struct GSP_SEQ_BUF_PAYLOAD_REG_MODIFY {
pub addr: u32_,
pub mask: u32_,
pub val: u32_,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
#[derive(Debug, Default, Copy, Clone)]
pub struct GSP_SEQ_BUF_PAYLOAD_REG_POLL {
pub addr: u32_,
pub mask: u32_,
@ -919,24 +906,24 @@ pub struct GSP_SEQ_BUF_PAYLOAD_REG_POLL {
pub error: u32_,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
#[derive(Debug, Default, Copy, Clone)]
pub struct GSP_SEQ_BUF_PAYLOAD_DELAY_US {
pub val: u32_,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
#[derive(Debug, Default, Copy, Clone)]
pub struct GSP_SEQ_BUF_PAYLOAD_REG_STORE {
pub addr: u32_,
pub index: u32_,
}
#[repr(C)]
#[derive(Copy, Clone, MaybeZeroable)]
#[derive(Copy, Clone)]
pub struct GSP_SEQUENCER_BUFFER_CMD {
pub opCode: GSP_SEQ_BUF_OPCODE,
pub payload: GSP_SEQUENCER_BUFFER_CMD__bindgen_ty_1,
}
#[repr(C)]
#[derive(Copy, Clone, MaybeZeroable)]
#[derive(Copy, Clone)]
pub union GSP_SEQUENCER_BUFFER_CMD__bindgen_ty_1 {
pub regWrite: GSP_SEQ_BUF_PAYLOAD_REG_WRITE,
pub regModify: GSP_SEQ_BUF_PAYLOAD_REG_MODIFY,

View File

@ -315,12 +315,12 @@ int media_request_alloc(struct media_device *mdev, int *alloc_fd)
fd_prepare_file(fdf)->private_data = req;
snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d",
atomic_inc_return(&mdev->request_id), fd_prepare_fd(fdf));
dev_dbg(mdev->dev, "request: allocated %s\n", req->debug_str);
*alloc_fd = fd_publish(fdf);
snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d",
atomic_inc_return(&mdev->request_id), *alloc_fd);
dev_dbg(mdev->dev, "request: allocated %s\n", req->debug_str);
return 0;
err_free_req:

View File

@ -37,6 +37,7 @@
#define PCIE_CFG_STATUS17 0x44
#define PM_CURRENT_STATE(x) (((x) >> 7) & 0x1)
#define WAIT_LINKUP_TIMEOUT 4000
#define PORT_CLK_RATE 100000000UL
#define MAX_PAYLOAD_SIZE 256
#define MAX_READ_REQ_SIZE 256
@ -349,10 +350,40 @@ static struct pci_ops meson_pci_ops = {
static bool meson_pcie_link_up(struct dw_pcie *pci)
{
struct meson_pcie *mp = to_meson_pcie(pci);
u32 state12;
struct device *dev = pci->dev;
u32 speed_okay = 0;
u32 cnt = 0;
u32 state12, state17, smlh_up, ltssm_up, rdlh_up;
state12 = meson_cfg_readl(mp, PCIE_CFG_STATUS12);
return IS_SMLH_LINK_UP(state12) && IS_RDLH_LINK_UP(state12);
do {
state12 = meson_cfg_readl(mp, PCIE_CFG_STATUS12);
state17 = meson_cfg_readl(mp, PCIE_CFG_STATUS17);
smlh_up = IS_SMLH_LINK_UP(state12);
rdlh_up = IS_RDLH_LINK_UP(state12);
ltssm_up = IS_LTSSM_UP(state12);
if (PM_CURRENT_STATE(state17) < PCIE_GEN3)
speed_okay = 1;
if (smlh_up)
dev_dbg(dev, "smlh_link_up is on\n");
if (rdlh_up)
dev_dbg(dev, "rdlh_link_up is on\n");
if (ltssm_up)
dev_dbg(dev, "ltssm_up is on\n");
if (speed_okay)
dev_dbg(dev, "speed_okay\n");
if (smlh_up && rdlh_up && ltssm_up && speed_okay)
return true;
cnt++;
udelay(10);
} while (cnt < WAIT_LINKUP_TIMEOUT);
dev_err(dev, "error: wait linkup timeout\n");
return false;
}
static int meson_pcie_host_init(struct dw_pcie_rp *pp)

View File

@ -1047,6 +1047,7 @@ static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
writel(WR_NO_SNOOP_OVERRIDE_EN | RD_NO_SNOOP_OVERRIDE_EN,
pcie->parf + PARF_NO_SNOOP_OVERRIDE);
qcom_pcie_clear_aspm_l0s(pcie->pci);
qcom_pcie_clear_hpc(pcie->pci);
return 0;
@ -1315,8 +1316,6 @@ static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
goto err_disable_phy;
}
qcom_pcie_clear_aspm_l0s(pcie->pci);
qcom_ep_reset_deassert(pcie);
if (pcie->cfg->ops->config_sid) {
@ -1465,7 +1464,6 @@ static const struct qcom_pcie_cfg cfg_2_1_0 = {
static const struct qcom_pcie_cfg cfg_2_3_2 = {
.ops = &ops_2_3_2,
.no_l0s = true,
};
static const struct qcom_pcie_cfg cfg_2_3_3 = {

View File

@ -652,6 +652,13 @@ static bool vga_is_boot_device(struct vga_device *vgadev)
return true;
}
/*
* Vgadev has neither IO nor MEM enabled. If we haven't found any
* other VGA devices, it is the best candidate so far.
*/
if (!boot_vga)
return true;
return false;
}

View File

@ -89,11 +89,11 @@ int xen_acpi_get_gsi_info(struct pci_dev *dev,
int *trigger_out,
int *polarity_out)
{
u32 gsi;
int gsi;
u8 pin;
struct acpi_prt_entry *entry;
int trigger = ACPI_LEVEL_SENSITIVE;
int ret, polarity = acpi_irq_model == ACPI_IRQ_MODEL_GIC ?
int polarity = acpi_irq_model == ACPI_IRQ_MODEL_GIC ?
ACPI_ACTIVE_HIGH : ACPI_ACTIVE_LOW;
if (!dev || !gsi_out || !trigger_out || !polarity_out)
@ -105,18 +105,17 @@ int xen_acpi_get_gsi_info(struct pci_dev *dev,
entry = acpi_pci_irq_lookup(dev, pin);
if (entry) {
ret = 0;
if (entry->link)
ret = acpi_pci_link_allocate_irq(entry->link,
gsi = acpi_pci_link_allocate_irq(entry->link,
entry->index,
&trigger, &polarity,
NULL, &gsi);
NULL);
else
gsi = entry->index;
} else
ret = -ENODEV;
gsi = -1;
if (ret < 0)
if (gsi < 0)
return -EINVAL;
*gsi_out = gsi;

View File

@ -2255,7 +2255,6 @@ static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
BTRFS_DATA_RELOC_TREE_OBJECTID, true);
if (IS_ERR(root)) {
if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
ret = PTR_ERR(root);
goto out;
}

View File

@ -481,15 +481,13 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
ASSERT(size <= sectorsize);
/*
* The compressed size also needs to be no larger than a page.
* That's also why we only need one folio as the parameter.
* The compressed size also needs to be no larger than a sector.
* That's also why we only need one page as the parameter.
*/
if (compressed_folio) {
if (compressed_folio)
ASSERT(compressed_size <= sectorsize);
ASSERT(compressed_size <= PAGE_SIZE);
} else {
else
ASSERT(compressed_size == 0);
}
if (compressed_size && compressed_folio)
cur_size = compressed_size;
@ -576,18 +574,6 @@ static bool can_cow_file_range_inline(struct btrfs_inode *inode,
if (offset != 0)
return false;
/*
* Even for bs > ps cases, cow_file_range_inline() can only accept a
* single folio.
*
* This can be problematic and cause access beyond page boundary if a
* page sized folio is passed into that function.
* And encoded write is doing exactly that.
* So here limits the inlined extent size to PAGE_SIZE.
*/
if (size > PAGE_SIZE || compressed_size > PAGE_SIZE)
return false;
/* Inline extents are limited to sectorsize. */
if (size > fs_info->sectorsize)
return false;
@ -4048,6 +4034,11 @@ static int btrfs_read_locked_inode(struct btrfs_inode *inode, struct btrfs_path
btrfs_set_inode_mapping_order(inode);
cache_index:
ret = btrfs_init_file_extent_tree(inode);
if (ret)
goto out;
btrfs_inode_set_file_extent_range(inode, 0,
round_up(i_size_read(vfs_inode), fs_info->sectorsize));
/*
* If we were modified in the current generation and evicted from memory
* and then re-read we need to do a full sync since we don't have any
@ -4134,20 +4125,6 @@ cache_acl:
btrfs_ino(inode), btrfs_root_id(root), ret);
}
/*
* We don't need the path anymore, so release it to avoid holding a read
* lock on a leaf while calling btrfs_init_file_extent_tree(), which can
* allocate memory that triggers reclaim (GFP_KERNEL) and cause a locking
* dependency.
*/
btrfs_release_path(path);
ret = btrfs_init_file_extent_tree(inode);
if (ret)
goto out;
btrfs_inode_set_file_extent_range(inode, 0,
round_up(i_size_read(vfs_inode), fs_info->sectorsize));
if (!maybe_acls)
cache_no_acl(vfs_inode);

View File

@ -736,12 +736,14 @@ bool btrfs_check_options(const struct btrfs_fs_info *info,
*/
void btrfs_set_free_space_cache_settings(struct btrfs_fs_info *fs_info)
{
if (fs_info->sectorsize != PAGE_SIZE && btrfs_test_opt(fs_info, SPACE_CACHE)) {
btrfs_info(fs_info,
"forcing free space tree for sector size %u with page size %lu",
fs_info->sectorsize, PAGE_SIZE);
if (fs_info->sectorsize < PAGE_SIZE) {
btrfs_clear_opt(fs_info->mount_opt, SPACE_CACHE);
btrfs_set_opt(fs_info->mount_opt, FREE_SPACE_TREE);
if (!btrfs_test_opt(fs_info, FREE_SPACE_TREE)) {
btrfs_info(fs_info,
"forcing free space tree for sector size %u with page size %lu",
fs_info->sectorsize, PAGE_SIZE);
btrfs_set_opt(fs_info->mount_opt, FREE_SPACE_TREE);
}
}
/*

View File

@ -190,7 +190,7 @@ static void do_abort_log_replay(struct walk_control *wc, const char *function,
btrfs_abort_transaction(wc->trans, error);
if (wc->subvol_path && wc->subvol_path->nodes[0]) {
if (wc->subvol_path->nodes[0]) {
btrfs_crit(fs_info,
"subvolume (root %llu) leaf currently being processed:",
btrfs_root_id(wc->root));

View File

@ -533,7 +533,6 @@ static struct dentry *ecryptfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
fsstack_copy_inode_size(dir, lower_dir);
set_nlink(dir, lower_dir->i_nlink);
out:
dput(lower_dir_dentry);
end_creating(lower_dentry);
if (d_really_is_negative(dentry))
d_drop(dentry);
@ -585,7 +584,7 @@ ecryptfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
fsstack_copy_attr_times(dir, lower_dir);
fsstack_copy_inode_size(dir, lower_dir);
out:
end_creating(lower_dentry);
end_removing(lower_dentry);
if (d_really_is_negative(dentry))
d_drop(dentry);
return rc;

View File

@ -1593,9 +1593,6 @@ EXPORT_SYMBOL(igrab);
* @hashval: hash value (usually inode number) to search for
* @test: callback used for comparisons between inodes
* @data: opaque data pointer to pass to @test
* @isnew: return argument telling whether I_NEW was set when
* the inode was found in hash (the caller needs to
* wait for I_NEW to clear)
*
* Search for the inode specified by @hashval and @data in the inode cache.
* If the inode is in the cache, the inode is returned with an incremented

View File

@ -832,7 +832,7 @@ static struct folio *__iomap_get_folio(struct iomap_iter *iter,
if (!mapping_large_folio_support(iter->inode->i_mapping))
len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));
if (iter->iomap.flags & IOMAP_F_FOLIO_BATCH) {
if (iter->fbatch) {
struct folio *folio = folio_batch_next(iter->fbatch);
if (!folio)
@ -929,7 +929,7 @@ static int iomap_write_begin(struct iomap_iter *iter,
* process so return and let the caller iterate and refill the batch.
*/
if (!folio) {
WARN_ON_ONCE(!(iter->iomap.flags & IOMAP_F_FOLIO_BATCH));
WARN_ON_ONCE(!iter->fbatch);
return 0;
}
@ -1544,39 +1544,23 @@ static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero,
return status;
}
/**
* iomap_fill_dirty_folios - fill a folio batch with dirty folios
* @iter: Iteration structure
* @start: Start offset of range. Updated based on lookup progress.
* @end: End offset of range
* @iomap_flags: Flags to set on the associated iomap to track the batch.
*
* Returns the folio count directly. Also returns the associated control flag if
 * the batch lookup is performed and the expected offset of a subsequent
* lookup via out params. The caller is responsible to set the flag on the
* associated iomap.
*/
unsigned int
loff_t
iomap_fill_dirty_folios(
struct iomap_iter *iter,
loff_t *start,
loff_t end,
unsigned int *iomap_flags)
loff_t offset,
loff_t length)
{
struct address_space *mapping = iter->inode->i_mapping;
pgoff_t pstart = *start >> PAGE_SHIFT;
pgoff_t pend = (end - 1) >> PAGE_SHIFT;
unsigned int count;
pgoff_t start = offset >> PAGE_SHIFT;
pgoff_t end = (offset + length - 1) >> PAGE_SHIFT;
if (!iter->fbatch) {
*start = end;
return 0;
}
iter->fbatch = kmalloc(sizeof(struct folio_batch), GFP_KERNEL);
if (!iter->fbatch)
return offset + length;
folio_batch_init(iter->fbatch);
count = filemap_get_folios_dirty(mapping, &pstart, pend, iter->fbatch);
*start = (pstart << PAGE_SHIFT);
*iomap_flags |= IOMAP_F_FOLIO_BATCH;
return count;
filemap_get_folios_dirty(mapping, &start, end, iter->fbatch);
return (start << PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(iomap_fill_dirty_folios);
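For illustration only, here is a minimal caller sketch for the variant of iomap_fill_dirty_folios() that takes an offset and length and returns an loff_t, as documented above and used by xfs further down: the helper batches up the dirty folios backing the range and returns the file offset where the lookup stopped, which the caller then uses to trim the mapping it is about to hand back from ->iomap_begin. The demo_trim_zero_range() name and its parameters are made up for this sketch and are not part of either commit.
/*
 * Sketch only: trim a zero-range mapping so it ends where the dirty-folio
 * scan stopped.  All demo_* identifiers are hypothetical.
 */
#include <linux/iomap.h>
#include <linux/minmax.h>
static loff_t demo_trim_zero_range(struct iomap_iter *iter, loff_t offset,
				   loff_t count, loff_t mapping_end)
{
	/* Batch up the dirty folios covering [offset, offset + count). */
	loff_t scan_end = iomap_fill_dirty_folios(iter, offset, count);
	/* Only hand back the part of the mapping the batch covers. */
	return min(mapping_end, scan_end);
}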
@ -1585,21 +1569,17 @@ iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
const struct iomap_ops *ops,
const struct iomap_write_ops *write_ops, void *private)
{
struct folio_batch fbatch;
struct iomap_iter iter = {
.inode = inode,
.pos = pos,
.len = len,
.flags = IOMAP_ZERO,
.private = private,
.fbatch = &fbatch,
};
struct address_space *mapping = inode->i_mapping;
int ret;
bool range_dirty;
folio_batch_init(&fbatch);
/*
* To avoid an unconditional flush, check pagecache state and only flush
* if dirty and the fs returns a mapping that might convert on
@ -1610,11 +1590,11 @@ iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
while ((ret = iomap_iter(&iter, ops)) > 0) {
const struct iomap *srcmap = iomap_iter_srcmap(&iter);
if (WARN_ON_ONCE((iter.iomap.flags & IOMAP_F_FOLIO_BATCH) &&
if (WARN_ON_ONCE(iter.fbatch &&
srcmap->type != IOMAP_UNWRITTEN))
return -EIO;
if (!(iter.iomap.flags & IOMAP_F_FOLIO_BATCH) &&
if (!iter.fbatch &&
(srcmap->type == IOMAP_HOLE ||
srcmap->type == IOMAP_UNWRITTEN)) {
s64 status;

View File

@ -8,10 +8,10 @@
static inline void iomap_iter_reset_iomap(struct iomap_iter *iter)
{
if (iter->iomap.flags & IOMAP_F_FOLIO_BATCH) {
if (iter->fbatch) {
folio_batch_release(iter->fbatch);
folio_batch_reinit(iter->fbatch);
iter->iomap.flags &= ~IOMAP_F_FOLIO_BATCH;
kfree(iter->fbatch);
iter->fbatch = NULL;
}
iter->status = 0;

View File

@ -369,19 +369,10 @@ locks_dispose_list(struct list_head *dispose)
while (!list_empty(dispose)) {
flc = list_first_entry(dispose, struct file_lock_core, flc_list);
list_del_init(&flc->flc_list);
locks_free_lock(file_lock(flc));
}
}
static void
lease_dispose_list(struct list_head *dispose)
{
struct file_lock_core *flc;
while (!list_empty(dispose)) {
flc = list_first_entry(dispose, struct file_lock_core, flc_list);
list_del_init(&flc->flc_list);
locks_free_lease(file_lease(flc));
if (flc->flc_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
locks_free_lease(file_lease(flc));
else
locks_free_lock(file_lock(flc));
}
}
@ -585,50 +576,10 @@ lease_setup(struct file_lease *fl, void **priv)
__f_setown(filp, task_pid(current), PIDTYPE_TGID, 0);
}
/**
* lease_open_conflict - see if the given file points to an inode that has
* an existing open that would conflict with the
* desired lease.
* @filp: file to check
* @arg: type of lease that we're trying to acquire
*
* Check to see if there's an existing open fd on this file that would
* conflict with the lease we're trying to set.
*/
static int
lease_open_conflict(struct file *filp, const int arg)
{
struct inode *inode = file_inode(filp);
int self_wcount = 0, self_rcount = 0;
if (arg == F_RDLCK)
return inode_is_open_for_write(inode) ? -EAGAIN : 0;
else if (arg != F_WRLCK)
return 0;
/*
* Make sure that only read/write count is from lease requestor.
* Note that this will result in denying write leases when i_writecount
* is negative, which is what we want. (We shouldn't grant write leases
* on files open for execution.)
*/
if (filp->f_mode & FMODE_WRITE)
self_wcount = 1;
else if (filp->f_mode & FMODE_READ)
self_rcount = 1;
if (atomic_read(&inode->i_writecount) != self_wcount ||
atomic_read(&inode->i_readcount) != self_rcount)
return -EAGAIN;
return 0;
}
static const struct lease_manager_operations lease_manager_ops = {
.lm_break = lease_break_callback,
.lm_change = lease_modify,
.lm_setup = lease_setup,
.lm_open_conflict = lease_open_conflict,
};
/*
@ -1669,7 +1620,7 @@ restart:
spin_unlock(&ctx->flc_lock);
percpu_up_read(&file_rwsem);
lease_dispose_list(&dispose);
locks_dispose_list(&dispose);
error = wait_event_interruptible_timeout(new_fl->c.flc_wait,
list_empty(&new_fl->c.flc_blocked_member),
break_time);
@ -1692,7 +1643,7 @@ restart:
out:
spin_unlock(&ctx->flc_lock);
percpu_up_read(&file_rwsem);
lease_dispose_list(&dispose);
locks_dispose_list(&dispose);
free_lock:
locks_free_lease(new_fl);
return error;
@ -1776,7 +1727,7 @@ static int __fcntl_getlease(struct file *filp, unsigned int flavor)
spin_unlock(&ctx->flc_lock);
percpu_up_read(&file_rwsem);
lease_dispose_list(&dispose);
locks_dispose_list(&dispose);
}
return type;
}
@ -1794,6 +1745,52 @@ int fcntl_getdeleg(struct file *filp, struct delegation *deleg)
return 0;
}
/**
* check_conflicting_open - see if the given file points to an inode that has
* an existing open that would conflict with the
* desired lease.
* @filp: file to check
* @arg: type of lease that we're trying to acquire
* @flags: current lock flags
*
* Check to see if there's an existing open fd on this file that would
* conflict with the lease we're trying to set.
*/
static int
check_conflicting_open(struct file *filp, const int arg, int flags)
{
struct inode *inode = file_inode(filp);
int self_wcount = 0, self_rcount = 0;
if (flags & FL_LAYOUT)
return 0;
if (flags & FL_DELEG)
/* We leave these checks to the caller */
return 0;
if (arg == F_RDLCK)
return inode_is_open_for_write(inode) ? -EAGAIN : 0;
else if (arg != F_WRLCK)
return 0;
/*
* Make sure that only read/write count is from lease requestor.
* Note that this will result in denying write leases when i_writecount
* is negative, which is what we want. (We shouldn't grant write leases
* on files open for execution.)
*/
if (filp->f_mode & FMODE_WRITE)
self_wcount = 1;
else if (filp->f_mode & FMODE_READ)
self_rcount = 1;
if (atomic_read(&inode->i_writecount) != self_wcount ||
atomic_read(&inode->i_readcount) != self_rcount)
return -EAGAIN;
return 0;
}
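The rule enforced by check_conflicting_open() is observable from userspace through fcntl(F_SETLEASE). The following is a hypothetical standalone demo, not part of the patch, assuming a local filesystem, a file owned by the caller, and leases enabled (fs.leases-enable=1).
/*
 * Demo: a write lease is refused with EAGAIN while any other file
 * description of the inode is open, even one in the same process, and is
 * granted once the requestor holds the only open.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "lease-demo.dat";
	int fd, extra;
	/* Create the file, then reopen it read-only for the lease request. */
	fd = open(path, O_WRONLY | O_CREAT, 0644);
	if (fd >= 0)
		close(fd);
	fd = open(path, O_RDONLY);
	extra = open(path, O_RDONLY);
	if (fd < 0 || extra < 0) {
		perror("open");
		return 1;
	}
	/* The extra open bumps i_readcount, so this must fail. */
	if (fcntl(fd, F_SETLEASE, F_WRLCK) < 0 && errno == EAGAIN)
		printf("write lease denied while another fd is open\n");
	close(extra);
	/* fd is now the only open file description: the lease is granted. */
	if (fcntl(fd, F_SETLEASE, F_WRLCK) == 0)
		printf("write lease granted after the extra fd was closed\n");
	fcntl(fd, F_SETLEASE, F_UNLCK);
	close(fd);
	return 0;
}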
static int
generic_add_lease(struct file *filp, int arg, struct file_lease **flp, void **priv)
{
@ -1830,7 +1827,7 @@ generic_add_lease(struct file *filp, int arg, struct file_lease **flp, void **pr
percpu_down_read(&file_rwsem);
spin_lock(&ctx->flc_lock);
time_out_leases(inode, &dispose);
error = lease->fl_lmops->lm_open_conflict(filp, arg);
error = check_conflicting_open(filp, arg, lease->c.flc_flags);
if (error)
goto out;
@ -1887,7 +1884,7 @@ generic_add_lease(struct file *filp, int arg, struct file_lease **flp, void **pr
* precedes these checks.
*/
smp_mb();
error = lease->fl_lmops->lm_open_conflict(filp, arg);
error = check_conflicting_open(filp, arg, lease->c.flc_flags);
if (error) {
locks_unlink_lock_ctx(&lease->c);
goto out;
@ -1899,7 +1896,7 @@ out_setup:
out:
spin_unlock(&ctx->flc_lock);
percpu_up_read(&file_rwsem);
lease_dispose_list(&dispose);
locks_dispose_list(&dispose);
if (is_deleg)
inode_unlock(inode);
if (!error && !my_fl)
@ -1935,7 +1932,7 @@ static int generic_delete_lease(struct file *filp, void *owner)
error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
spin_unlock(&ctx->flc_lock);
percpu_up_read(&file_rwsem);
lease_dispose_list(&dispose);
locks_dispose_list(&dispose);
return error;
}
@ -2738,7 +2735,7 @@ locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
spin_unlock(&ctx->flc_lock);
percpu_up_read(&file_rwsem);
lease_dispose_list(&dispose);
locks_dispose_list(&dispose);
}
/*

View File

@ -830,9 +830,11 @@ static inline bool legitimize_path(struct nameidata *nd,
static bool legitimize_links(struct nameidata *nd)
{
int i;
VFS_BUG_ON(nd->flags & LOOKUP_CACHED);
if (unlikely(nd->flags & LOOKUP_CACHED)) {
drop_links(nd);
nd->depth = 0;
return false;
}
for (i = 0; i < nd->depth; i++) {
struct saved *last = nd->stack + i;
if (unlikely(!legitimize_path(nd, &last->link, last->seq))) {
@ -881,11 +883,6 @@ static bool try_to_unlazy(struct nameidata *nd)
BUG_ON(!(nd->flags & LOOKUP_RCU));
if (unlikely(nd->flags & LOOKUP_CACHED)) {
drop_links(nd);
nd->depth = 0;
goto out1;
}
if (unlikely(nd->depth && !legitimize_links(nd)))
goto out1;
if (unlikely(!legitimize_path(nd, &nd->path, nd->seq)))
@ -921,11 +918,6 @@ static bool try_to_unlazy_next(struct nameidata *nd, struct dentry *dentry)
int res;
BUG_ON(!(nd->flags & LOOKUP_RCU));
if (unlikely(nd->flags & LOOKUP_CACHED)) {
drop_links(nd);
nd->depth = 0;
goto out2;
}
if (unlikely(nd->depth && !legitimize_links(nd)))
goto out2;
res = __legitimize_mnt(nd->path.mnt, nd->m_seq);
@ -2844,11 +2836,10 @@ static int filename_parentat(int dfd, struct filename *name,
}
/**
* __start_dirop - begin a create or remove dirop, performing locking and lookup
* start_dirop - begin a create or remove dirop, performing locking and lookup
* @parent: the dentry of the parent in which the operation will occur
* @name: a qstr holding the name within that parent
* @lookup_flags: intent and other lookup flags.
* @state: task state bitmask
*
* The lookup is performed and necessary locks are taken so that, on success,
* the returned dentry can be operated on safely.

View File

@ -137,7 +137,7 @@ static void netfs_read_unlock_folios(struct netfs_io_request *rreq,
rreq->front_folio_order = order;
fsize = PAGE_SIZE << order;
fpos = folio_pos(folio);
fend = fpos + fsize;
fend = umin(fpos + fsize, rreq->i_size);
trace_netfs_collect_folio(rreq, folio, fend, collected_to);

View File

@ -764,28 +764,9 @@ nfsd4_layout_lm_change(struct file_lease *onlist, int arg,
return lease_modify(onlist, arg, dispose);
}
/**
* nfsd4_layout_lm_open_conflict - see if the given file points to an inode that has
* an existing open that would conflict with the
* desired lease.
* @filp: file to check
* @arg: type of lease that we're trying to acquire
*
* The kernel will call into this operation to determine whether there
* are conflicting opens that may prevent the layout from being granted.
* For nfsd, that check is done at a higher level, so this trivially
* returns 0.
*/
static int
nfsd4_layout_lm_open_conflict(struct file *filp, int arg)
{
return 0;
}
static const struct lease_manager_operations nfsd4_layouts_lm_ops = {
.lm_break = nfsd4_layout_lm_break,
.lm_change = nfsd4_layout_lm_change,
.lm_open_conflict = nfsd4_layout_lm_open_conflict,
.lm_break = nfsd4_layout_lm_break,
.lm_change = nfsd4_layout_lm_change,
};
int

View File

@ -5555,29 +5555,10 @@ nfsd_change_deleg_cb(struct file_lease *onlist, int arg,
return -EAGAIN;
}
/**
* nfsd4_deleg_lm_open_conflict - see if the given file points to an inode that has
* an existing open that would conflict with the
* desired lease.
* @filp: file to check
* @arg: type of lease that we're trying to acquire
*
* The kernel will call into this operation to determine whether there
* are conflicting opens that may prevent the deleg from being granted.
* For nfsd, that check is done at a higher level, so this trivially
* returns 0.
*/
static int
nfsd4_deleg_lm_open_conflict(struct file *filp, int arg)
{
return 0;
}
static const struct lease_manager_operations nfsd_lease_mng_ops = {
.lm_breaker_owns_lease = nfsd_breaker_owns_lease,
.lm_break = nfsd_break_deleg_cb,
.lm_change = nfsd_change_deleg_cb,
.lm_open_conflict = nfsd4_deleg_lm_open_conflict,
};
static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)

View File

@ -517,18 +517,14 @@ static long pidfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
switch (cmd) {
/* Namespaces that hang off nsproxy. */
case PIDFD_GET_CGROUP_NAMESPACE:
#ifdef CONFIG_CGROUPS
if (!ns_ref_get(nsp->cgroup_ns))
break;
ns_common = to_ns_common(nsp->cgroup_ns);
#endif
break;
case PIDFD_GET_IPC_NAMESPACE:
#ifdef CONFIG_IPC_NS
if (!ns_ref_get(nsp->ipc_ns))
break;
ns_common = to_ns_common(nsp->ipc_ns);
#endif
break;
case PIDFD_GET_MNT_NAMESPACE:
if (!ns_ref_get(nsp->mnt_ns))
@ -536,43 +532,32 @@ static long pidfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
ns_common = to_ns_common(nsp->mnt_ns);
break;
case PIDFD_GET_NET_NAMESPACE:
#ifdef CONFIG_NET_NS
if (!ns_ref_get(nsp->net_ns))
break;
ns_common = to_ns_common(nsp->net_ns);
#endif
break;
case PIDFD_GET_PID_FOR_CHILDREN_NAMESPACE:
#ifdef CONFIG_PID_NS
if (!ns_ref_get(nsp->pid_ns_for_children))
break;
ns_common = to_ns_common(nsp->pid_ns_for_children);
#endif
break;
case PIDFD_GET_TIME_NAMESPACE:
#ifdef CONFIG_TIME_NS
if (!ns_ref_get(nsp->time_ns))
break;
ns_common = to_ns_common(nsp->time_ns);
#endif
break;
case PIDFD_GET_TIME_FOR_CHILDREN_NAMESPACE:
#ifdef CONFIG_TIME_NS
if (!ns_ref_get(nsp->time_ns_for_children))
break;
ns_common = to_ns_common(nsp->time_ns_for_children);
#endif
break;
case PIDFD_GET_UTS_NAMESPACE:
#ifdef CONFIG_UTS_NS
if (!ns_ref_get(nsp->uts_ns))
break;
ns_common = to_ns_common(nsp->uts_ns);
#endif
break;
/* Namespaces that don't hang off nsproxy. */
case PIDFD_GET_USER_NAMESPACE:
#ifdef CONFIG_USER_NS
scoped_guard(rcu) {
struct user_namespace *user_ns;
@ -581,10 +566,8 @@ static long pidfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
ns_common = to_ns_common(user_ns);
}
#endif
break;
case PIDFD_GET_PID_NAMESPACE:
#ifdef CONFIG_PID_NS
scoped_guard(rcu) {
struct pid_namespace *pid_ns;
@ -593,7 +576,6 @@ static long pidfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
ns_common = to_ns_common(pid_ns);
}
#endif
break;
default:
return -ENOIOCTLCMD;

View File

@ -1831,6 +1831,7 @@ xfs_buffered_write_iomap_begin(
*/
if (flags & IOMAP_ZERO) {
xfs_fileoff_t eof_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
u64 end;
if (isnullstartblock(imap.br_startblock) &&
offset_fsb >= eof_fsb)
@ -1850,14 +1851,12 @@ xfs_buffered_write_iomap_begin(
*/
if (imap.br_state == XFS_EXT_UNWRITTEN &&
offset_fsb < eof_fsb) {
loff_t foffset = offset, fend;
loff_t len = min(count,
XFS_FSB_TO_B(mp, imap.br_blockcount));
fend = offset +
min(count, XFS_FSB_TO_B(mp, imap.br_blockcount));
iomap_fill_dirty_folios(iter, &foffset, fend,
&iomap_flags);
end = iomap_fill_dirty_folios(iter, offset, len);
end_fsb = min_t(xfs_fileoff_t, end_fsb,
XFS_B_TO_FSB(mp, foffset));
XFS_B_TO_FSB(mp, end));
}
xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);

View File

@ -51,7 +51,7 @@
int acpi_irq_penalty_init(void);
int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering,
int *polarity, char **name, u32 *gsi);
int *polarity, char **name);
int acpi_pci_link_free_irq(acpi_handle handle);
/* ACPI PCI Device Binding */

View File

@ -60,12 +60,6 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
int drm_atomic_helper_check_planes(struct drm_device *dev,
struct drm_atomic_state *state);
int drm_atomic_helper_check_crtc_primary_plane(struct drm_crtc_state *crtc_state);
void drm_atomic_helper_commit_encoder_bridge_disable(struct drm_device *dev,
struct drm_atomic_state *state);
void drm_atomic_helper_commit_crtc_disable(struct drm_device *dev,
struct drm_atomic_state *state);
void drm_atomic_helper_commit_encoder_bridge_post_disable(struct drm_device *dev,
struct drm_atomic_state *state);
int drm_atomic_helper_check(struct drm_device *dev,
struct drm_atomic_state *state);
void drm_atomic_helper_commit_tail(struct drm_atomic_state *state);
@ -95,24 +89,8 @@ drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
void
drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *state);
void drm_atomic_helper_commit_crtc_set_mode(struct drm_device *dev,
struct drm_atomic_state *state);
void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
struct drm_atomic_state *state);
void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
struct drm_atomic_state *state);
void drm_atomic_helper_commit_encoder_bridge_pre_enable(struct drm_device *dev,
struct drm_atomic_state *state);
void drm_atomic_helper_commit_crtc_enable(struct drm_device *dev,
struct drm_atomic_state *state);
void drm_atomic_helper_commit_encoder_bridge_enable(struct drm_device *dev,
struct drm_atomic_state *state);
void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
struct drm_atomic_state *old_state);

View File

@ -176,17 +176,33 @@ struct drm_bridge_funcs {
/**
* @disable:
*
* This callback should disable the bridge. It is called right before
* the preceding element in the display pipe is disabled. If the
* preceding element is a bridge this means it's called before that
* bridge's @disable vfunc. If the preceding element is a &drm_encoder
* it's called right before the &drm_encoder_helper_funcs.disable,
* &drm_encoder_helper_funcs.prepare or &drm_encoder_helper_funcs.dpms
* hook.
* The @disable callback should disable the bridge.
*
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is still running when this callback is called.
*
*
* If the preceding element is a &drm_bridge, then this is called before
* that bridge is disabled via one of:
*
* - &drm_bridge_funcs.disable
* - &drm_bridge_funcs.atomic_disable
*
* If the preceding element of the bridge is a display controller, then
* this callback is called before the encoder is disabled via one of:
*
* - &drm_encoder_helper_funcs.atomic_disable
* - &drm_encoder_helper_funcs.prepare
* - &drm_encoder_helper_funcs.disable
* - &drm_encoder_helper_funcs.dpms
*
* and the CRTC is disabled via one of:
*
* - &drm_crtc_helper_funcs.prepare
* - &drm_crtc_helper_funcs.atomic_disable
* - &drm_crtc_helper_funcs.disable
* - &drm_crtc_helper_funcs.dpms.
*
* The @disable callback is optional.
*
* NOTE:
@ -199,17 +215,34 @@ struct drm_bridge_funcs {
/**
* @post_disable:
*
* This callback should disable the bridge. It is called right after the
* preceding element in the display pipe is disabled. If the preceding
* element is a bridge this means it's called after that bridge's
* @post_disable function. If the preceding element is a &drm_encoder
* it's called right after the encoder's
* &drm_encoder_helper_funcs.disable, &drm_encoder_helper_funcs.prepare
* or &drm_encoder_helper_funcs.dpms hook.
*
* The bridge must assume that the display pipe (i.e. clocks and timing
* signals) feeding it is no longer running when this callback is
* called.
* signals) feeding this bridge is no longer running when the
* @post_disable is called.
*
* This callback should perform all the actions required by the hardware
* after it has stopped receiving signals from the preceding element.
*
* If the preceding element is a &drm_bridge, then this is called after
* that bridge is post-disabled (unless marked otherwise by the
* @pre_enable_prev_first flag) via one of:
*
* - &drm_bridge_funcs.post_disable
* - &drm_bridge_funcs.atomic_post_disable
*
* If the preceding element of the bridge is a display controller, then
* this callback is called after the encoder is disabled via one of:
*
* - &drm_encoder_helper_funcs.atomic_disable
* - &drm_encoder_helper_funcs.prepare
* - &drm_encoder_helper_funcs.disable
* - &drm_encoder_helper_funcs.dpms
*
* and the CRTC is disabled via one of:
*
* - &drm_crtc_helper_funcs.prepare
* - &drm_crtc_helper_funcs.atomic_disable
* - &drm_crtc_helper_funcs.disable
* - &drm_crtc_helper_funcs.dpms
*
* The @post_disable callback is optional.
*
@ -252,18 +285,30 @@ struct drm_bridge_funcs {
/**
* @pre_enable:
*
* This callback should enable the bridge. It is called right before
* the preceding element in the display pipe is enabled. If the
* preceding element is a bridge this means it's called before that
* bridge's @pre_enable function. If the preceding element is a
* &drm_encoder it's called right before the encoder's
* &drm_encoder_helper_funcs.enable, &drm_encoder_helper_funcs.commit or
* &drm_encoder_helper_funcs.dpms hook.
*
* The display pipe (i.e. clocks and timing signals) feeding this bridge
* will not yet be running when this callback is called. The bridge must
* not enable the display link feeding the next bridge in the chain (if
* there is one) when this callback is called.
* will not yet be running when the @pre_enable is called.
*
* This callback should perform all the necessary actions to prepare the
* bridge to accept signals from the preceding element.
*
* If the preceding element is a &drm_bridge, then this is called before
* that bridge is pre-enabled (unless marked otherwise by
* @pre_enable_prev_first flag) via one of:
*
* - &drm_bridge_funcs.pre_enable
* - &drm_bridge_funcs.atomic_pre_enable
*
* If the preceding element of the bridge is a display controller, then
* this callback is called before the CRTC is enabled via one of:
*
* - &drm_crtc_helper_funcs.atomic_enable
* - &drm_crtc_helper_funcs.commit
*
* and the encoder is enabled via one of:
*
* - &drm_encoder_helper_funcs.atomic_enable
* - &drm_encoder_helper_funcs.enable
* - &drm_encoder_helper_funcs.commit
*
* The @pre_enable callback is optional.
*
@ -277,19 +322,31 @@ struct drm_bridge_funcs {
/**
* @enable:
*
* This callback should enable the bridge. It is called right after
* the preceding element in the display pipe is enabled. If the
* preceding element is a bridge this means it's called after that
* bridge's @enable function. If the preceding element is a
* &drm_encoder it's called right after the encoder's
* &drm_encoder_helper_funcs.enable, &drm_encoder_helper_funcs.commit or
* &drm_encoder_helper_funcs.dpms hook.
* The @enable callback should enable the bridge.
*
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is running when this callback is called. This
* callback must enable the display link feeding the next bridge in the
* chain if there is one.
*
* If the preceding element is a &drm_bridge, then this is called after
* that bridge is enabled via one of:
*
* - &drm_bridge_funcs.enable
* - &drm_bridge_funcs.atomic_enable
*
* If the preceding element of the bridge is a display controller, then
* this callback is called after the CRTC is enabled via one of:
*
* - &drm_crtc_helper_funcs.atomic_enable
* - &drm_crtc_helper_funcs.commit
*
* and the encoder is enabled via one of:
*
* - &drm_encoder_helper_funcs.atomic_enable
* - &drm_encoder_helper_funcs.enable
 * - &drm_encoder_helper_funcs.commit
*
* The @enable callback is optional.
*
* NOTE:
@ -302,17 +359,30 @@ struct drm_bridge_funcs {
/**
* @atomic_pre_enable:
*
* This callback should enable the bridge. It is called right before
* the preceding element in the display pipe is enabled. If the
* preceding element is a bridge this means it's called before that
* bridge's @atomic_pre_enable or @pre_enable function. If the preceding
* element is a &drm_encoder it's called right before the encoder's
* &drm_encoder_helper_funcs.atomic_enable hook.
*
* The display pipe (i.e. clocks and timing signals) feeding this bridge
* will not yet be running when this callback is called. The bridge must
* not enable the display link feeding the next bridge in the chain (if
* there is one) when this callback is called.
* will not yet be running when the @atomic_pre_enable is called.
*
* This callback should perform all the necessary actions to prepare the
* bridge to accept signals from the preceding element.
*
* If the preceding element is a &drm_bridge, then this is called before
* that bridge is pre-enabled (unless marked otherwise by
* @pre_enable_prev_first flag) via one of:
*
* - &drm_bridge_funcs.pre_enable
* - &drm_bridge_funcs.atomic_pre_enable
*
* If the preceding element of the bridge is a display controller, then
* this callback is called before the CRTC is enabled via one of:
*
* - &drm_crtc_helper_funcs.atomic_enable
* - &drm_crtc_helper_funcs.commit
*
* and the encoder is enabled via one of:
*
* - &drm_encoder_helper_funcs.atomic_enable
* - &drm_encoder_helper_funcs.enable
* - &drm_encoder_helper_funcs.commit
*
* The @atomic_pre_enable callback is optional.
*/
@ -322,18 +392,31 @@ struct drm_bridge_funcs {
/**
* @atomic_enable:
*
* This callback should enable the bridge. It is called right after
* the preceding element in the display pipe is enabled. If the
* preceding element is a bridge this means it's called after that
* bridge's @atomic_enable or @enable function. If the preceding element
* is a &drm_encoder it's called right after the encoder's
* &drm_encoder_helper_funcs.atomic_enable hook.
* The @atomic_enable callback should enable the bridge.
*
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is running when this callback is called. This
* callback must enable the display link feeding the next bridge in the
* chain if there is one.
*
* If the preceding element is a &drm_bridge, then this is called after
* that bridge is enabled via one of:
*
* - &drm_bridge_funcs.enable
* - &drm_bridge_funcs.atomic_enable
*
* If the preceding element of the bridge is a display controller, then
* this callback is called after the CRTC is enabled via one of:
*
* - &drm_crtc_helper_funcs.atomic_enable
* - &drm_crtc_helper_funcs.commit
*
* and the encoder is enabled via one of:
*
* - &drm_encoder_helper_funcs.atomic_enable
* - &drm_encoder_helper_funcs.enable
 * - &drm_encoder_helper_funcs.commit
*
* The @atomic_enable callback is optional.
*/
void (*atomic_enable)(struct drm_bridge *bridge,
@ -341,16 +424,32 @@ struct drm_bridge_funcs {
/**
* @atomic_disable:
*
* This callback should disable the bridge. It is called right before
* the preceding element in the display pipe is disabled. If the
* preceding element is a bridge this means it's called before that
* bridge's @atomic_disable or @disable vfunc. If the preceding element
* is a &drm_encoder it's called right before the
* &drm_encoder_helper_funcs.atomic_disable hook.
* The @atomic_disable callback should disable the bridge.
*
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is still running when this callback is called.
*
* If the preceding element is a &drm_bridge, then this is called before
* that bridge is disabled via one of:
*
* - &drm_bridge_funcs.disable
* - &drm_bridge_funcs.atomic_disable
*
* If the preceding element of the bridge is a display controller, then
* this callback is called before the encoder is disabled via one of:
*
* - &drm_encoder_helper_funcs.atomic_disable
* - &drm_encoder_helper_funcs.prepare
* - &drm_encoder_helper_funcs.disable
* - &drm_encoder_helper_funcs.dpms
*
* and the CRTC is disabled via one of:
*
* - &drm_crtc_helper_funcs.prepare
* - &drm_crtc_helper_funcs.atomic_disable
* - &drm_crtc_helper_funcs.disable
* - &drm_crtc_helper_funcs.dpms.
*
* The @atomic_disable callback is optional.
*/
void (*atomic_disable)(struct drm_bridge *bridge,
@ -359,16 +458,34 @@ struct drm_bridge_funcs {
/**
* @atomic_post_disable:
*
* This callback should disable the bridge. It is called right after the
* preceding element in the display pipe is disabled. If the preceding
* element is a bridge this means it's called after that bridge's
* @atomic_post_disable or @post_disable function. If the preceding
* element is a &drm_encoder it's called right after the encoder's
* &drm_encoder_helper_funcs.atomic_disable hook.
*
* The bridge must assume that the display pipe (i.e. clocks and timing
* signals) feeding it is no longer running when this callback is
* called.
* signals) feeding this bridge is no longer running when the
* @atomic_post_disable is called.
*
* This callback should perform all the actions required by the hardware
* after it has stopped receiving signals from the preceding element.
*
* If the preceding element is a &drm_bridge, then this is called after
* that bridge is post-disabled (unless marked otherwise by the
* @pre_enable_prev_first flag) via one of:
*
* - &drm_bridge_funcs.post_disable
* - &drm_bridge_funcs.atomic_post_disable
*
* If the preceding element of the bridge is a display controller, then
* this callback is called after the encoder is disabled via one of:
*
* - &drm_encoder_helper_funcs.atomic_disable
* - &drm_encoder_helper_funcs.prepare
* - &drm_encoder_helper_funcs.disable
* - &drm_encoder_helper_funcs.dpms
*
* and the CRTC is disabled via one of:
*
* - &drm_crtc_helper_funcs.prepare
* - &drm_crtc_helper_funcs.atomic_disable
* - &drm_crtc_helper_funcs.disable
* - &drm_crtc_helper_funcs.dpms
*
* The @atomic_post_disable callback is optional.
*/
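To make the enable/disable ordering described in these kernel-doc comments concrete, here is a hedged sketch of a bridge that only logs when each hook runs. It uses the legacy hooks because they take just the bridge pointer; a real driver would normally implement the atomic_* variants, which follow the same ordering rules. All demo_* identifiers are hypothetical and not part of this header.
/* Sketch only: a bridge whose hooks merely trace the enable/disable order. */
#include <drm/drm_bridge.h>
#include <linux/printk.h>
static void demo_bridge_pre_enable(struct drm_bridge *bridge)
{
	/* The CRTC/encoder (or previous bridge) is not running yet. */
	pr_debug("%s: get ready to receive the video signal\n", __func__);
}
static void demo_bridge_enable(struct drm_bridge *bridge)
{
	/* The preceding element is now live; enable our output link. */
	pr_debug("%s: source running, enable the output\n", __func__);
}
static void demo_bridge_disable(struct drm_bridge *bridge)
{
	/* The preceding element is still feeding us a signal here. */
	pr_debug("%s: stop driving the output\n", __func__);
}
static void demo_bridge_post_disable(struct drm_bridge *bridge)
{
	/* Clocks and timing signals from upstream are gone by now. */
	pr_debug("%s: input signal is off, finish powering down\n", __func__);
}
static const struct drm_bridge_funcs demo_bridge_funcs = {
	.pre_enable	= demo_bridge_pre_enable,
	.enable		= demo_bridge_enable,
	.disable	= demo_bridge_disable,
	.post_disable	= demo_bridge_post_disable,
};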

View File

@ -49,7 +49,6 @@ struct lease_manager_operations {
int (*lm_change)(struct file_lease *, int, struct list_head *);
void (*lm_setup)(struct file_lease *, void **);
bool (*lm_breaker_owns_lease)(struct file_lease *);
int (*lm_open_conflict)(struct file *, int);
};
struct lock_manager {

View File

@ -88,9 +88,6 @@ struct vm_fault;
/*
* Flags set by the core iomap code during operations:
*
* IOMAP_F_FOLIO_BATCH indicates that the folio batch mechanism is active
* for this operation, set by iomap_fill_dirty_folios().
*
* IOMAP_F_SIZE_CHANGED indicates to the iomap_end method that the file size
* has changed as the result of this write operation.
*
@ -98,7 +95,6 @@ struct vm_fault;
* range it covers needs to be remapped by the high level before the operation
* can proceed.
*/
#define IOMAP_F_FOLIO_BATCH (1U << 13)
#define IOMAP_F_SIZE_CHANGED (1U << 14)
#define IOMAP_F_STALE (1U << 15)
@ -356,8 +352,8 @@ bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio);
int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
const struct iomap_ops *ops,
const struct iomap_write_ops *write_ops);
unsigned int iomap_fill_dirty_folios(struct iomap_iter *iter, loff_t *start,
loff_t end, unsigned int *iomap_flags);
loff_t iomap_fill_dirty_folios(struct iomap_iter *iter, loff_t offset,
loff_t length);
int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
bool *did_zero, const struct iomap_ops *ops,
const struct iomap_write_ops *write_ops, void *private);

View File

@ -23,7 +23,7 @@
#define XATTR_REPLACE 0x2 /* set value, fail if attr does not exist */
struct xattr_args {
__aligned_u64 value;
__aligned_u64 __user value;
__u32 size;
__u32 flags;
};

View File

@ -902,11 +902,8 @@ out_clean:
for (thr = 0; thr < nr_threads; thr++) {
if (data[thr].thr)
kthread_stop(data[thr].thr);
if (data[thr].cr)
acomp_request_free(data[thr].cr);
if (!IS_ERR_OR_NULL(data[thr].cc))
crypto_free_acomp(data[thr].cc);
acomp_request_free(data[thr].cr);
crypto_free_acomp(data[thr].cc);
}
vfree(data);
}
@ -1502,11 +1499,8 @@ out_clean:
for (thr = 0; thr < nr_threads; thr++) {
if (data[thr].thr)
kthread_stop(data[thr].thr);
if (data[thr].cr)
acomp_request_free(data[thr].cr);
if (!IS_ERR_OR_NULL(data[thr].cc))
crypto_free_acomp(data[thr].cc);
acomp_request_free(data[thr].cr);
crypto_free_acomp(data[thr].cc);
}
vfree(data);
}