Hi all
This moves the pm_info file from debugfs to sysfs, next to the other two power files.
Requested by several users at Phoronix.
PS: Please CC me. Also please be gentle, it's my first step in kernel-land ;)
Signed-off-by: Lauri Kasanen <cand@gmx.com>
---
 drivers/gpu/drm/radeon/radeon_pm.c |   86 ++++++++++++++++++-----------------
 1 files changed, 44 insertions(+), 42 deletions(-)

diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 0882554..7aab18f 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -45,7 +45,6 @@ static const char *radeon_pm_state_type_name[5] = {
 };
 
 static void radeon_dynpm_idle_work_handler(struct work_struct *work);
-static int radeon_debugfs_pm_init(struct radeon_device *rdev);
 static bool radeon_pm_in_vbl(struct radeon_device *rdev);
 static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
 static void radeon_pm_update_profile(struct radeon_device *rdev);
@@ -437,8 +436,49 @@ fail:
 	return count;
 }
 
+static ssize_t radeon_get_pm_info(struct device *dev,
+				  struct device_attribute *attr,
+				  char *buf)
+{
+	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+	struct radeon_device *rdev = ddev->dev_private;
+
+	ssize_t curpos, len = PAGE_SIZE;
+	char *tmp;
+
+	curpos = snprintf(buf, len,
+			  "default engine clock: %u0 kHz\n"
+			  "current engine clock: %u0 kHz\n"
+			  "default memory clock: %u0 kHz\n",
+			  rdev->pm.default_sclk,
+			  radeon_get_engine_clock(rdev),
+			  rdev->pm.default_mclk);
+	len -= curpos;
+
+	if (rdev->asic->get_memory_clock) {
+		tmp = buf + curpos;
+		curpos += snprintf(tmp, len, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
+		len = PAGE_SIZE - curpos;
+	}
+
+	if (rdev->pm.current_vddc) {
+		tmp = buf + curpos;
+		curpos += snprintf(tmp, len, "voltage: %u mV\n", rdev->pm.current_vddc);
+		len = PAGE_SIZE - curpos;
+	}
+
+	if (rdev->asic->get_pcie_lanes) {
+		tmp = buf + curpos;
+		curpos += snprintf(tmp, len, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
+		len = PAGE_SIZE - curpos;
+	}
+
+	return curpos;
+}
+
 static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
 static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
+static DEVICE_ATTR(power_info, S_IRUGO, radeon_get_pm_info, NULL);
 
 static ssize_t radeon_hwmon_show_temp(struct device *dev,
 				      struct device_attribute *attr,
@@ -639,14 +679,14 @@ int radeon_pm_init(struct radeon_device *rdev)
 	ret = device_create_file(rdev->dev, &dev_attr_power_method);
 	if (ret)
 		DRM_ERROR("failed to create device file for power method\n");
+	ret = device_create_file(rdev->dev, &dev_attr_power_info);
+	if (ret)
+		DRM_ERROR("failed to create device file for power info\n");
 
 #ifdef CONFIG_ACPI
 	rdev->acpi_nb.notifier_call = radeon_acpi_event;
 	register_acpi_notifier(&rdev->acpi_nb);
 #endif
-	if (radeon_debugfs_pm_init(rdev)) {
-		DRM_ERROR("Failed to register debugfs file for PM!\n");
-	}
 
 	DRM_INFO("radeon: power management initialized\n");
 }
@@ -843,41 +883,3 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work)
 	mutex_unlock(&rdev->pm.mutex);
 	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
 }
-
-/*
- * Debugfs info
- */
-#if defined(CONFIG_DEBUG_FS)
-
-static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
-{
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct radeon_device *rdev = dev->dev_private;
-
-	seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
-	seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
-	seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
-	if (rdev->asic->pm.get_memory_clock)
-		seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
-	if (rdev->pm.current_vddc)
-		seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
-	if (rdev->asic->pm.get_pcie_lanes)
-		seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
-
-	return 0;
-}
-
-static struct drm_info_list radeon_pm_info_list[] = {
-	{"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
-};
-#endif
-
-static int radeon_debugfs_pm_init(struct radeon_device *rdev)
-{
-#if defined(CONFIG_DEBUG_FS)
-	return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
-#else
-	return 0;
-#endif
-}
On 02.06.2012 18:08, Lauri Kasanen wrote:
> Hi all
>
> This moves the pm_info file from debugfs to sysfs, next to the other two power files.
>
> Requested by several users at Phoronix.
>
> PS: Please CC me. Also please be gentle, it's my first step in kernel-land ;)
>
> Signed-off-by: Lauri Kasanen <cand@gmx.com>
Hui? What should this be good for?
Sysfs files are for setting driver parameters, like the power management method or profile currently in use. One major advantage of sysfs is its strict rules for a permanent, machine-usable interface; for example, it is mandatory to expose only one parameter per sysfs file.

Debugfs, on the other hand, should be used for human-readable information, e.g. printing the current clocks in a human-readable form. You also don't need a debug build or any other debugging facility turned on to get that information; just take a look under "/sys/kernel/debug/dri/*".
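For example, each of the existing radeon sysfs files holds exactly one value:

  $ cat /sys/class/drm/card0/device/power_method
  profile

while the debugfs file prints a whole human-readable block at once.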
So the code is actually quite valid as it is.
Regards, Christian.
> [patch quoted in full; snipped]
On Sun, 03 Jun 2012 12:54:30 +0200, Christian König <deathsimple@vodafone.de> wrote:
> > This moves the pm_info file from debugfs to sysfs, next to the other two power files.
>
> Sysfs files are for setting driver parameters, like the power management method or profile currently in use. [...]
>
> Debugfs, on the other hand, should be used for human-readable information [...] just take a look under "/sys/kernel/debug/dri/*".
I have no such dir, /sys/kernel/debug. The fact that you have it means you have CONFIG_DEBUG_FS enabled and debugfs mounted.
> So the code is actually quite valid as it is.
First, the current location is illogical, and several users have complained about it. This info should be right next to where it is tweaked, i.e. right next to power_profile and power_method. That is where users expect it to be.

Secondly, checking the clocks is absolutely not a debug operation. Requiring a debug option (CONFIG_DEBUG_FS) to see this info is therefore plain wrong. This info needs to be available to all users, including those on production kernels without such debug options.
--
So the issue is the location of the info, not the format. I'd be more than happy to split it into six files (default_core_clock, current_core_clock...) that each offer just a kHz number, just like cpufreq's scaling_cur_freq does. Would that be preferable?
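Each of those would then be a trivial one-value read. Roughly like this untested sketch, reusing the helpers from the patch above (the attribute name is made up for the example):

  static ssize_t radeon_get_current_engine_clock(struct device *dev,
                                                 struct device_attribute *attr,
                                                 char *buf)
  {
          struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
          struct radeon_device *rdev = ddev->dev_private;

          /* one value per file, in kHz (the helper reports units of 10 kHz,
           * hence the trailing literal 0, as in the patch above) */
          return snprintf(buf, PAGE_SIZE, "%u0\n", radeon_get_engine_clock(rdev));
  }

  /* read-only, registered with device_create_file() like the other two */
  static DEVICE_ATTR(current_engine_clock, S_IRUGO,
                     radeon_get_current_engine_clock, NULL);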
- Lauri
On 04.06.2012 10:44, Lauri Kasanen wrote:
On Sun, 03 Jun 2012 12:54:30 +0200 Christian Königdeathsimple@vodafone.de wrote:
This moves the pm_info file from debugfs to next to the other two power files.
Requested by several users at Phoronix.
PS: Please CC me. Also please be gentle, it's my first step in kernel-land ;)
Hui? What should this be good for?
Sysfs files are for setting driver parameters, like the power management method or profile currently in use. One major advantage of sysfs is the strict rules for a permanent and machine usable interface, for example it is mandatory to only specify one parameter per sysfs file.
Debugfs on the other hand should be used for human readable informations, e.g. the printing the current clocks in a human readable form. Also you don't need a debug build or turn on any other debugging facility to get those information, just take a look under "sys/kernel/debug/dri/*".
I have no such dir, /sys/kernel/debug. The fact you have it means you have CONFIG_DEBUGFS enabled and mounted.
So the code is actually quite valid as it is.
First, the current location is illogical, and several users have complained about it. This info should be right next to where it is tweaked, ie right next to power_profile and power_method. That is where it's expected to be by users.
Secondly, checking the clocks is absolutely not a debug operation. Therefore requiring a debug option (CONFIG_DEBUGFS) to see this info is plain wrong.. This info needs to be available to all users, including those on production kernels without such debug options.
--
So the issue is the location of the info, not the format. I'd be more than happy to split it into six files (default_core_clock, current_core_clock...) that each offer just a kHz number, just like the cpufreq scaling_cur_freq do. Would that be preferable?
Yeah, that sounds like a start. Also, only register those files if the clock in question is really available; integrated chipsets don't have a memory clock, for example.

But I have my doubts that it will be accepted easily, because into debugfs we can pretty much pump every piece of information we want, while sysfs files must maintain a more or less stable API for setting system parameters, see Documentation/sysfs-rules.txt.
Christian.
On Mon, Jun 4, 2012 at 7:30 AM, Christian König <deathsimple@vodafone.de> wrote:
> > So the issue is the location of the info, not the format. I'd be more than happy to split it into six files [...] Would that be preferable?
>
> Yeah, that sounds like a start. [...]
>
> But I have my doubts that it will be accepted easily, because into debugfs we can pretty much pump every piece of information we want, while sysfs files must maintain a more or less stable API for setting system parameters, see Documentation/sysfs-rules.txt.
That's my main concern. I don't want to maintain the current debug interface as a stable one; that's why it's in debugfs.
Alex
On 04/06/2012 13:30, Christian König wrote:
> > So the issue is the location of the info, not the format. I'd be more than happy to split it into six files [...] Would that be preferable?
>
> Yeah, that sounds like a start. [...]
Hey everyone,
Now that we are talking about this, I think we need a common way to expose clocks and performance levels across drm drivers. At the moment, Nouveau's sysfs interface for PM is anything but great. I'm sure we can all agree on a common interface.

NVIDIA PM is performance-level based (up to 4 levels + possible overclocking). IIRC, AMD uses perflvls too (3: low, middle and high). No idea about Intel, but I'm curious about their boost feature. I also don't know how many reclockable domains you have, but so far we reclock 9 domains.

What we could want to expose:
- 1) Several performance profiles (boot, 0, 1, 2, 3, dynamic, custom)
- 2) A selectable performance profile for battery/mains (arguable)
- 3) The clocks for each performance profile
- 4) The voltage for each performance profile (there may be voltage domains too)
- 5) The custom performance profile should allow the user to set clocks and voltage
= Performance profile representation =
I think that exposing one sysfs file per clock per perflvl is insane, at least on Nouveau (that would result in 63 files). At the moment, we expose some clocks of a perflvl on one readable line (c: core 405MHz shader 810MHz memory 405MHz voltage 900mV). However, this is kind of against the rule that sysfs is not a human interface. We didn't pay attention to it, but we'll have to soon.

IMO, we should use one sysfs file per performance profile and then display the clocks as CSV, each column meaning a different domain. If a domain is not available on a given card, then the advertised clock should be 0. That should allow us to add new domains when needed.
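For example, reading such a per-profile file could give (file name and domain order are made up here):

  $ cat /sys/class/drm/card0/device/profile_1_clocks
  405000,810000,405000,0

i.e. core, shader and memory in kHz, with a trailing 0 for a domain this card doesn't have.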
The custom performance level could be constructed in the same way.
However, I'm not sure about what should be done for voltage. At the moment, we have only one adjustable voltage domain. It may however change in the future. The solution may be to have 2 sysfs files per performance profile, one exposing clocks and one exposing voltages.
= Setting clocks =
The user can only select between a few performance profiles:
- the boot clocks
- the performance levels as found in the vbios
- the dynamic profile that switches between the vbios perflvls according to the load
- the custom profile, for tweakers

Currently, we allow the user to set 2 performance profiles: one is used when on battery and one when on mains. If the user sets only one performance profile, then it is used in both cases.
Example: Both values are specified in one write: echo 0,2 > /sys/class/drm/card0/device/performance_level
= Reading clocks =
I see 3 important pieces of information we need to export:
- the current performance profile
- the current clocks & voltages
- the settings actually set by the user (battery/mains perflvl)
= Anything else ? =
What do you think? Did I forget anything? How would this idea map to your hardware/drivers?
Martin
On Mon, Jun 4, 2012 at 8:40 AM, Martin Peres <martin.peres@free.fr> wrote:
> Now that we are talking about this, I think we need a common way to expose clocks and performance levels across drm drivers. [...]
>
> [full proposal quoted; snipped]
I don't think sysfs is the way to go; I am pretty sure that power management will change drastically again in the future, especially between discrete and integrated GPUs. I would rather have hardware-specific ioctls.
Cheers, Jerome
On 04/06/2012 16:31, Jerome Glisse wrote:
> I don't think sysfs is the way to go; I am pretty sure that power management will change drastically again in the future, especially between discrete and integrated GPUs. I would rather have hardware-specific ioctls.
Any particular idea of what may change?
The proposed interface would work even with entirely dynamic reclocking (per-domain frequency scaling) as opposed to perflvl-based reclocking. It is unlikely to happen, though.
A more realistic possibility could be that engines get reclocked independently according to their internal load. If that were to happen, then the already-existing API could be used as a master control, and engines could specify the performance-level policy they want (follow master, always min, always max, conservative, dynamic).
I'm pretty sure voltage domains will soon appear but I don't see what could possibly be changed that wouldn't be covered by what I proposed.
As for the rationale for using IOCTLs instead of sysfs, I really don't understand it. I personally want to empower the users to let them decide what is best for them. Sysfs is way easier to work with!
Although my idea may be sketchy, I am dead-serious about coming up with a good API.
Martin
On Mon, Jun 4, 2012 at 11:02 AM, Martin Peres <martin.peres@free.fr> wrote:
> [...]
>
> As for the rationale for using IOCTLs instead of sysfs, I really don't understand it. I personally want to empower the users to let them decide what is best for them. Sysfs is way easier to work with!
>
> Although my idea may be sketchy, I am dead-serious about coming up with a good API.
My experience is that things that are true for GPUs today are not tomorrow. Yes, there will still be clocks/voltages, but there could be completely new, different things, like shutting down blocks.

I am not even mentioning things like complex value-range dependencies between them (for instance, if a domain has its clock/voltage in a certain range, then some other domain can only have a clock in a restricted range of values).

While I agree that sysfs looks easy for a user to play with, I believe a GUI is what you are really after, and afaik the closed-source drivers all expose a GUI for their power management. Using an ioctl allows better overall control, like atomically setting several domains...
Cheers, Jerome
On 04.06.2012 17:18, Jerome Glisse wrote:
> > As for the rationale for using IOCTLs instead of sysfs, I really don't understand it. [...]
>
> My experience is that things that are true for GPUs today are not tomorrow. [...]
>
> While I agree that sysfs looks easy for a user to play with, I believe a GUI is what you are really after, and afaik the closed-source drivers all expose a GUI for their power management. Using an ioctl allows better overall control, like atomically setting several domains...
Yeah, I think Jerome is right here.
The internals, like different voltage areas, dependencies between clocks, possibly powered-down chip areas, etc., are very complex and actually completely uninteresting to the end user. Also, the general direction of AMD hardware is towards a completely self-contained system, i.e. the chip handles mostly everything on its own.

I agree that this is better done with a hardware-dependent ioctl and then abstracted in userspace, instead of pushing the whole abstraction into the kernel.
Regards, Christian.
On Mon, 04 Jun 2012 18:18:10 +0200, Christian König <deathsimple@vodafone.de> wrote:
> > While I agree that sysfs looks easy for a user to play with, I believe a GUI is what you are really after [...]
>
> Yeah, I think Jerome is right here.
>
> I agree that this is better done with a hardware-dependent ioctl and then abstracted in userspace, instead of pushing the whole abstraction into the kernel.
Hi
As long as the info is made available better than it is currently, I'm all for it.
Though the direction of ioctls and such is deep outside my expertise. With a deep solution like that, someone who knows the area well would need to do it.
- Lauri
On Mon, Jun 4, 2012 at 12:29 PM, Lauri Kasanen <cand@gmx.com> wrote:
> As long as the info is made available better than it is currently, I'm all for it.
>
> Though the direction of ioctls and such is deep outside my expertise. With a deep solution like that, someone who knows the area well would need to do it.
My dream here is to talk with the gnome folks and have them make some kind of GPU module we could write against that would show up in control center. I just need to corner some of my gnome coworkers to work something out. So if you were using nouveau, you would have a nouveau-specific control center entry with nouveau-specific information (clocks, voltage) and controls. Same for radeon, intel...
Cheers, Jerome
On Mon, 4 Jun 2012 13:05:46 -0400, Jerome Glisse <j.glisse@gmail.com> wrote:

> My dream here is to talk with the gnome folks and have them make some kind of GPU module we could write against that would show up in control center. [...] Same for radeon, intel...
As I don't use Gnome or any other heavy DE, I'll end up writing a CLI tool for that anyway (for radeon at the start, if the interfaces differ). So don't worry about the userspace part, just expose everything kernel-side :)
- Lauri
On 04/06/2012 17:18, Jerome Glisse wrote:

> My experience is that things that are true for GPUs today are not tomorrow. Yes, there will still be clocks/voltages, but there could be completely new, different things, like shutting down blocks.
IMO, this isn't something the user should ever be aware of. NVIDIA GPUs have been clock gated for years and we can also cut the power to some parts of the card.
> I am not even mentioning things like complex value-range dependencies between them (for instance, if a domain has its clock/voltage in a certain range, then some other domain can only have a clock in a restricted range of values).
Yeah, the power budget could get in our way. However, if no perflvl is defined in the vbios and we can't calculate a given preset, then we are in the completely dynamic scenario I talked about earlier. It still fits the proposed interface, because the user is only setting a performance profile, not a performance level. That's the difference.
Moreover, we can't ask the user for anything too complex anyway...
> While I agree that sysfs looks easy for a user to play with, I believe a GUI is what you are really after, and afaik the closed-source drivers all expose a GUI for their power management. Using an ioctl allows better overall control, like atomically setting several domains...
Want to do power management from the userspace? :o
If you need that much control, you're doing something wrong. The kernel should be in charge of power management. The interface I'm talking about is just a way to report clocks to the user *and* to get some input from the user about what he really wants to achieve.
In the case where a user would want to set clocks + voltage himself, the sysfs interface I proposed works perfectly and atomically:
- The user sets both the voltage and clock domains of the custom performance level.
- Then the user is free to switch to the custom performance profile.
- Here is your atomicity ;)
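Concretely, that could look like this (the file names are hypothetical; only performance_level matches the earlier example):

  echo 405000,810000,405000 > /sys/class/drm/card0/device/custom_clocks
  echo 900 > /sys/class/drm/card0/device/custom_voltage
  echo custom > /sys/class/drm/card0/device/performance_level

Nothing takes effect until the last write switches to the custom profile.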
While I agree that future GPUs will get more and more complex, I still think we need something broad enough to accommodate future architectures and precise enough to give users real control.

I would really like to get an interface like that in the foreseeable future. There is no rush, but we still need to find a way, and I would like the DRM community to think about this issue.
Martin
On Mon, Jun 4, 2012 at 1:01 PM, Martin Peres <martin.peres@free.fr> wrote:
> [...]
>
> While I agree that future GPUs will get more and more complex, I still think we need something broad enough to accommodate future architectures and precise enough to give users real control.
My point is that there is no way to find a power management API that fits all GPUs. If I were to do it now, I would have one ioctl version for r3xx, one for r5xx, one for r6xx/r7xx, one for r8xx, one for r9xx, ... Yes, there would be some common fields across them.

That being said, I think one file might fit all GPUs: the power profile one, accepting something like performance, normal, energy. I am pretty sure all GPUs have, and will continue to have and use, power profiles. But when it comes to reporting information and making custom profiles, I would use an ioctl, because on that side I see way too many differences across GPUs from the same company but from different generations, so I wouldn't even want to try to bolt something across GPUs from different companies. Also think of IGPs, where the memory clock doesn't make sense and where even voltage might not make sense, as the GPU might be so entangled with the CPU that it is tied to the CPU power states.
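E.g., with the values proposed above (whether this would reuse radeon's existing power_profile file is an open question):

  echo energy > /sys/class/drm/card0/device/power_profile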
Also, when I was referring to shutting down things, I think for instance that some custom profile/powersaving might want to disable a shader engine (way more radical than clock gating). Also think of the single-card multi-GPU case: people might want both GPUs working with the same profile, as in performance mode, or to power down one of the GPUs.

So, as I said in a previous mail, my perfect solution is ioctls, with the driver devs doing some kind of plugin for gnome-control-center (similar to what the compiz effects plugin was, from a design POV) where each driver dev can put a GUI that best reflects what is available in his specific case.
Cheers, Jerome
Answers inlined.
On 04/06/2012 19:19, Jerome Glisse wrote:

> My point is that there is no way to find a power management API that fits all GPUs. If I were to do it now, I would have one ioctl version for r3xx, one for r5xx, one for r6xx/r7xx, one for r8xx, one for r9xx, ... Yes, there would be some common fields across them.
Right, but would the userspace care for so much information?
As a user, I would rather have a slider ranging from aggressive power management to full performance. Then, it would be cool for tweakers to have a way to set everything they want. But if the interface cannot be common, that's not that important.
> That being said, I think one file might fit all GPUs: the power profile one, accepting something like performance, normal, energy. I am pretty sure all GPUs have, and will continue to have and use, power profiles.
We agree on that.
> But when it comes to reporting information and making custom profiles, I would use an ioctl [...] Also think of IGPs, where the memory clock doesn't make sense and where even voltage might not make sense, as the GPU might be so entangled with the CPU that it is tied to the CPU power states.
Fair enough. I'll need to study AMD hw more, then. It is perfectly doable and straightforward on nvidia, even on IGPs.
> Also, when I was referring to shutting down things, I think for instance that some custom profile/powersaving might want to disable a shader engine (way more radical than clock gating). Also think of the single-card multi-GPU case: people might want both GPUs working with the same profile, as in performance mode, or to power down one of the GPUs.
Exactly my point, but we seem to disagree on who should do this.
I guess my point makes sense only when put this way: the driver is responsible for lowering power consumption whenever it has the opportunity to do so (and that means being really opportunistic).
Your point makes sense when thinking the kernel should be doing as little as possible.
To be honest, I'm working towards really opportunistic reclocking using a general-purpose engine on newer GPUs. This would save a lot of energy in the typical browsing scenario.

This is why I'm concerned about exposing too much to userspace; the current state is so volatile that it doesn't even make sense to mention it in some cases.
> So, as I said in a previous mail, my perfect solution is ioctls, with the driver devs doing some kind of plugin for gnome-control-center (similar to what the compiz effects plugin was, from a design POV) where each driver dev can put a GUI that best reflects what is available in his specific case.
Yeah, I get your point as a kernel dev, but I pity the userspace dev who will need to figure out how to use all these ioctls and configuration options.

What about having a simple common API (sysfs, preferably) that allows changing the performance profile and leaves the rest to the drivers?
Would that be acceptable? Cheers, Martin
On Mon, Jun 4, 2012 at 1:54 PM, Martin Peres <martin.peres@free.fr> wrote:
> > My point is that there is no way to find a power management API that fits all GPUs. [...]
>
> Right, but would the userspace care for so much information?
I think it might. Think about newer GPUs, where we could let the user make a custom profile to restrict the GPU to some range of power consumption/temperature/fan speed... What kind of information I would want to expose would be highly GPU-family related.
> [...]
>
> Exactly my point, but we seem to disagree on who should do this.
>
> I guess my point makes sense only when put this way: the driver is responsible for lowering power consumption whenever it has the opportunity to do so (and that means being really opportunistic).
>
> Your point makes sense when thinking the kernel should be doing as little as possible.
No, my point makes sense for the case where the kernel is doing a lot of things too. We want aggressive kernel power management that downclocks the GPU even when the user is not asking for it.

Still, the user might want to create a custom profile to limit the range of what the kernel can do, for instance to force the kernel to never go above some clock/voltage, or below some level. The way I see it, user input is more a guideline to the kernel side than a set of strict rules.
> To be honest, I'm working towards really opportunistic reclocking using a general-purpose engine on newer GPUs. This would save a lot of energy in the typical browsing scenario.
We also want to go that way on AMD GPUs.
> This is why I'm concerned about exposing too much to userspace; the current state is so volatile that it doesn't even make sense to mention it in some cases.
The user might want to know in which range the GPU frequency sits, or to limit the power usage of his GPU, ...
> > So, as I said in a previous mail, my perfect solution is ioctls, with the driver devs doing some kind of plugin for gnome-control-center [...]
>
> Yeah, I get your point as a kernel dev, but I pity the userspace dev who will need to figure out how to use all these ioctls and configuration options.
My point there is that we do the userspace bit too; I would like to come up with some kind of module that things like gnome-control-center, the KDE equivalent, or any other desktop environment can use.
> What about having a simple common API (sysfs, preferably) that allows changing the performance profile and leaves the rest to the drivers?
>
> Would that be acceptable? Cheers, Martin
I think a power profile file is acceptable. I am not sure whether it should be discrete, like performance/normal/battery/low, or more of a 0-10 scale, with 10 being full power and 0 the lowest power the GPU can do. I am more inclined towards the word solution, for which we can give clear definitions, like:

low: the power mode is only able to meet the needs of one light graphical application (like video playback); expect some sluggishness in rendering when switching between applications.
battery: save as much power and reduce GPU speed as far as possible while still allowing fast desktop reactivity.
...
Cheers, Jerome
On Mon, Jun 4, 2012 at 2:18 PM, Jerome Glisse <j.glisse@gmail.com> wrote:
> [previous mail quoted in full; snipped]
Maybe I should stress that I believe the driver can't efficiently do quick reclocking when it comes to GPUs. My understanding is that newer GPUs, whether AMD or NVIDIA, use some kind of controller running a small firmware to adjust power on the fly based on GPU usage. That firmware uses driver input to determine the range of operation (max/min voltage/frequency/fan speed/...). What I want to allow is for userspace to customize and know about those ranges of operation. A power profile is then just a set of ranges (a range might be a single value on a less adjustable GPU), one for each adjustable parameter.
Cheers, Jerome
On 04.06.2012 20:29, Jerome Glisse wrote:
> > Yeah, I get your point as a kernel dev, but I pity the userspace dev who will need to figure out how to use all these ioctls and configuration options.
>
> My point there is that we do the userspace bit too; I would like to come up with some kind of module that things like gnome-control-center, the KDE equivalent, or any other desktop environment can use.
A GUI will be needed, but I thought it would be much simpler to share as much of the PM interface as possible.
If you hear about someone starting the project, I'll be glad to give some input and contribute some code too for Nouveau.
> > What about having a simple common API (sysfs, preferably) that allows changing the performance profile and leaves the rest to the drivers?
>
> I think a power profile file is acceptable. I am not sure whether it should be discrete, like performance/normal/battery/low, or more of a 0-10 scale [...] I am more inclined towards the word solution, for which we can give clear definitions [...]
Just like you, my mind is mixed. However, I don't think "performance/normal/battery/low" is a good way to put it.
I would go for something like this:
- low (static, clocked to the lowest)
- conservative (DVFS, but slow to upclock, instant declock): default on battery
- normal (DVFS, instant upclock and declock)
- performance (DVFS, instant upclock and slow declock): default on mains
- benchmark (static, clocked to the highest)
slow ~= 100ms, instant == less than a frame
The hardest part will be upclocking the GPU right to where it isn't a bottleneck anymore.
> Maybe I should stress that I believe the driver can't efficiently do quick reclocking when it comes to GPUs. My understanding is that newer GPUs, whether AMD or NVIDIA, use some kind of controller running a small firmware to adjust power on the fly based on GPU usage. [...] A power profile is then just a set of ranges (a range might be a single value on a less adjustable GPU), one for each adjustable parameter.
Exactly my thoughts too :) PDAEMON is a 200MHz engine that has full access to MMIO. We can get and generate IRQs from/for the host. It can access the performance counters, but it seems it also has some performance counters of its own.

The only problem is that we have no good compiler for this ISA; the ISA was reverse-engineered by mwk and we have a good assembler. So I'm trying to keep things as simple as possible for maintainability :D
FYI: https://gitorious.org/pdaemon/pdaemon/blobs/master/pdaemon.fuc
Anyway, we are in the same mindset, so let's implement what both of us have in mind; when we are done with our toy implementations, we can compare our solutions and see how we can share things.
Martin
On Mon, Jun 04, 2012 at 02:18:00PM -0400, Jerome Glisse wrote:
> > Right, but would the userspace care for so much information?
>
> I think it might. Think about newer GPUs, where we could let the user make a custom profile to restrict the GPU to some range of power consumption/temperature/fan speed... What kind of information I would want to expose would be highly GPU-family related.
In i915-land we're trying to make things Just Work. If needed, we can expose (generation/platform-specific) tunables in sysfs. But on snb and later, the combination of rc6 + GPU turbo (mostly handled by the hw itself) is rather OK, so I don't see anything going above and beyond a min/max frequency limiter: moving the min upwards would help for benchmarking, and pushing the max down might help conserve power for sucky let's-eat-everything GPU workloads. -Daniel
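Such a limiter could boil down to a pair of one-value files, e.g. (names invented here, not an existing interface):

  echo 650 > /sys/class/drm/card0/gt_min_freq_mhz
  echo 900 > /sys/class/drm/card0/gt_max_freq_mhz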
On 04.06.2012 21:54, Daniel Vetter wrote:
> In i915-land we're trying to make things Just Work. [...] moving the min upwards would help for benchmarking, and pushing the max down might help conserve power for sucky let's-eat-everything GPU workloads.
Thanks, Daniel. No wonder Intel IGPs are so power efficient :)