2020-02-28 10:36:12 +01:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
2020-02-28 10:36:13 +01:00
|
|
|
#include <stdbool.h>
|
2020-02-28 10:36:12 +01:00
|
|
|
#include <assert.h>
|
2020-06-23 10:09:03 -03:00
|
|
|
#include <errno.h>
|
|
|
|
#include <stdlib.h>
|
|
|
|
#include <string.h>
|
2020-07-19 20:13:11 +02:00
|
|
|
#include "metricgroup.h"
|
|
|
|
#include "debug.h"
|
2023-06-23 08:10:05 -07:00
|
|
|
#include "evlist.h"
|
2020-02-28 10:36:12 +01:00
|
|
|
#include "expr.h"
|
perf tool_pmu: Move expr literals to tool_pmu
Add the expr literals like "#smt_on" as tool events, this allows stat
events to give the values. On my laptop with hyperthreading enabled:
```
$ perf stat -e "has_pmem,num_cores,num_cpus,num_cpus_online,num_dies,num_packages,smt_on,system_tsc_freq" true
Performance counter stats for 'true':
0 has_pmem
8 num_cores
16 num_cpus
16 num_cpus_online
1 num_dies
1 num_packages
1 smt_on
2,496,000,000 system_tsc_freq
0.001113637 seconds time elapsed
0.001218000 seconds user
0.000000000 seconds sys
```
And with hyperthreading disabled:
```
$ perf stat -e "has_pmem,num_cores,num_cpus,num_cpus_online,num_dies,num_packages,smt_on,system_tsc_freq" true
Performance counter stats for 'true':
0 has_pmem
8 num_cores
16 num_cpus
8 num_cpus_online
1 num_dies
1 num_packages
0 smt_on
2,496,000,000 system_tsc_freq
0.000802115 seconds time elapsed
0.000000000 seconds user
0.000806000 seconds sys
```
As zero matters for these values, in stat-display
should_skip_zero_counter only skip the zero value if it is not the
first aggregation index.
The tool event implementations are used in expr but not evaluated as
events for simplicity. Also core_wide isn't made a tool event as it
requires command line parameters.
Signed-off-by: Ian Rogers <irogers@google.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Link: https://lore.kernel.org/r/20241002032016.333748-8-irogers@google.com
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
2024-10-01 20:20:10 -07:00
|
|
|
#include "smt.h"
|
|
|
|
#include "tool_pmu.h"
|
2023-07-27 19:24:47 -07:00
|
|
|
#include <util/expr-bison.h>
|
|
|
|
#include <util/expr-flex.h>
|
2022-11-09 10:49:11 -08:00
|
|
|
#include "util/hashmap.h"
|
2023-08-16 12:47:46 +01:00
|
|
|
#include "util/header.h"
|
|
|
|
#include "util/pmu.h"
|
2024-11-07 08:20:33 -08:00
|
|
|
#include <perf/cpumap.h>
|
2021-12-12 06:25:02 +00:00
|
|
|
#include <linux/err.h>
|
2020-05-15 15:17:32 -07:00
|
|
|
#include <linux/kernel.h>
|
2020-07-19 20:13:11 +02:00
|
|
|
#include <linux/zalloc.h>
|
|
|
|
#include <ctype.h>
|
2021-11-10 16:21:06 -08:00
|
|
|
#include <math.h>
|
2020-02-28 10:36:13 +01:00
|
|
|
|
2020-08-26 08:30:55 -07:00
|
|
|
/*
 * Value stored for each identifier in an expression parse context.
 * Either a plain double (with a source count) or a reference to
 * another metric whose expression may still need evaluating; the
 * 'kind' field selects which union member is active.
 */
struct expr_id_data {
	union {
		struct {
			double val;		/* The identifier's numeric value. */
			int source_count;	/* How many sources contributed to val. */
		} val;
		struct {
			double val;		/* Computed value once the reference is resolved. */
			const char *metric_name; /* Borrowed pointer, not owned (see expr__add_ref). */
			const char *metric_expr; /* Borrowed pointer, not owned (see expr__add_ref). */
		} ref;
	};

	enum {
		/* Holding a double value. */
		EXPR_ID_DATA__VALUE,
		/* Reference to another metric. */
		EXPR_ID_DATA__REF,
		/* A reference but the value has been computed. */
		EXPR_ID_DATA__REF_VALUE,
	} kind;
};
|
|
|
|
|
libbpf: Hashmap interface update to allow both long and void* keys/values
An update for libbpf's hashmap interface from void* -> void* to a
polymorphic one, allowing both long and void* keys and values.
This simplifies many use cases in libbpf as hashmaps there are mostly
integer to integer.
Perf copies hashmap implementation from libbpf and has to be
updated as well.
Changes to libbpf, selftests/bpf and perf are packed as a single
commit to avoid compilation issues with any future bisect.
Polymorphic interface is achieved by hiding hashmap interface
functions behind auxiliary macros that take care of necessary
type casts, for example:
#define hashmap_cast_ptr(p) \
({ \
_Static_assert((p) == NULL || sizeof(*(p)) == sizeof(long),\
#p " pointee should be a long-sized integer or a pointer"); \
(long *)(p); \
})
bool hashmap_find(const struct hashmap *map, long key, long *value);
#define hashmap__find(map, key, value) \
hashmap_find((map), (long)(key), hashmap_cast_ptr(value))
- hashmap__find macro casts key and value parameters to long
and long* respectively
- hashmap_cast_ptr ensures that value pointer points to a memory
of appropriate size.
This hack was suggested by Andrii Nakryiko in [1].
This is a follow up for [2].
[1] https://lore.kernel.org/bpf/CAEf4BzZ8KFneEJxFAaNCCFPGqp20hSpS2aCj76uRk3-qZUH5xg@mail.gmail.com/
[2] https://lore.kernel.org/bpf/af1facf9-7bc8-8a3d-0db4-7b3f333589a2@meta.com/T/#m65b28f1d6d969fcd318b556db6a3ad499a42607d
Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20221109142611.879983-2-eddyz87@gmail.com
2022-11-09 16:26:09 +02:00
|
|
|
/*
 * Hash a string key (cast to long by the hashmap API) with a
 * 31-based polynomial rolling hash.
 */
static size_t key_hash(long key, void *ctx __maybe_unused)
{
	size_t h = 0;

	for (const char *s = (const char *)key; *s != '\0'; s++)
		h = h * 31 + (size_t)*s;

	return h;
}
|
|
|
|
|
libbpf: Hashmap interface update to allow both long and void* keys/values
An update for libbpf's hashmap interface from void* -> void* to a
polymorphic one, allowing both long and void* keys and values.
This simplifies many use cases in libbpf as hashmaps there are mostly
integer to integer.
Perf copies hashmap implementation from libbpf and has to be
updated as well.
Changes to libbpf, selftests/bpf and perf are packed as a single
commit to avoid compilation issues with any future bisect.
Polymorphic interface is acheived by hiding hashmap interface
functions behind auxiliary macros that take care of necessary
type casts, for example:
#define hashmap_cast_ptr(p) \
({ \
_Static_assert((p) == NULL || sizeof(*(p)) == sizeof(long),\
#p " pointee should be a long-sized integer or a pointer"); \
(long *)(p); \
})
bool hashmap_find(const struct hashmap *map, long key, long *value);
#define hashmap__find(map, key, value) \
hashmap_find((map), (long)(key), hashmap_cast_ptr(value))
- hashmap__find macro casts key and value parameters to long
and long* respectively
- hashmap_cast_ptr ensures that value pointer points to a memory
of appropriate size.
This hack was suggested by Andrii Nakryiko in [1].
This is a follow up for [2].
[1] https://lore.kernel.org/bpf/CAEf4BzZ8KFneEJxFAaNCCFPGqp20hSpS2aCj76uRk3-qZUH5xg@mail.gmail.com/
[2] https://lore.kernel.org/bpf/af1facf9-7bc8-8a3d-0db4-7b3f333589a2@meta.com/T/#m65b28f1d6d969fcd318b556db6a3ad499a42607d
Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20221109142611.879983-2-eddyz87@gmail.com
2022-11-09 16:26:09 +02:00
|
|
|
/* Compare two string keys (cast to long by the hashmap API) for equality. */
static bool key_equal(long key1, long key2, void *ctx __maybe_unused)
{
	return strcmp((const char *)key1, (const char *)key2) == 0;
}
|
|
|
|
|
2021-09-23 00:46:11 -07:00
|
|
|
struct hashmap *ids__new(void)
|
|
|
|
{
|
2021-12-14 01:10:27 +00:00
|
|
|
struct hashmap *hash;
|
|
|
|
|
|
|
|
hash = hashmap__new(key_hash, key_equal, NULL);
|
|
|
|
if (IS_ERR(hash))
|
|
|
|
return NULL;
|
|
|
|
return hash;
|
2021-09-23 00:46:11 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
void ids__free(struct hashmap *ids)
|
|
|
|
{
|
|
|
|
struct hashmap_entry *cur;
|
|
|
|
size_t bkt;
|
|
|
|
|
|
|
|
if (ids == NULL)
|
|
|
|
return;
|
|
|
|
|
|
|
|
hashmap__for_each_entry(ids, cur, bkt) {
|
2023-04-12 09:50:08 -03:00
|
|
|
zfree(&cur->pkey);
|
|
|
|
zfree(&cur->pvalue);
|
2021-09-23 00:46:11 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
hashmap__free(ids);
|
|
|
|
}
|
|
|
|
|
2021-10-15 10:21:20 -07:00
|
|
|
/*
 * Insert 'id' into the set with a NULL value (the id only needs to
 * exist). Any previous key/value for the same id is freed. The caller
 * must pass an allocated 'id'; on success the map owns it.
 */
int ids__insert(struct hashmap *ids, const char *id)
{
	struct expr_id_data *value = NULL, *prev_value = NULL;
	char *prev_key = NULL;
	int err;

	err = hashmap__set(ids, id, value, &prev_key, &prev_value);
	if (err)
		free(value);
	free(prev_key);
	free(prev_value);
	return err;
}
|
|
|
|
|
2021-09-23 00:46:11 -07:00
|
|
|
struct hashmap *ids__union(struct hashmap *ids1, struct hashmap *ids2)
|
|
|
|
{
|
|
|
|
size_t bkt;
|
|
|
|
struct hashmap_entry *cur;
|
|
|
|
int ret;
|
|
|
|
struct expr_id_data *old_data = NULL;
|
|
|
|
char *old_key = NULL;
|
|
|
|
|
|
|
|
if (!ids1)
|
|
|
|
return ids2;
|
|
|
|
|
|
|
|
if (!ids2)
|
|
|
|
return ids1;
|
|
|
|
|
|
|
|
if (hashmap__size(ids1) < hashmap__size(ids2)) {
|
|
|
|
struct hashmap *tmp = ids1;
|
|
|
|
|
|
|
|
ids1 = ids2;
|
|
|
|
ids2 = tmp;
|
|
|
|
}
|
|
|
|
hashmap__for_each_entry(ids2, cur, bkt) {
|
libbpf: Hashmap interface update to allow both long and void* keys/values
An update for libbpf's hashmap interface from void* -> void* to a
polymorphic one, allowing both long and void* keys and values.
This simplifies many use cases in libbpf as hashmaps there are mostly
integer to integer.
Perf copies hashmap implementation from libbpf and has to be
updated as well.
Changes to libbpf, selftests/bpf and perf are packed as a single
commit to avoid compilation issues with any future bisect.
Polymorphic interface is acheived by hiding hashmap interface
functions behind auxiliary macros that take care of necessary
type casts, for example:
#define hashmap_cast_ptr(p) \
({ \
_Static_assert((p) == NULL || sizeof(*(p)) == sizeof(long),\
#p " pointee should be a long-sized integer or a pointer"); \
(long *)(p); \
})
bool hashmap_find(const struct hashmap *map, long key, long *value);
#define hashmap__find(map, key, value) \
hashmap_find((map), (long)(key), hashmap_cast_ptr(value))
- hashmap__find macro casts key and value parameters to long
and long* respectively
- hashmap_cast_ptr ensures that value pointer points to a memory
of appropriate size.
This hack was suggested by Andrii Nakryiko in [1].
This is a follow up for [2].
[1] https://lore.kernel.org/bpf/CAEf4BzZ8KFneEJxFAaNCCFPGqp20hSpS2aCj76uRk3-qZUH5xg@mail.gmail.com/
[2] https://lore.kernel.org/bpf/af1facf9-7bc8-8a3d-0db4-7b3f333589a2@meta.com/T/#m65b28f1d6d969fcd318b556db6a3ad499a42607d
Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20221109142611.879983-2-eddyz87@gmail.com
2022-11-09 16:26:09 +02:00
|
|
|
ret = hashmap__set(ids1, cur->key, cur->value, &old_key, &old_data);
|
2021-09-23 00:46:11 -07:00
|
|
|
free(old_key);
|
|
|
|
free(old_data);
|
|
|
|
|
|
|
|
if (ret) {
|
|
|
|
hashmap__free(ids1);
|
|
|
|
hashmap__free(ids2);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
hashmap__free(ids2);
|
|
|
|
return ids1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Caller must make sure id is allocated */
|
|
|
|
int expr__add_id(struct expr_parse_ctx *ctx, const char *id)
|
|
|
|
{
|
2021-10-15 10:21:20 -07:00
|
|
|
return ids__insert(ctx->ids, id);
|
2021-09-23 00:46:11 -07:00
|
|
|
}
|
|
|
|
|
2020-02-28 10:36:12 +01:00
|
|
|
/* Caller must make sure id is allocated */
int expr__add_id_val(struct expr_parse_ctx *ctx, const char *id, double val)
{
	/* A plain value contributes a single source. */
	const int single_source = 1;

	return expr__add_id_val_source_count(ctx, id, val, single_source);
}
|
|
|
|
|
|
|
|
/* Caller must make sure id is allocated */
|
|
|
|
int expr__add_id_val_source_count(struct expr_parse_ctx *ctx, const char *id,
|
|
|
|
double val, int source_count)
|
2020-02-28 10:36:12 +01:00
|
|
|
{
|
2020-07-12 15:26:18 +02:00
|
|
|
struct expr_id_data *data_ptr = NULL, *old_data = NULL;
|
2020-05-15 15:17:32 -07:00
|
|
|
char *old_key = NULL;
|
|
|
|
int ret;
|
|
|
|
|
2020-07-19 20:13:03 +02:00
|
|
|
data_ptr = malloc(sizeof(*data_ptr));
|
|
|
|
if (!data_ptr)
|
|
|
|
return -ENOMEM;
|
2021-11-10 16:21:09 -08:00
|
|
|
data_ptr->val.val = val;
|
|
|
|
data_ptr->val.source_count = source_count;
|
2020-08-26 08:30:55 -07:00
|
|
|
data_ptr->kind = EXPR_ID_DATA__VALUE;
|
2020-07-19 20:13:03 +02:00
|
|
|
|
libbpf: Hashmap interface update to allow both long and void* keys/values
An update for libbpf's hashmap interface from void* -> void* to a
polymorphic one, allowing both long and void* keys and values.
This simplifies many use cases in libbpf as hashmaps there are mostly
integer to integer.
Perf copies hashmap implementation from libbpf and has to be
updated as well.
Changes to libbpf, selftests/bpf and perf are packed as a single
commit to avoid compilation issues with any future bisect.
Polymorphic interface is acheived by hiding hashmap interface
functions behind auxiliary macros that take care of necessary
type casts, for example:
#define hashmap_cast_ptr(p) \
({ \
_Static_assert((p) == NULL || sizeof(*(p)) == sizeof(long),\
#p " pointee should be a long-sized integer or a pointer"); \
(long *)(p); \
})
bool hashmap_find(const struct hashmap *map, long key, long *value);
#define hashmap__find(map, key, value) \
hashmap_find((map), (long)(key), hashmap_cast_ptr(value))
- hashmap__find macro casts key and value parameters to long
and long* respectively
- hashmap_cast_ptr ensures that value pointer points to a memory
of appropriate size.
This hack was suggested by Andrii Nakryiko in [1].
This is a follow up for [2].
[1] https://lore.kernel.org/bpf/CAEf4BzZ8KFneEJxFAaNCCFPGqp20hSpS2aCj76uRk3-qZUH5xg@mail.gmail.com/
[2] https://lore.kernel.org/bpf/af1facf9-7bc8-8a3d-0db4-7b3f333589a2@meta.com/T/#m65b28f1d6d969fcd318b556db6a3ad499a42607d
Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20221109142611.879983-2-eddyz87@gmail.com
2022-11-09 16:26:09 +02:00
|
|
|
ret = hashmap__set(ctx->ids, id, data_ptr, &old_key, &old_data);
|
2025-07-10 16:51:20 -07:00
|
|
|
if (ret) {
|
2020-07-19 20:13:02 +02:00
|
|
|
free(data_ptr);
|
2025-07-10 16:51:20 -07:00
|
|
|
} else if (old_data) {
|
|
|
|
data_ptr->val.val += old_data->val.val;
|
|
|
|
data_ptr->val.source_count += old_data->val.source_count;
|
|
|
|
}
|
2020-05-15 15:17:32 -07:00
|
|
|
free(old_key);
|
2020-07-12 15:26:18 +02:00
|
|
|
free(old_data);
|
2020-05-15 15:17:32 -07:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2020-07-19 20:13:11 +02:00
|
|
|
int expr__add_ref(struct expr_parse_ctx *ctx, struct metric_ref *ref)
|
|
|
|
{
|
|
|
|
struct expr_id_data *data_ptr = NULL, *old_data = NULL;
|
|
|
|
char *old_key = NULL;
|
2022-10-03 19:15:52 -07:00
|
|
|
char *name;
|
2020-07-19 20:13:11 +02:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
data_ptr = zalloc(sizeof(*data_ptr));
|
|
|
|
if (!data_ptr)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
name = strdup(ref->metric_name);
|
|
|
|
if (!name) {
|
|
|
|
free(data_ptr);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Intentionally passing just const char pointers,
|
|
|
|
* originally from 'struct pmu_event' object.
|
|
|
|
* We don't need to change them, so there's no
|
|
|
|
* need to create our own copy.
|
|
|
|
*/
|
|
|
|
data_ptr->ref.metric_name = ref->metric_name;
|
|
|
|
data_ptr->ref.metric_expr = ref->metric_expr;
|
2020-08-26 08:30:55 -07:00
|
|
|
data_ptr->kind = EXPR_ID_DATA__REF;
|
2020-07-19 20:13:11 +02:00
|
|
|
|
libbpf: Hashmap interface update to allow both long and void* keys/values
An update for libbpf's hashmap interface from void* -> void* to a
polymorphic one, allowing both long and void* keys and values.
This simplifies many use cases in libbpf as hashmaps there are mostly
integer to integer.
Perf copies hashmap implementation from libbpf and has to be
updated as well.
Changes to libbpf, selftests/bpf and perf are packed as a single
commit to avoid compilation issues with any future bisect.
Polymorphic interface is acheived by hiding hashmap interface
functions behind auxiliary macros that take care of necessary
type casts, for example:
#define hashmap_cast_ptr(p) \
({ \
_Static_assert((p) == NULL || sizeof(*(p)) == sizeof(long),\
#p " pointee should be a long-sized integer or a pointer"); \
(long *)(p); \
})
bool hashmap_find(const struct hashmap *map, long key, long *value);
#define hashmap__find(map, key, value) \
hashmap_find((map), (long)(key), hashmap_cast_ptr(value))
- hashmap__find macro casts key and value parameters to long
and long* respectively
- hashmap_cast_ptr ensures that value pointer points to a memory
of appropriate size.
This hack was suggested by Andrii Nakryiko in [1].
This is a follow up for [2].
[1] https://lore.kernel.org/bpf/CAEf4BzZ8KFneEJxFAaNCCFPGqp20hSpS2aCj76uRk3-qZUH5xg@mail.gmail.com/
[2] https://lore.kernel.org/bpf/af1facf9-7bc8-8a3d-0db4-7b3f333589a2@meta.com/T/#m65b28f1d6d969fcd318b556db6a3ad499a42607d
Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20221109142611.879983-2-eddyz87@gmail.com
2022-11-09 16:26:09 +02:00
|
|
|
ret = hashmap__set(ctx->ids, name, data_ptr, &old_key, &old_data);
|
2020-07-19 20:13:11 +02:00
|
|
|
if (ret)
|
|
|
|
free(data_ptr);
|
|
|
|
|
|
|
|
pr_debug2("adding ref metric %s: %s\n",
|
|
|
|
ref->metric_name, ref->metric_expr);
|
|
|
|
|
|
|
|
free(old_key);
|
|
|
|
free(old_data);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2020-07-19 20:13:04 +02:00
|
|
|
int expr__get_id(struct expr_parse_ctx *ctx, const char *id,
|
|
|
|
struct expr_id_data **data)
|
2020-05-15 15:17:32 -07:00
|
|
|
{
|
2025-02-07 07:28:44 -08:00
|
|
|
if (!ctx || !id)
|
|
|
|
return -1;
|
libbpf: Hashmap interface update to allow both long and void* keys/values
An update for libbpf's hashmap interface from void* -> void* to a
polymorphic one, allowing both long and void* keys and values.
This simplifies many use cases in libbpf as hashmaps there are mostly
integer to integer.
Perf copies hashmap implementation from libbpf and has to be
updated as well.
Changes to libbpf, selftests/bpf and perf are packed as a single
commit to avoid compilation issues with any future bisect.
Polymorphic interface is acheived by hiding hashmap interface
functions behind auxiliary macros that take care of necessary
type casts, for example:
#define hashmap_cast_ptr(p) \
({ \
_Static_assert((p) == NULL || sizeof(*(p)) == sizeof(long),\
#p " pointee should be a long-sized integer or a pointer"); \
(long *)(p); \
})
bool hashmap_find(const struct hashmap *map, long key, long *value);
#define hashmap__find(map, key, value) \
hashmap_find((map), (long)(key), hashmap_cast_ptr(value))
- hashmap__find macro casts key and value parameters to long
and long* respectively
- hashmap_cast_ptr ensures that value pointer points to a memory
of appropriate size.
This hack was suggested by Andrii Nakryiko in [1].
This is a follow up for [2].
[1] https://lore.kernel.org/bpf/CAEf4BzZ8KFneEJxFAaNCCFPGqp20hSpS2aCj76uRk3-qZUH5xg@mail.gmail.com/
[2] https://lore.kernel.org/bpf/af1facf9-7bc8-8a3d-0db4-7b3f333589a2@meta.com/T/#m65b28f1d6d969fcd318b556db6a3ad499a42607d
Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20221109142611.879983-2-eddyz87@gmail.com
2022-11-09 16:26:09 +02:00
|
|
|
return hashmap__find(ctx->ids, id, data) ? 0 : -1;
|
2020-02-28 10:36:12 +01:00
|
|
|
}
|
|
|
|
|
2021-10-15 10:21:28 -07:00
|
|
|
bool expr__subset_of_ids(struct expr_parse_ctx *haystack,
|
|
|
|
struct expr_parse_ctx *needles)
|
|
|
|
{
|
|
|
|
struct hashmap_entry *cur;
|
|
|
|
size_t bkt;
|
|
|
|
struct expr_id_data *data;
|
|
|
|
|
|
|
|
hashmap__for_each_entry(needles->ids, cur, bkt) {
|
libbpf: Hashmap interface update to allow both long and void* keys/values
An update for libbpf's hashmap interface from void* -> void* to a
polymorphic one, allowing both long and void* keys and values.
This simplifies many use cases in libbpf as hashmaps there are mostly
integer to integer.
Perf copies hashmap implementation from libbpf and has to be
updated as well.
Changes to libbpf, selftests/bpf and perf are packed as a single
commit to avoid compilation issues with any future bisect.
Polymorphic interface is acheived by hiding hashmap interface
functions behind auxiliary macros that take care of necessary
type casts, for example:
#define hashmap_cast_ptr(p) \
({ \
_Static_assert((p) == NULL || sizeof(*(p)) == sizeof(long),\
#p " pointee should be a long-sized integer or a pointer"); \
(long *)(p); \
})
bool hashmap_find(const struct hashmap *map, long key, long *value);
#define hashmap__find(map, key, value) \
hashmap_find((map), (long)(key), hashmap_cast_ptr(value))
- hashmap__find macro casts key and value parameters to long
and long* respectively
- hashmap_cast_ptr ensures that value pointer points to a memory
of appropriate size.
This hack was suggested by Andrii Nakryiko in [1].
This is a follow up for [2].
[1] https://lore.kernel.org/bpf/CAEf4BzZ8KFneEJxFAaNCCFPGqp20hSpS2aCj76uRk3-qZUH5xg@mail.gmail.com/
[2] https://lore.kernel.org/bpf/af1facf9-7bc8-8a3d-0db4-7b3f333589a2@meta.com/T/#m65b28f1d6d969fcd318b556db6a3ad499a42607d
Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20221109142611.879983-2-eddyz87@gmail.com
2022-11-09 16:26:09 +02:00
|
|
|
if (expr__get_id(haystack, cur->pkey, &data))
|
2021-10-15 10:21:28 -07:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2020-07-19 20:13:12 +02:00
|
|
|
/*
 * Look up 'id' and resolve it to a concrete value: plain values are
 * returned as-is, unresolved metric references have their expression
 * parsed and evaluated first. Returns 0 on success, -1 when the id is
 * missing or its referenced expression fails to evaluate.
 */
int expr__resolve_id(struct expr_parse_ctx *ctx, const char *id,
		     struct expr_id_data **datap)
{
	struct expr_id_data *data;

	if (expr__get_id(ctx, id, datap) || !*datap) {
		pr_debug("%s not found\n", id);
		return -1;
	}

	data = *datap;

	switch (data->kind) {
	case EXPR_ID_DATA__VALUE:
		pr_debug2("lookup(%s): val %f\n", id, data->val.val);
		break;
	case EXPR_ID_DATA__REF:
		pr_debug2("lookup(%s): ref metric name %s\n", id,
			data->ref.metric_name);
		pr_debug("processing metric: %s ENTRY\n", id);
		/*
		 * Mark as computed BEFORE evaluating, so a lookup of this
		 * same id during expr__parse() below takes the REF_VALUE
		 * branch (presumably guards against recursive references —
		 * confirm). Note ref.val overlays val.val in the union.
		 */
		data->kind = EXPR_ID_DATA__REF_VALUE;
		if (expr__parse(&data->ref.val, ctx, data->ref.metric_expr)) {
			pr_debug("%s failed to count\n", id);
			return -1;
		}
		pr_debug("processing metric: %s EXIT: %f\n", id, data->ref.val);
		break;
	case EXPR_ID_DATA__REF_VALUE:
		/* Already evaluated on an earlier lookup. */
		pr_debug2("lookup(%s): ref val %f metric name %s\n", id,
			data->ref.val, data->ref.metric_name);
		break;
	default:
		assert(0);  /* Unreachable. */
	}

	return 0;
}
|
|
|
|
|
2020-07-19 20:13:05 +02:00
|
|
|
void expr__del_id(struct expr_parse_ctx *ctx, const char *id)
|
|
|
|
{
|
|
|
|
struct expr_id_data *old_val = NULL;
|
|
|
|
char *old_key = NULL;
|
|
|
|
|
libbpf: Hashmap interface update to allow both long and void* keys/values
An update for libbpf's hashmap interface from void* -> void* to a
polymorphic one, allowing both long and void* keys and values.
This simplifies many use cases in libbpf as hashmaps there are mostly
integer to integer.
Perf copies hashmap implementation from libbpf and has to be
updated as well.
Changes to libbpf, selftests/bpf and perf are packed as a single
commit to avoid compilation issues with any future bisect.
Polymorphic interface is acheived by hiding hashmap interface
functions behind auxiliary macros that take care of necessary
type casts, for example:
#define hashmap_cast_ptr(p) \
({ \
_Static_assert((p) == NULL || sizeof(*(p)) == sizeof(long),\
#p " pointee should be a long-sized integer or a pointer"); \
(long *)(p); \
})
bool hashmap_find(const struct hashmap *map, long key, long *value);
#define hashmap__find(map, key, value) \
hashmap_find((map), (long)(key), hashmap_cast_ptr(value))
- hashmap__find macro casts key and value parameters to long
and long* respectively
- hashmap_cast_ptr ensures that value pointer points to a memory
of appropriate size.
This hack was suggested by Andrii Nakryiko in [1].
This is a follow up for [2].
[1] https://lore.kernel.org/bpf/CAEf4BzZ8KFneEJxFAaNCCFPGqp20hSpS2aCj76uRk3-qZUH5xg@mail.gmail.com/
[2] https://lore.kernel.org/bpf/af1facf9-7bc8-8a3d-0db4-7b3f333589a2@meta.com/T/#m65b28f1d6d969fcd318b556db6a3ad499a42607d
Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20221109142611.879983-2-eddyz87@gmail.com
2022-11-09 16:26:09 +02:00
|
|
|
hashmap__delete(ctx->ids, id, &old_key, &old_val);
|
2020-07-19 20:13:05 +02:00
|
|
|
free(old_key);
|
|
|
|
free(old_val);
|
|
|
|
}
|
|
|
|
|
2021-09-23 00:46:04 -07:00
|
|
|
struct expr_parse_ctx *expr__ctx_new(void)
|
2020-02-28 10:36:12 +01:00
|
|
|
{
|
2021-09-23 00:46:04 -07:00
|
|
|
struct expr_parse_ctx *ctx;
|
|
|
|
|
2024-11-08 14:34:25 +00:00
|
|
|
ctx = calloc(1, sizeof(struct expr_parse_ctx));
|
2021-09-23 00:46:04 -07:00
|
|
|
if (!ctx)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
ctx->ids = hashmap__new(key_hash, key_equal, NULL);
|
2021-12-12 06:25:02 +00:00
|
|
|
if (IS_ERR(ctx->ids)) {
|
|
|
|
free(ctx);
|
|
|
|
return NULL;
|
|
|
|
}
|
2021-10-15 10:21:20 -07:00
|
|
|
|
2021-09-23 00:46:04 -07:00
|
|
|
return ctx;
|
2020-05-15 15:17:32 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
void expr__ctx_clear(struct expr_parse_ctx *ctx)
|
|
|
|
{
|
|
|
|
struct hashmap_entry *cur;
|
|
|
|
size_t bkt;
|
|
|
|
|
2021-09-23 00:46:04 -07:00
|
|
|
hashmap__for_each_entry(ctx->ids, cur, bkt) {
|
2023-04-12 09:50:08 -03:00
|
|
|
zfree(&cur->pkey);
|
|
|
|
zfree(&cur->pvalue);
|
2021-09-23 00:46:04 -07:00
|
|
|
}
|
|
|
|
hashmap__clear(ctx->ids);
|
|
|
|
}
|
|
|
|
|
|
|
|
void expr__ctx_free(struct expr_parse_ctx *ctx)
|
|
|
|
{
|
|
|
|
struct hashmap_entry *cur;
|
|
|
|
size_t bkt;
|
|
|
|
|
2022-08-31 10:49:25 -07:00
|
|
|
if (!ctx)
|
|
|
|
return;
|
|
|
|
|
2023-04-12 09:50:08 -03:00
|
|
|
zfree(&ctx->sctx.user_requested_cpu_list);
|
2021-09-23 00:46:04 -07:00
|
|
|
hashmap__for_each_entry(ctx->ids, cur, bkt) {
|
2023-04-12 09:50:08 -03:00
|
|
|
zfree(&cur->pkey);
|
|
|
|
zfree(&cur->pvalue);
|
2020-05-15 15:17:32 -07:00
|
|
|
}
|
2021-09-23 00:46:04 -07:00
|
|
|
hashmap__free(ctx->ids);
|
|
|
|
free(ctx);
|
2020-02-28 10:36:12 +01:00
|
|
|
}
|
2020-02-28 10:36:13 +01:00
|
|
|
|
|
|
|
/*
 * Run the flex/bison expression parser over 'expr'.
 * With compute_ids false the expression is evaluated into *val;
 * with compute_ids true the parser records the identifiers it sees
 * into ctx instead. Returns the parser's status (0 on success).
 */
static int
__expr__parse(double *val, struct expr_parse_ctx *ctx, const char *expr,
	      bool compute_ids)
{
	YY_BUFFER_STATE buffer;
	void *scanner;
	int ret;

	pr_debug2("parsing metric: %s\n", expr);

	/* Reentrant scanner: per-call state, seeded with ctx->sctx. */
	ret = expr_lex_init_extra(&ctx->sctx, &scanner);
	if (ret)
		return ret;

	buffer = expr__scan_string(expr, scanner);

#ifdef PARSER_DEBUG
	expr_debug = 1;
	expr_set_debug(1, scanner);
#endif

	ret = expr_parse(val, ctx, compute_ids, scanner);

	/* Tear down in reverse order of setup: buffer first, then scanner. */
	expr__flush_buffer(buffer, scanner);
	expr__delete_buffer(buffer, scanner);
	expr_lex_destroy(scanner);
	return ret;
}
|
|
|
|
|
2020-05-15 15:17:32 -07:00
|
|
|
int expr__parse(double *final_val, struct expr_parse_ctx *ctx,
|
2021-10-15 10:21:16 -07:00
|
|
|
const char *expr)
|
2020-02-28 10:36:13 +01:00
|
|
|
{
|
2021-10-15 10:21:16 -07:00
|
|
|
return __expr__parse(final_val, ctx, expr, /*compute_ids=*/false) ? -1 : 0;
|
2020-02-28 10:36:13 +01:00
|
|
|
}
|
|
|
|
|
2021-09-23 00:46:10 -07:00
|
|
|
int expr__find_ids(const char *expr, const char *one,
|
2021-10-15 10:21:16 -07:00
|
|
|
struct expr_parse_ctx *ctx)
|
2020-02-28 10:36:13 +01:00
|
|
|
{
|
2021-10-15 10:21:16 -07:00
|
|
|
int ret = __expr__parse(NULL, ctx, expr, /*compute_ids=*/true);
|
2020-05-15 15:17:32 -07:00
|
|
|
|
2020-07-19 20:13:05 +02:00
|
|
|
if (one)
|
|
|
|
expr__del_id(ctx, one);
|
2020-02-28 10:36:13 +01:00
|
|
|
|
2020-05-15 15:17:32 -07:00
|
|
|
return ret;
|
2020-02-28 10:36:13 +01:00
|
|
|
}
|
2020-08-26 08:30:55 -07:00
|
|
|
|
|
|
|
double expr_id_data__value(const struct expr_id_data *data)
|
|
|
|
{
|
|
|
|
if (data->kind == EXPR_ID_DATA__VALUE)
|
2021-11-10 16:21:09 -08:00
|
|
|
return data->val.val;
|
2020-08-26 08:30:55 -07:00
|
|
|
assert(data->kind == EXPR_ID_DATA__REF_VALUE);
|
|
|
|
return data->ref.val;
|
|
|
|
}
|
2021-11-10 16:21:06 -08:00
|
|
|
|
2021-11-10 16:21:09 -08:00
|
|
|
/*
 * Return the number of sources that contributed to a plain value.
 * Only valid for EXPR_ID_DATA__VALUE entries.
 */
double expr_id_data__source_count(const struct expr_id_data *data)
{
	assert(data->kind == EXPR_ID_DATA__VALUE);
	return data->val.source_count;
}
|
|
|
|
|
2022-08-31 10:49:25 -07:00
|
|
|
double expr__get_literal(const char *literal, const struct expr_scanner_ctx *ctx)
|
2021-11-10 16:21:06 -08:00
|
|
|
{
|
2021-11-23 16:12:28 -08:00
|
|
|
double result = NAN;
|
perf tool_pmu: Move expr literals to tool_pmu
Add the expr literals like "#smt_on" as tool events, this allows stat
events to give the values. On my laptop with hyperthreading enabled:
```
$ perf stat -e "has_pmem,num_cores,num_cpus,num_cpus_online,num_dies,num_packages,smt_on,system_tsc_freq" true
Performance counter stats for 'true':
0 has_pmem
8 num_cores
16 num_cpus
16 num_cpus_online
1 num_dies
1 num_packages
1 smt_on
2,496,000,000 system_tsc_freq
0.001113637 seconds time elapsed
0.001218000 seconds user
0.000000000 seconds sys
```
And with hyperthreading disabled:
```
$ perf stat -e "has_pmem,num_cores,num_cpus,num_cpus_online,num_dies,num_packages,smt_on,system_tsc_freq" true
Performance counter stats for 'true':
0 has_pmem
8 num_cores
16 num_cpus
8 num_cpus_online
1 num_dies
1 num_packages
0 smt_on
2,496,000,000 system_tsc_freq
0.000802115 seconds time elapsed
0.000000000 seconds user
0.000806000 seconds sys
```
As zero matters for these values, in stat-display
should_skip_zero_counter only skip the zero value if it is not the
first aggregation index.
The tool event implementations are used in expr but not evaluated as
events for simplicity. Also core_wide isn't made a tool event as it
requires command line parameters.
Signed-off-by: Ian Rogers <irogers@google.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Link: https://lore.kernel.org/r/20241002032016.333748-8-irogers@google.com
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
2024-10-01 20:20:10 -07:00
|
|
|
enum tool_pmu_event ev = tool_pmu__str_to_event(literal + 1);
|
2021-11-10 16:21:07 -08:00
|
|
|
|
perf tool_pmu: Move expr literals to tool_pmu
Add the expr literals like "#smt_on" as tool events, this allows stat
events to give the values. On my laptop with hyperthreading enabled:
```
$ perf stat -e "has_pmem,num_cores,num_cpus,num_cpus_online,num_dies,num_packages,smt_on,system_tsc_freq" true
Performance counter stats for 'true':
0 has_pmem
8 num_cores
16 num_cpus
16 num_cpus_online
1 num_dies
1 num_packages
1 smt_on
2,496,000,000 system_tsc_freq
0.001113637 seconds time elapsed
0.001218000 seconds user
0.000000000 seconds sys
```
And with hyperthreading disabled:
```
$ perf stat -e "has_pmem,num_cores,num_cpus,num_cpus_online,num_dies,num_packages,smt_on,system_tsc_freq" true
Performance counter stats for 'true':
0 has_pmem
8 num_cores
16 num_cpus
8 num_cpus_online
1 num_dies
1 num_packages
0 smt_on
2,496,000,000 system_tsc_freq
0.000802115 seconds time elapsed
0.000000000 seconds user
0.000806000 seconds sys
```
As zero matters for these values, in stat-display
should_skip_zero_counter only skip the zero value if it is not the
first aggregation index.
The tool event implementations are used in expr but not evaluated as
events for simplicity. Also core_wide isn't made a tool event as it
requires command line parameters.
Signed-off-by: Ian Rogers <irogers@google.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Link: https://lore.kernel.org/r/20241002032016.333748-8-irogers@google.com
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
2024-10-01 20:20:10 -07:00
|
|
|
if (ev != TOOL_PMU__EVENT_NONE) {
|
|
|
|
u64 count;
|
2021-11-10 16:21:07 -08:00
|
|
|
|
perf tool_pmu: Allow num_cpus(_online) to be specific to a cpumask
For hybrid metrics it is useful to know the number of p-core or e-core
CPUs. If a cpumask is specified for the num_cpus or num_cpus_online
tool events, compute the value relative to the given mask rather than
for the full system.
```
$ sudo /tmp/perf/perf stat -e 'tool/num_cpus/,tool/num_cpus,cpu=cpu_core/,
tool/num_cpus,cpu=cpu_atom/,tool/num_cpus_online/,tool/num_cpus_online,
cpu=cpu_core/,tool/num_cpus_online,cpu=cpu_atom/' true
Performance counter stats for 'true':
28 tool/num_cpus/
16 tool/num_cpus,cpu=cpu_core/
12 tool/num_cpus,cpu=cpu_atom/
28 tool/num_cpus_online/
16 tool/num_cpus_online,cpu=cpu_core/
12 tool/num_cpus_online,cpu=cpu_atom/
0.000767205 seconds time elapsed
0.000938000 seconds user
0.000000000 seconds sys
```
Reviewed-by: Thomas Falcon <thomas.falcon@intel.com>
Signed-off-by: Ian Rogers <irogers@google.com>
Tested-by: James Clark <james.clark@linaro.org>
Link: https://lore.kernel.org/r/20250719030517.1990983-6-irogers@google.com
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
2025-07-18 20:05:07 -07:00
|
|
|
if (tool_pmu__read_event(ev, /*evsel=*/NULL, &count))
|
perf tool_pmu: Move expr literals to tool_pmu
Add the expr literals like "#smt_on" as tool events, this allows stat
events to give the values. On my laptop with hyperthreading enabled:
```
$ perf stat -e "has_pmem,num_cores,num_cpus,num_cpus_online,num_dies,num_packages,smt_on,system_tsc_freq" true
Performance counter stats for 'true':
0 has_pmem
8 num_cores
16 num_cpus
16 num_cpus_online
1 num_dies
1 num_packages
1 smt_on
2,496,000,000 system_tsc_freq
0.001113637 seconds time elapsed
0.001218000 seconds user
0.000000000 seconds sys
```
And with hyperthreading disabled:
```
$ perf stat -e "has_pmem,num_cores,num_cpus,num_cpus_online,num_dies,num_packages,smt_on,system_tsc_freq" true
Performance counter stats for 'true':
0 has_pmem
8 num_cores
16 num_cpus
8 num_cpus_online
1 num_dies
1 num_packages
0 smt_on
2,496,000,000 system_tsc_freq
0.000802115 seconds time elapsed
0.000000000 seconds user
0.000806000 seconds sys
```
As zero matters for these values, in stat-display
should_skip_zero_counter only skip the zero value if it is not the
first aggregation index.
The tool event implementations are used in expr but not evaluated as
events for simplicity. Also core_wide isn't made a tool event as it
requires command line parameters.
Signed-off-by: Ian Rogers <irogers@google.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Link: https://lore.kernel.org/r/20241002032016.333748-8-irogers@google.com
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
2024-10-01 20:20:10 -07:00
|
|
|
result = count;
|
|
|
|
else
|
|
|
|
pr_err("Failure to read '%s'", literal);
|
2022-07-18 09:43:10 -07:00
|
|
|
|
perf tool_pmu: Move expr literals to tool_pmu
Add the expr literals like "#smt_on" as tool events, this allows stat
events to give the values. On my laptop with hyperthreading enabled:
```
$ perf stat -e "has_pmem,num_cores,num_cpus,num_cpus_online,num_dies,num_packages,smt_on,system_tsc_freq" true
Performance counter stats for 'true':
0 has_pmem
8 num_cores
16 num_cpus
16 num_cpus_online
1 num_dies
1 num_packages
1 smt_on
2,496,000,000 system_tsc_freq
0.001113637 seconds time elapsed
0.001218000 seconds user
0.000000000 seconds sys
```
And with hyperthreading disabled:
```
$ perf stat -e "has_pmem,num_cores,num_cpus,num_cpus_online,num_dies,num_packages,smt_on,system_tsc_freq" true
Performance counter stats for 'true':
0 has_pmem
8 num_cores
16 num_cpus
8 num_cpus_online
1 num_dies
1 num_packages
0 smt_on
2,496,000,000 system_tsc_freq
0.000802115 seconds time elapsed
0.000000000 seconds user
0.000806000 seconds sys
```
As zero matters for these values, in stat-display
should_skip_zero_counter only skip the zero value if it is not the
first aggregation index.
The tool event implementations are used in expr but not evaluated as
events for simplicity. Also core_wide isn't made a tool event as it
requires command line parameters.
Signed-off-by: Ian Rogers <irogers@google.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Link: https://lore.kernel.org/r/20241002032016.333748-8-irogers@google.com
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
2024-10-01 20:20:10 -07:00
|
|
|
} else if (!strcmp("#core_wide", literal)) {
|
2023-02-19 01:28:04 -08:00
|
|
|
result = core_wide(ctx->system_wide, ctx->user_requested_cpu_list)
|
2022-08-31 10:49:25 -07:00
|
|
|
? 1.0 : 0.0;
|
perf tool_pmu: Move expr literals to tool_pmu
Add the expr literals like "#smt_on" as tool events, this allows stat
events to give the values. On my laptop with hyperthreading enabled:
```
$ perf stat -e "has_pmem,num_cores,num_cpus,num_cpus_online,num_dies,num_packages,smt_on,system_tsc_freq" true
Performance counter stats for 'true':
0 has_pmem
8 num_cores
16 num_cpus
16 num_cpus_online
1 num_dies
1 num_packages
1 smt_on
2,496,000,000 system_tsc_freq
0.001113637 seconds time elapsed
0.001218000 seconds user
0.000000000 seconds sys
```
And with hyperthreading disabled:
```
$ perf stat -e "has_pmem,num_cores,num_cpus,num_cpus_online,num_dies,num_packages,smt_on,system_tsc_freq" true
Performance counter stats for 'true':
0 has_pmem
8 num_cores
16 num_cpus
8 num_cpus_online
1 num_dies
1 num_packages
0 smt_on
2,496,000,000 system_tsc_freq
0.000802115 seconds time elapsed
0.000000000 seconds user
0.000806000 seconds sys
```
As zero matters for these values, in stat-display
should_skip_zero_counter only skip the zero value if it is not the
first aggregation index.
The tool event implementations are used in expr but not evaluated as
events for simplicity. Also core_wide isn't made a tool event as it
requires command line parameters.
Signed-off-by: Ian Rogers <irogers@google.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Link: https://lore.kernel.org/r/20241002032016.333748-8-irogers@google.com
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
2024-10-01 20:20:10 -07:00
|
|
|
} else {
|
|
|
|
pr_err("Unrecognized literal '%s'", literal);
|
2023-03-24 00:22:17 -07:00
|
|
|
}
|
2021-11-10 16:21:07 -08:00
|
|
|
|
2021-11-23 16:12:28 -08:00
|
|
|
pr_debug2("literal: %s = %f\n", literal, result);
|
|
|
|
return result;
|
2021-11-10 16:21:06 -08:00
|
|
|
}
|
2023-06-23 08:10:05 -07:00
|
|
|
|
|
|
|
/* Does the event 'id' parse? Determine via ctx->ids if possible. */
|
|
|
|
double expr__has_event(const struct expr_parse_ctx *ctx, bool compute_ids, const char *id)
|
|
|
|
{
|
|
|
|
struct evlist *tmp;
|
|
|
|
double ret;
|
|
|
|
|
|
|
|
if (hashmap__find(ctx->ids, id, /*value=*/NULL))
|
|
|
|
return 1.0;
|
|
|
|
|
|
|
|
if (!compute_ids)
|
|
|
|
return 0.0;
|
|
|
|
|
|
|
|
tmp = evlist__new();
|
|
|
|
if (!tmp)
|
|
|
|
return NAN;
|
2024-02-09 12:49:45 -08:00
|
|
|
|
|
|
|
if (strchr(id, '@')) {
|
|
|
|
char *tmp_id, *p;
|
|
|
|
|
|
|
|
tmp_id = strdup(id);
|
|
|
|
if (!tmp_id) {
|
|
|
|
ret = NAN;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
p = strchr(tmp_id, '@');
|
|
|
|
*p = '/';
|
|
|
|
p = strrchr(tmp_id, '@');
|
|
|
|
*p = '/';
|
|
|
|
ret = parse_event(tmp, tmp_id) ? 0 : 1;
|
|
|
|
free(tmp_id);
|
|
|
|
} else {
|
|
|
|
ret = parse_event(tmp, id) ? 0 : 1;
|
|
|
|
}
|
|
|
|
out:
|
2023-06-23 08:10:05 -07:00
|
|
|
evlist__delete(tmp);
|
|
|
|
return ret;
|
|
|
|
}
|
2023-08-16 12:47:46 +01:00
|
|
|
|
|
|
|
/*
 * Return 1.0 when 'test_id' matches the running CPU's cpuid string
 * (the lookup honors the environment override, per the helper's name),
 * 0.0 when it does not, and NAN if the cpuid cannot be determined.
 */
double expr__strcmp_cpuid_str(const struct expr_parse_ctx *ctx __maybe_unused,
		       bool compute_ids __maybe_unused, const char *test_id)
{
	struct perf_cpu any_cpu = {-1};
	char *cpuid = get_cpuid_allow_env_override(any_cpu);
	double matched;

	if (!cpuid)
		return NAN;

	/* strcmp_cpuid_str() returns zero on a match. */
	matched = strcmp_cpuid_str(test_id, cpuid) ? 0 : 1;
	free(cpuid);
	return matched;
}
|