Skip to content

Commit

Permalink
create-diff-object: Create __patchable_function_entries sections
Browse files Browse the repository at this point in the history
The __mcount_loc section contains the addresses of patchable ftrace
sites, which are used by the ftrace infrastructure in the kernel to create
a list of traceable functions and to know where to patch to enable
tracing of them.  On some kernel configurations, the section is called
__patchable_function_entries and is generated by the compiler.  Either of
__mcount_loc or __patchable_function_entries is recognised by the kernel,
but for these configurations, use __patchable_function_entries as it is
what is expected.

The x86_64 arch is special (of course).  Unlike other arches (ppc64le
and aarch64), an x86_64 kernel built with -fpatchable-function-entry will
generate nops AND create rela__patchable_function_entries entries even
for functions marked as notrace.  For this arch, always create __mcount_loc
sections and rely on __fentry__ relocations to indicate ftrace call sites.

Note: this patch is a refactoring of original code by Pete Swain
<[email protected]> for aarch64.  At the same time, this version squashes
several follow-up commits from him and zimao <[email protected]>.  The
intent is to minimize the eventual changeset for aarch64 support now that
other arches are making use of __patchable_function_entries sections.

Signed-off-by: Joe Lawrence <[email protected]>
  • Loading branch information
joe-lawrence committed Dec 3, 2024
1 parent 17b795b commit 7b6fee6
Show file tree
Hide file tree
Showing 3 changed files with 171 additions and 46 deletions.
192 changes: 148 additions & 44 deletions kpatch-build/create-diff-object.c
Original file line number Diff line number Diff line change
Expand Up @@ -615,9 +615,14 @@ static void kpatch_compare_correlated_section(struct section *sec)
!is_text_section(sec1)))
DIFF_FATAL("%s section header details differ from %s", sec1->name, sec2->name);

/* Short circuit for mcount sections, we rebuild regardless */
/*
* Short circuit for mcount and patchable_function_entries
* sections, we rebuild regardless
*/
if (!strcmp(sec->name, ".rela__mcount_loc") ||
!strcmp(sec->name, "__mcount_loc")) {
!strcmp(sec->name, "__mcount_loc") ||
!strcmp(sec->name, ".rela__patchable_function_entries") ||
!strcmp(sec->name, "__patchable_function_entries")) {
sec->status = SAME;
goto out;
}
Expand Down Expand Up @@ -3676,31 +3681,66 @@ static void kpatch_create_callbacks_objname_rela(struct kpatch_elf *kelf, char *
}
}

/*
* Create links between text sections and their corresponding
* __patchable_function_entries sections (as there may be multiple pfe
* sections).
*/
/*
 * Link symbols to their owning __patchable_function_entries sections.
 *
 * A kelf may contain several pfe sections, so walk them all: for every
 * rela carried by a pfe section's rela list, record that pfe section in
 * the referenced symbol's ->pfe pointer for later lookup.
 */
static void kpatch_set_pfe_link(struct kpatch_elf *kelf)
{
	struct section *pfe_sec;
	struct rela *entry;

	/* Nothing to link if no pfe sections were detected at open time. */
	if (!kelf->has_pfe)
		return;

	list_for_each_entry(pfe_sec, &kelf->sections, list) {
		/* Skip non-pfe sections and pfe sections without relas. */
		if (strcmp(pfe_sec->name, "__patchable_function_entries") ||
		    !pfe_sec->rela)
			continue;

		list_for_each_entry(entry, &pfe_sec->rela->relas, list)
			entry->sym->pfe = pfe_sec;
	}
}

/*
* This function basically reimplements the functionality of the Linux
* recordmcount script, so that patched functions can be recognized by ftrace.
*
* TODO: Eventually we can modify recordmount so that it recognizes our bundled
* sections as valid and does this work for us.
*/
static void kpatch_create_mcount_sections(struct kpatch_elf *kelf)
static void kpatch_create_ftrace_callsite_sections(struct kpatch_elf *kelf, bool has_pfe)
{
int nr, index;
struct section *sec, *relasec;
struct symbol *sym;
struct rela *rela, *mcount_rela;
struct section *sec = NULL;
struct symbol *sym, *rela_sym;
struct rela *rela;
void **funcs;
unsigned long insn_offset = 0;
unsigned int rela_offset;

nr = 0;
list_for_each_entry(sym, &kelf->symbols, list)
if (sym->type == STT_FUNC && sym->status != SAME &&
sym->has_func_profiling)
nr++;

/* create text/rela section pair */
sec = create_section_pair(kelf, "__mcount_loc", sizeof(void*), nr);
relasec = sec->rela;
if (has_pfe)
/*
* Create separate __patchable_function_entries sections
* for each function in the following loop.
*/
kelf->has_pfe = true;
else
/*
* Create a single __mcount_loc section pair for all
* functions.
*/
sec = create_section_pair(kelf, "__mcount_loc", sizeof(void*), nr);

/* populate sections */
index = 0;
Expand All @@ -3709,25 +3749,37 @@ static void kpatch_create_mcount_sections(struct kpatch_elf *kelf)
continue;

if (!sym->has_func_profiling) {
log_debug("function %s has no fentry/mcount call, no mcount record is needed\n",
log_debug("function %s has no ftrace callsite, no __patchable_function_entries/mcount record is needed\n",
sym->name);
continue;
}

switch(kelf->arch) {
case PPC64: {
bool found = false;
unsigned char *insn;

list_for_each_entry(rela, &sym->sec->rela->relas, list)
if (!strcmp(rela->sym->name, "_mcount")) {
found = true;
break;
}
if (kelf->has_pfe) {
insn_offset = sym->sym.st_value + PPC64_LOCAL_ENTRY_OFFSET(sym->sym.st_other);
insn = sym->sec->data->d_buf + insn_offset;

/* verify nops */
if (insn[0] != 0x00 || insn[1] != 0x00 || insn[2] != 0x00 || insn[3] != 0x60 ||
insn[4] != 0x00 || insn[5] != 0x00 || insn[6] != 0x00 || insn[7] != 0x60)
ERROR("%s: unexpected instruction in patch section of function\n", sym->name);
} else {
bool found = false;

if (!found)
ERROR("%s: unexpected missing call to _mcount()", __func__);
list_for_each_entry(rela, &sym->sec->rela->relas, list)
if (!strcmp(rela->sym->name, "_mcount")) {
found = true;
break;
}

insn_offset = rela->offset;
if (!found)
ERROR("%s: unexpected missing call to _mcount()", __func__);

insn_offset = rela->offset;
}
break;
}
case X86_64: {
Expand Down Expand Up @@ -3783,16 +3835,31 @@ static void kpatch_create_mcount_sections(struct kpatch_elf *kelf)
ERROR("unsupported arch");
}

/*
* 'rela' points to the mcount/fentry call.
*
* Create a .rela__mcount_loc entry which also points to it.
*/
ALLOC_LINK(mcount_rela, &relasec->relas);
mcount_rela->sym = sym;
mcount_rela->type = absolute_rela_type(kelf);
mcount_rela->addend = insn_offset - sym->sym.st_value;
mcount_rela->offset = (unsigned int) (index * sizeof(*funcs));
if (kelf->has_pfe) {
/*
* Allocate a dedicated __patchable_function_entries for this function:
* - its .sh_link will be updated by kpatch_reindex_elements()
* - its lone rela is based on the section symbol
*/
sec = create_section_pair(kelf, "__patchable_function_entries", sizeof(void *), 1);
sec->sh.sh_flags |= SHF_WRITE | SHF_ALLOC | SHF_LINK_ORDER;
rela_sym = sym->sec->secsym;
rela_offset = 0;
rela_sym->pfe = sec;
} else {
/*
* mcount relas are based on the function symbol and saved in a
* single aggregate __mcount_loc section
*/
rela_sym = sym;
rela_offset = (unsigned int) (index * sizeof(*funcs));
}

ALLOC_LINK(rela, &sec->rela->relas);
rela->sym = rela_sym;
rela->type = absolute_rela_type(kelf);
rela->addend = insn_offset - rela->sym->sym.st_value;
rela->offset = rela_offset;

index++;
}
Expand Down Expand Up @@ -3945,36 +4012,66 @@ static void kpatch_no_sibling_calls_ppc64le(struct kpatch_elf *kelf)
sibling_call_errors);
}

/*
 * Report whether @sym's containing text section is referenced from one of
 * the kelf's __patchable_function_entries sections, i.e. whether the
 * compiler emitted a patchable ftrace callsite for this function.
 *
 * Returns false immediately when the kelf has no pfe sections at all.
 */
static bool kpatch_symbol_has_pfe_entry(struct kpatch_elf *kelf, struct symbol *sym)
{
	struct section *pfe_sec;
	struct rela *r;

	if (!kelf->has_pfe)
		return false;

	list_for_each_entry(pfe_sec, &kelf->sections, list) {
		if (strcmp(pfe_sec->name, "__patchable_function_entries") ||
		    !pfe_sec->rela)
			continue;

		list_for_each_entry(r, &pfe_sec->rela->relas, list) {
			/*
			 * pfe relas are based on section symbols, so match on
			 * the rela symbol's section and confirm that symbol
			 * was linked to this particular pfe section.
			 */
			if (r->sym->sec && r->sym->sec == sym->sec &&
			    r->sym->pfe == pfe_sec)
				return true;
		}
	}

	return false;
}

/* Check which functions have fentry/mcount calls; save this info for later use. */
static void kpatch_find_func_profiling_calls(struct kpatch_elf *kelf)
{
struct symbol *sym;
struct rela *rela;
unsigned char *insn;
list_for_each_entry(sym, &kelf->symbols, list) {
if (sym->type != STT_FUNC || sym->is_pfx ||
!sym->sec || !sym->sec->rela)
if (sym->type != STT_FUNC || sym->is_pfx || !sym->sec)
continue;

switch(kelf->arch) {
case PPC64:
list_for_each_entry(rela, &sym->sec->rela->relas, list) {
if (!strcmp(rela->sym->name, "_mcount")) {
sym->has_func_profiling = 1;
break;
if (kpatch_symbol_has_pfe_entry(kelf, sym)) {
sym->has_func_profiling = 1;
} else if (sym->sec->rela) {
list_for_each_entry(rela, &sym->sec->rela->relas, list) {
if (!strcmp(rela->sym->name, "_mcount")) {
sym->has_func_profiling = 1;
break;
}
}
}
break;
case X86_64:
rela = list_first_entry(&sym->sec->rela->relas, struct rela,
list);
if ((rela->type != R_X86_64_NONE &&
rela->type != R_X86_64_PC32 &&
rela->type != R_X86_64_PLT32) ||
strcmp(rela->sym->name, "__fentry__"))
continue;
if (sym->sec->rela) {
rela = list_first_entry(&sym->sec->rela->relas, struct rela,
list);
if ((rela->type != R_X86_64_NONE &&
rela->type != R_X86_64_PC32 &&
rela->type != R_X86_64_PLT32) ||
strcmp(rela->sym->name, "__fentry__"))
continue;

sym->has_func_profiling = 1;
sym->has_func_profiling = 1;
}
break;
case S390:
/* Check for compiler generated fentry nop - jgnop 0 */
Expand Down Expand Up @@ -4045,6 +4142,7 @@ int main(int argc, char *argv[])
struct section *relasec, *symtab;
char *orig_obj, *patched_obj, *parent_name;
char *parent_symtab, *mod_symvers, *patch_name, *output_obj;
bool has_pfe = false;

memset(&arguments, 0, sizeof(arguments));
argp_parse (&argp, argc, argv, 0, NULL, &arguments);
Expand All @@ -4067,6 +4165,12 @@ int main(int argc, char *argv[])

kelf_orig = kpatch_elf_open(orig_obj);
kelf_patched = kpatch_elf_open(patched_obj);

kpatch_set_pfe_link(kelf_orig);
kpatch_set_pfe_link(kelf_patched);
if (kelf_patched->has_pfe)
has_pfe = true;

kpatch_find_func_profiling_calls(kelf_orig);
kpatch_find_func_profiling_calls(kelf_patched);

Expand Down Expand Up @@ -4146,7 +4250,7 @@ int main(int argc, char *argv[])
kpatch_create_callbacks_objname_rela(kelf_out, parent_name);
kpatch_build_strings_section_data(kelf_out);

kpatch_create_mcount_sections(kelf_out);
kpatch_create_ftrace_callsite_sections(kelf_out, has_pfe);

/*
* At this point, the set of output sections and symbols is
Expand Down
23 changes: 21 additions & 2 deletions kpatch-build/kpatch-elf.c
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -608,6 +608,16 @@ struct kpatch_elf *kpatch_elf_open(const char *name)
kpatch_create_rela_list(kelf, relasec);
}

/*
 * x86_64's pfe sections are only a side effect of building with
 * CONFIG_CALL_PADDING and -fpatchable-function-entry=16,16.
 * These sections aren't used by ftrace on this arch, so do not
 * bother reading/writing them for x86_64.
 */
if (kelf->arch != X86_64)
if (find_section_by_name(&kelf->sections, "__patchable_function_entries"))
kelf->has_pfe = true;

return kelf;
}

Expand Down Expand Up @@ -643,6 +653,9 @@ void kpatch_dump_kelf(struct kpatch_elf *kelf)
printf(", secsym-> %s", sec->secsym->name);
if (sec->rela)
printf(", rela-> %s", sec->rela->name);
if (sec->secsym && sec->secsym->pfe)
printf(", pfe-> [%d]",
(sec->secsym->pfe) == NULL ? -1 : (int)sec->secsym->pfe->index);
}
next:
printf("\n");
Expand All @@ -653,8 +666,10 @@ void kpatch_dump_kelf(struct kpatch_elf *kelf)
printf("sym %02d, type %d, bind %d, ndx %02d, name %s (%s)",
sym->index, sym->type, sym->bind, sym->sym.st_shndx,
sym->name, status_str(sym->status));
if (sym->sec && (sym->type == STT_FUNC || sym->type == STT_OBJECT))
if (sym->sec && (sym->type == STT_FUNC || sym->type == STT_OBJECT)) {
printf(" -> %s", sym->sec->name);
printf(", profiling: %d", sym->has_func_profiling);
}
printf("\n");
}
}
Expand Down Expand Up @@ -923,6 +938,7 @@ struct section *create_section_pair(struct kpatch_elf *kelf, char *name,
relasec->sh.sh_type = SHT_RELA;
relasec->sh.sh_entsize = sizeof(GElf_Rela);
relasec->sh.sh_addralign = 8;
relasec->sh.sh_flags = SHF_INFO_LINK;

/* set text rela section pointer */
sec->rela = relasec;
Expand Down Expand Up @@ -977,8 +993,11 @@ void kpatch_reindex_elements(struct kpatch_elf *kelf)
index = 0;
list_for_each_entry(sym, &kelf->symbols, list) {
sym->index = index++;
if (sym->sec)
if (sym->sec) {
sym->sym.st_shndx = (unsigned short)sym->sec->index;
if (sym->pfe)
sym->pfe->sh.sh_link = sym->sec->index;
}
else if (sym->sym.st_shndx != SHN_ABS &&
sym->sym.st_shndx != SHN_LIVEPATCH)
sym->sym.st_shndx = SHN_UNDEF;
Expand Down
2 changes: 2 additions & 0 deletions kpatch-build/kpatch-elf.h
Original file line number Diff line number Diff line change
Expand Up @@ -93,6 +93,7 @@ struct symbol {
};
int has_func_profiling;
bool is_pfx;
struct section *pfe;
};

struct rela {
Expand Down Expand Up @@ -125,6 +126,7 @@ struct kpatch_elf {
struct list_head strings;
Elf_Data *symtab_shndx;
int fd;
bool has_pfe;
};

/*******************
Expand Down

0 comments on commit 7b6fee6

Please sign in to comment.