#
# arch/arm64/Makefile
#
# This file is included by the global makefile so that you can add your own
# architecture-specific flags and dependencies.
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 1995-2001 by Russell King

LDFLAGS_vmlinux	:= --no-undefined -X

ifeq ($(CONFIG_RELOCATABLE), y)
# Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
# for relative relocs, since this leads to better Image compression
# with the relocation offsets always being zero.
LDFLAGS_vmlinux	+= -shared -Bsymbolic -z notext \
			$(call ld-option, --no-apply-dynamic-relocs)
endif
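
# Cortex-A53 erratum 843419 is handled at link time: --fix-cortex-a53-843419
# makes the linker detect ADRP instructions that could hit the erratum and
# patch them (e.g. via a branch to a veneer). Warn if the linker is too old
# to provide that workaround.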
ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
  ifeq ($(call ld-option, --fix-cortex-a53-843419),)
$(warning ld does not support --fix-cortex-a53-843419; kernel may be susceptible to erratum)
  else
LDFLAGS_vmlinux	+= --fix-cortex-a53-843419
  endif
endif

ifeq ($(CONFIG_ARM64_USE_LSE_ATOMICS), y)
  ifneq ($(CONFIG_ARM64_LSE_ATOMICS), y)
$(warning LSE atomics not supported by binutils)
  endif
endif
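
# Probe the compiler's handling of the "K" (32-bit logical immediate) asm
# constraint: 4294967295 (0xffffffff) is not a valid logical immediate, so a
# well-behaved compiler must reject it. In that case try-run's failure branch
# defines CONFIG_CC_HAS_K_CONSTRAINT, and inline asm may use "K" safely.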
cc_has_k_constraint := $(call try-run,echo				\
	'int main(void) {						\
		asm volatile("and w0, w0, %w0" :: "K" (4294967295));	\
		return 0;						\
	}' | $(CC) -S -x c -o "$$TMP" -,,-DCONFIG_CC_HAS_K_CONSTRAINT=1)

ifeq ($(CONFIG_BROKEN_GAS_INST),y)
$(warning Detected assembler with broken .inst; disassembly will be unreliable)
endif

KBUILD_CFLAGS	+= -mgeneral-regs-only	\
		   $(compat_vdso) $(cc_has_k_constraint)
KBUILD_CFLAGS	+= $(call cc-disable-warning, psabi)
KBUILD_AFLAGS	+= $(compat_vdso)

KBUILD_CFLAGS	+= $(call cc-option,-mabi=lp64)
KBUILD_AFLAGS	+= $(call cc-option,-mabi=lp64)

# Avoid generating .eh_frame* sections.
KBUILD_CFLAGS	+= -fno-asynchronous-unwind-tables -fno-unwind-tables
KBUILD_AFLAGS	+= -fno-asynchronous-unwind-tables -fno-unwind-tables
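
# With the per-task stack protector, the canary is read through sp_el0 (which
# holds the current task pointer in the kernel) at the TSK_STACK_CANARY offset.
# That offset is only known once asm-offsets.h has been generated, so the flags
# are added from a rule that runs after prepare0.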
ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y)
prepare: stack_protector_prepare
stack_protector_prepare: prepare0
	$(eval KBUILD_CFLAGS += -mstack-protector-guard=sysreg		\
				-mstack-protector-guard-reg=sp_el0	\
				-mstack-protector-guard-offset=$(shell	\
			awk '{if ($$2 == "TSK_STACK_CANARY") print $$3;}' \
				include/generated/asm-offsets.h))
endif

# Ensure that if the compiler supports branch protection we default it to
# off; this will be overridden if branch protection is in use.
branch-prot-flags-y += $(call cc-option,-mbranch-protection=none)

ifeq ($(CONFIG_ARM64_PTR_AUTH),y)
branch-prot-flags-$(CONFIG_CC_HAS_SIGN_RETURN_ADDRESS) := -msign-return-address=all
# We enable additional protection for leaf functions as there is some
# narrow potential for ROP protection benefits and no substantial
# performance impact has been observed.
ifeq ($(CONFIG_ARM64_BTI_KERNEL),y)
branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI) := -mbranch-protection=pac-ret+leaf+bti
else
branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET) := -mbranch-protection=pac-ret+leaf
endif
# -march=armv8.3-a enables the non-NOP PAC instructions. To avoid the compiler
# generating them and consequently breaking the single-image contract, we pass
# the option only to the assembler. It is only needed when a non-integrated
# assembler is used.
ifeq ($(CONFIG_AS_HAS_PAC), y)
asm-arch := armv8.3-a
endif
endif

KBUILD_CFLAGS += $(branch-prot-flags-y)

ifeq ($(CONFIG_AS_HAS_ARMV8_4), y)
# make sure to pass the newest target architecture to -march.
asm-arch := armv8.4-a
endif

ifeq ($(CONFIG_AS_HAS_ARMV8_5), y)
# make sure to pass the newest target architecture to -march.
asm-arch := armv8.5-a
endif
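
# asm-arch ends up holding the newest architecture revision the assembler
# supports (each check above overwrites the previous value). It is passed to
# the assembler only, via -Wa, and exposed to source code as the
# ARM64_ASM_ARCH string macro.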
ifdef asm-arch
KBUILD_CFLAGS	+= -Wa,-march=$(asm-arch) \
		   -DARM64_ASM_ARCH='"$(asm-arch)"'
endif
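
# The shadow call stack ABI on arm64 keeps the shadow stack pointer in x18,
# so reserve that register throughout the kernel.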
ifeq ($(CONFIG_SHADOW_CALL_STACK), y)
KBUILD_CFLAGS	+= -ffixed-x18
endif

ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
KBUILD_CPPFLAGS	+= -mbig-endian
CHECKFLAGS	+= -D__AARCH64EB__
# Prefer the baremetal ELF build target, but not all toolchains include
# it, so fall back to the standard linux version if needed.
KBUILD_LDFLAGS	+= -EB $(call ld-option, -maarch64elfb, -maarch64linuxb -z norelro)
UTS_MACHINE	:= aarch64_be
else
KBUILD_CPPFLAGS	+= -mlittle-endian
CHECKFLAGS	+= -D__AARCH64EL__
# Same as above: prefer ELF, but fall back to the linux target if needed.
KBUILD_LDFLAGS	+= -EL $(call ld-option, -maarch64elf, -maarch64linux -z norelro)
UTS_MACHINE	:= aarch64
endif

ifeq ($(CONFIG_LD_IS_LLD), y)
KBUILD_LDFLAGS	+= -z norelro
endif

CHECKFLAGS	+= -D__aarch64__
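
# -fpatchable-function-entry=2 places two NOPs at the start of every function;
# dynamic ftrace later rewrites them to set up the call into the tracer, which
# is what DYNAMIC_FTRACE_WITH_REGS relies on.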
ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_REGS),y)
  KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY
  CC_FLAGS_FTRACE := -fpatchable-function-entry=2
endif

# Default value
head-y		:= arch/arm64/kernel/head.o
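
# KASAN shadow granularity: generic KASAN tracks one shadow byte per 8 bytes
# of memory (scale shift 3), while software tag-based KASAN uses one shadow
# byte per 16 bytes (scale shift 4).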
ifeq ($(CONFIG_KASAN_SW_TAGS), y)
KASAN_SHADOW_SCALE_SHIFT := 4
else ifeq ($(CONFIG_KASAN_GENERIC), y)
KASAN_SHADOW_SCALE_SHIFT := 3
endif

KBUILD_CFLAGS	+= -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
KBUILD_CPPFLAGS	+= -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
KBUILD_AFLAGS	+= -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)

core-y		+= arch/arm64/
libs-y		:= arch/arm64/lib/ $(libs-y)
libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a

# Default target when executing plain make
boot		:= arch/arm64/boot
KBUILD_IMAGE	:= $(boot)/Image.gz

# Don't compile Image in mixed build with "all" target
ifndef KBUILD_MIXED_TREE
all:	Image.gz
endif
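
# Image is generated from vmlinux, and Image.% (e.g. Image.gz) from Image,
# by delegating to the Makefile in $(boot).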
Image: vmlinux
	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@

Image.%: Image
	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@

zinstall install:
	$(Q)$(MAKE) $(build)=$(boot) $@

PHONY += vdso_install
vdso_install:
	$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@
	$(if $(CONFIG_COMPAT_VDSO), \
		$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 $@)

# We use MRPROPER_FILES and CLEAN_FILES now
archclean:
	$(Q)$(MAKE) $(clean)=$(boot)

ifeq ($(KBUILD_EXTMOD),)
# We need to generate vdso-offsets.h before compiling certain files in kernel/.
# In order to do that, we should use the archprepare target, but we can't since
# asm-offsets.h is included in some files used to generate vdso-offsets.h, and
# asm-offsets.h is built in prepare0, for which archprepare is a dependency.
# Therefore we need to generate the header after prepare0 has been made, hence
# this hack.
prepare: vdso_prepare
vdso_prepare: prepare0
	$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso include/generated/vdso-offsets.h
	$(if $(CONFIG_COMPAT_VDSO),$(Q)$(MAKE) \
		$(build)=arch/arm64/kernel/vdso32 \
		include/generated/vdso32-offsets.h)
endif

define archhelp
  echo  '* Image.gz      - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)'
  echo  '  Image         - Uncompressed kernel image (arch/$(ARCH)/boot/Image)'
  echo  '  install       - Install uncompressed kernel'
  echo  '  zinstall      - Install compressed kernel'
  echo  '                  Install using (your) ~/bin/installkernel or'
  echo  '                  (distribution) /sbin/installkernel or'
  echo  '                  install to $$(INSTALL_PATH) and run lilo'
endef