From 0c652adeefae174c6d89c970b1f1da375d7de706 Mon Sep 17 00:00:00 2001
From: Kyle McMartin
Date: Jun 26 2014 15:47:18 +0000
Subject: Add kernel-arm64.patch, see %changelog for more information

---

diff --git a/config-arm64 b/config-arm64
index fece9cc..cea18f1 100644
--- a/config-arm64
+++ b/config-arm64
@@ -87,8 +87,10 @@ CONFIG_POWER_RESET_XGENE=y
 CONFIG_COMMON_CLK_XGENE=y
 CONFIG_AHCI_XGENE=m
-CONFIG_PHY_XGENE=m
+CONFIG_PHY_XGENE=y
+CONFIG_NET_XGENE=m
 CONFIG_RTC_DRV_XGENE=m
+CONFIG_RTC_DRV_EFI=m

 # not arm64
 # CONFIG_GPIO_ADNP is not set
diff --git a/config-generic b/config-generic
index e9162d7..0a27448 100644
--- a/config-generic
+++ b/config-generic
@@ -5246,3 +5246,6 @@ CONFIG_FMC_CHARDEV=m
 # CONFIG_CRYPTO_KEY_TYPE is not set
 # CONFIG_PGP_LIBRARY is not set
 # CONFIG_PGP_PRELOAD is not set
+
+# CONFIG_RTC_DRV_EFI is not set
+# CONFIG_NET_XGENE is not set
diff --git a/kernel-arm64.patch b/kernel-arm64.patch
new file mode 100644
index 0000000..56f64d1
--- /dev/null
+++ b/kernel-arm64.patch
@@ -0,0 +1,7644 @@
+git clone ssh://git.fedorahosted.org/git/kernel-arm64.git
+git diff -p master...origin/devel >kernel-arm64.patch
+
+commit 0555d24c0fb9ce825a0eb16f1b8b4a73f5014408
+Author: Mark Salter
+Date: Tue Jun 24 23:16:45 2014 -0400
+
+ perf: fix arm64 build error
+
+ I'm seeing the following build error on arm64:
+
+ In file included from util/event.c:3:0:
+ util/event.h:95:17: error: 'PERF_REGS_MAX' undeclared here (not in a function)
+ u64 cache_regs[PERF_REGS_MAX];
+ ^
+
+ This patch adds a PERF_REGS_MAX definition for arm64.
+
+ Signed-off-by: Mark Salter
+
+commit ab1e5ae69aa0c7461a305c1f161229f8a22aff2b
+Author: Mark Salter
+Date: Mon Jun 23 00:34:17 2014 -0400
+
+ arm64: fix CONFIG_ZONE_DMA on systems with no 32-bit addressable DRAM
+
+ Commit 2d5a5612bc (arm64: Limit the CMA buffer to 32-bit if ZONE_DMA)
+ forces the CMA buffer to be 32-bit addressable if CONFIG_ZONE_DMA is
+ defined. This breaks CMA on platforms with no 32-bit addressable DRAM.
+ This patch checks to make sure there is 32-bit addressable DRAM before
+ setting the 32-bit limit. If there is none, no limit is placed on the
+ CMA buffer. This allows a single kernel (with CONFIG_ZONE_DMA defined)
+ to support platforms requiring the 32-bit limit and platforms with no
+ 32-bit limit.
+
+ Signed-off-by: Mark Salter
+
+commit e1651b99e0dc0f1e92dbe9ef34ff33496dce94b2
+Author: Marc Zyngier
+Date: Thu Jun 19 10:19:43 2014 +0100
+
+ arm64: KVM: vgic: add GICv3 world switch
+
+ Introduce the GICv3 world switch code and helper functions, enabling
+ GICv2 emulation on GICv3 hardware.
+
+ Acked-by: Catalin Marinas
+ Reviewed-by: Christoffer Dall
+ Signed-off-by: Marc Zyngier
+
+commit a0806e54bfb522e50530e356abe1108e108a1430
+Author: Marc Zyngier
+Date: Thu Jun 19 10:19:42 2014 +0100
+
+ KVM: ARM: vgic: add the GICv3 backend
+
+ Introduce the support code for emulating a GICv2 on top of GICv3
+ hardware.
+
+ Acked-by: Catalin Marinas
+ Signed-off-by: Marc Zyngier
+
+commit 813813c877235d7a4499546913e360ca958e57a7
+Author: Marc Zyngier
+Date: Thu Jun 19 10:19:41 2014 +0100
+
+ arm64: KVM: move HCR_EL2.{IMO, FMO} manipulation into the vgic switch code
+
+ GICv3 requires the IMO and FMO bits to be tightly coupled with some
+ of the interrupt controller's register switch.
+
+ In order to have similar code paths, move the manipulation of these
+ bits to the GICv2 switch code.
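+
+ Concretely, the hunks below drop HCR_IMO/HCR_FMO from HCR_GUEST_FLAGS
+ and define HCR_INT_OVERRIDE (HCR_FMO | HCR_IMO) instead. A minimal C
+ rendering of what the vgic switch paths then do with that mask (the
+ helper names here are illustrative only; the real code is the
+ save_vgic_state/restore_vgic_state assembly further down):
+
+	/* on guest entry: let the vgic route IRQs/FIQs to EL2 */
+	static inline void vgic_set_int_override(u64 irq_lines)
+	{
+		u64 hcr = read_hcr_el2();		/* mrs x, hcr_el2 */
+		hcr |= HCR_INT_OVERRIDE | irq_lines;	/* HCR_FMO | HCR_IMO */
+		write_hcr_el2(hcr);			/* msr hcr_el2, x */
+	}
+
+	/* on guest exit: give IRQ/FIQ routing back to the host */
+	static inline void vgic_clear_int_override(void)
+	{
+		write_hcr_el2(read_hcr_el2() & ~HCR_INT_OVERRIDE);
+	}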
+
+ Acked-by: Catalin Marinas
+ Reviewed-by: Christoffer Dall
+ Signed-off-by: Marc Zyngier
+
+commit 298ecc28f3f58453e56c2e5b6891679480fe32f9
+Author: Marc Zyngier
+Date: Thu Jun 19 10:19:40 2014 +0100
+
+ arm64: KVM: split GICv2 world switch from hyp code
+
+ Move the GICv2 world switch code into its own file, and add the
+ necessary indirection to the arm64 switch code.
+
+ Also introduce a new type field to the vgic_params structure.
+
+ Acked-by: Catalin Marinas
+ Reviewed-by: Christoffer Dall
+ Signed-off-by: Marc Zyngier
+
+commit 701b0fd0ac4c260fbe364248710bf37bdffde360
+Author: Marc Zyngier
+Date: Thu Jun 19 10:19:39 2014 +0100
+
+ arm64: KVM: remove __kvm_hyp_code_{start, end} from hyp.S
+
+ We already have __hyp_text_{start,end} to express the boundaries
+ of the HYP text section, and __kvm_hyp_code_{start,end} are getting
+ in the way of a more modular world switch code.
+
+ Just turn __kvm_hyp_code_{start,end} into #defines mapping the
+ linker-emitted symbols.
+
+ Acked-by: Catalin Marinas
+ Reviewed-by: Christoffer Dall
+ Signed-off-by: Marc Zyngier
+
+commit 54f2e57b90ce894bb7312968344faf16624e7546
+Author: Marc Zyngier
+Date: Thu Jun 19 10:19:38 2014 +0100
+
+ KVM: ARM: vgic: revisit implementation of irqchip_in_kernel
+
+ So far, irqchip_in_kernel() was implemented by testing the value of
+ vctrl_base, which worked fine with GICv2.
+
+ With GICv3, this field is useless, as we're using system registers
+ instead of a memory mapped interface. To solve this, add a boolean
+ flag indicating if we're using a vgic or not.
+
+ Reviewed-by: Christoffer Dall
+ Signed-off-by: Marc Zyngier
+
+commit 758dbee6188c8313ca4787e7f49d3959666229de
+Author: Marc Zyngier
+Date: Thu Jun 19 10:19:37 2014 +0100
+
+ KVM: ARM: vgic: split GICv2 backend from the main vgic code
+
+ Brutally hack the innocent vgic code, and move the GICv2 specific code
+ to its own file, using vgic_ops and vgic_params as a way to pass
+ information between the two blocks.
+
+ Acked-by: Catalin Marinas
+ Reviewed-by: Christoffer Dall
+ Signed-off-by: Marc Zyngier
+
+commit 1b35a44bd60fb52bd919705b98d3ab5f5f2e0e7a
+Author: Marc Zyngier
+Date: Thu Jun 19 10:19:36 2014 +0100
+
+ KVM: ARM: introduce vgic_params structure
+
+ Move all the data specific to a given GIC implementation into its own
+ little structure.
+
+ Acked-by: Catalin Marinas
+ Reviewed-by: Christoffer Dall
+ Signed-off-by: Marc Zyngier
+
+commit 33b5df2e6295cec0a2666b0e6f5d55778bdebd1e
+Author: Marc Zyngier
+Date: Thu Jun 19 10:19:35 2014 +0100
+
+ KVM: ARM: vgic: introduce vgic_enable
+
+ Move the code dealing with enabling the VGIC on to vgic_ops.
+
+ Acked-by: Catalin Marinas
+ Reviewed-by: Christoffer Dall
+ Signed-off-by: Marc Zyngier
+
+commit b5113a5f316d899b8fde3ac8025715bb15582347
+Author: Marc Zyngier
+Date: Thu Jun 19 10:19:34 2014 +0100
+
+ KVM: ARM: vgic: abstract VMCR access
+
+ Instead of directly messing with the GICH_VMCR bits for the CPU
+ interface save/restore code, add accessors that encode/decode the
+ entire set of registers exposed by VMCR.
+
+ Not the most efficient thing, but given that this code is only used
+ by the save/restore code, performance is far from being critical.
+
+ Reviewed-by: Christoffer Dall
+ Signed-off-by: Marc Zyngier
+
+commit f32f60e78a61c7f1878e8576a944820db713d6bf
+Author: Marc Zyngier
+Date: Thu Jun 19 10:19:33 2014 +0100
+
+ KVM: ARM: vgic: move underflow handling to vgic_ops
+
+ Move the code dealing with LR underflow handling to its own functions,
+ and make them accessible through vgic_ops.
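+
+ Taken together, the series turns every direct GICH_* access into an
+ indirect call through vgic_ops, roughly shaped like this (a sketch
+ abridged from the operations named in these commits, not the complete
+ structure):
+
+	struct vgic_ops {
+		struct vgic_lr	(*get_lr)(const struct kvm_vcpu *, int);
+		void	(*set_lr)(struct kvm_vcpu *, int, struct vgic_lr);
+		void	(*enable_underflow)(struct kvm_vcpu *);
+		void	(*disable_underflow)(struct kvm_vcpu *);
+		void	(*get_vmcr)(struct kvm_vcpu *, struct vgic_vmcr *);
+		void	(*set_vmcr)(struct kvm_vcpu *, struct vgic_vmcr *);
+		void	(*enable)(struct kvm_vcpu *);
+	};
+
+ The generic vgic code only calls through these pointers; vgic-v2.c
+ (and later vgic-v3.c) fills them in with the actual GICH_*/ICH_*
+ register accesses.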
+
+ Acked-by: Catalin Marinas
+ Reviewed-by: Christoffer Dall
+ Signed-off-by: Marc Zyngier
+
+commit 1c377524d163ce244e0567db0987b501307750bb
+Author: Marc Zyngier
+Date: Thu Jun 19 10:19:32 2014 +0100
+
+ KVM: ARM: vgic: abstract MISR decoding
+
+ Instead of directly dealing with the GICH_MISR bits, move the code to
+ its own function and use a couple of public flags to represent the
+ actual state.
+
+ Acked-by: Catalin Marinas
+ Reviewed-by: Christoffer Dall
+ Signed-off-by: Marc Zyngier
+
+commit cf16a437bb41f188a24142bef17ccc96f54ee29a
+Author: Marc Zyngier
+Date: Thu Jun 19 10:19:31 2014 +0100
+
+ KVM: ARM: vgic: abstract EISR bitmap access
+
+ Move the GICH_EISR access to its own function.
+
+ Acked-by: Catalin Marinas
+ Reviewed-by: Christoffer Dall
+ Signed-off-by: Marc Zyngier
+
+commit 8e35c914d0fee0ed9334336590f72df618c42d44
+Author: Marc Zyngier
+Date: Thu Jun 19 10:19:30 2014 +0100
+
+ KVM: ARM: vgic: abstract access to the ELRSR bitmap
+
+ Move the GICH_ELRSR access to its own functions, and add them to
+ the vgic_ops structure.
+
+ Acked-by: Catalin Marinas
+ Reviewed-by: Christoffer Dall
+ Signed-off-by: Marc Zyngier
+
+commit 5e74572029116124aba6e057aba2f7106b651661
+Author: Marc Zyngier
+Date: Thu Jun 19 10:19:29 2014 +0100
+
+ KVM: ARM: vgic: introduce vgic_ops and LR manipulation primitives
+
+ In order to split the various register manipulation from the main vgic
+ code, introduce a vgic_ops structure, and start by abstracting the
+ LR manipulation code with a couple of accessors.
+
+ Reviewed-by: Christoffer Dall
+ Signed-off-by: Marc Zyngier
+
+commit 840d3614b64ad26e0f510bd2ef78bf427d91f778
+Author: Marc Zyngier
+Date: Thu Jun 19 10:19:28 2014 +0100
+
+ KVM: arm/arm64: vgic: move GICv2 registers to their own structure
+
+ In order to make way for the GICv3 registers, move the v2-specific
+ registers to their own structure.
+
+ Acked-by: Catalin Marinas
+ Reviewed-by: Christoffer Dall
+ Signed-off-by: Marc Zyngier
+
+commit 07a7980c0ca48f940b97a7be30db7700317813de
+Author: Marc Zyngier
+Date: Thu Jun 19 10:19:27 2014 +0100
+
+ arm64: boot protocol documentation update for GICv3
+
+ Linux has some requirements that must be satisfied in order to boot
+ on a system built with a GICv3.
+
+ Acked-by: Christoffer Dall
+ Signed-off-by: Marc Zyngier
+
+commit fb16d46188b56559112b10eb9d4cc10ff2d85c12
+Author: Marc Zyngier
+Date: Thu Jun 19 10:19:26 2014 +0100
+
+ arm64: GICv3 device tree binding documentation
+
+ Add the necessary documentation to support GICv3.
+
+ Cc: Thomas Gleixner
+ Cc: Mark Rutland
+ Cc: Jason Cooper
+ Acked-by: Catalin Marinas
+ Acked-by: Rob Herring
+ Acked-by: Christoffer Dall
+ Signed-off-by: Marc Zyngier
+
+commit 06fca8017fe75cffdae40a9de3b1a864b649a308
+Author: Marc Zyngier
+Date: Thu Jun 19 10:19:25 2014 +0100
+
+ arm64: initial support for GICv3
+
+ The Generic Interrupt Controller (version 3) offers services that are
+ similar to GICv2, with a number of additional features:
+ - Affinity routing based on the CPU MPIDR (ARE)
+ - System registers for the CPU interfaces (SRE)
+ - Support for more than 8 CPUs
+ - Locality-specific Peripheral Interrupts (LPIs)
+ - Interrupt Translation Services (ITS)
+
+ This patch adds preliminary support for GICv3 with ARE and SRE,
+ non-secure mode only. It relies on higher exception levels to grant ARE
+ and SRE access.
+
+ Support for LPI and ITS will be added at a later time.
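+
+ The enable sequence relies on detecting the system register interface
+ from ID_AA64PFR0_EL1: the head.S hunk below checks bits [27:24] for
+ the value 1 before touching ICC_SRE_EL2. The same check, sketched as C
+ with inline assembly (the function name is illustrative):
+
+	/* GIC bits of ID_AA64PFR0_EL1: 1 => GICv3 system registers present */
+	static bool gicv3_sysregs_present(void)
+	{
+		u64 pfr0;
+
+		asm("mrs %0, id_aa64pfr0_el1" : "=r" (pfr0));
+		return ((pfr0 >> 24) & 0xf) == 1;
+	}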
+
+ Cc: Thomas Gleixner
+ Cc: Jason Cooper
+ Reviewed-by: Zi Shen Lim
+ Reviewed-by: Christoffer Dall
+ Reviewed-by: Tirumalesh Chalamarla
+ Reviewed-by: Yun Wu
+ Reviewed-by: Zhen Lei
+ Tested-by: Tirumalesh Chalamarla
+ Tested-by: Radha Mohan Chintakuntla
+ Acked-by: Radha Mohan Chintakuntla
+ Acked-by: Catalin Marinas
+ Signed-off-by: Marc Zyngier
+
+commit 9e8004797a703dbcfd57b240119b350727887c43
+Author: Marc Zyngier
+Date: Thu Jun 19 10:19:24 2014 +0100
+
+ ARM: GIC: move some bits of GICv2 to a library-type file
+
+ A few GICv2 low-level functions are actually very useful to GICv3,
+ and it makes some sense to share them across the two drivers.
+ They end up in their own file, with an additional parameter used
+ to ensure an optional synchronization (unused on GICv2).
+
+ Cc: Thomas Gleixner
+ Cc: Jason Cooper
+ Acked-by: Christoffer Dall
+ Signed-off-by: Marc Zyngier
+
+commit b4e6d74b54b13bb69b5d31a44ce1ae0118e7b9c7
+Author: Mark Salter
+Date: Thu Jun 12 15:10:22 2014 -0400
+
+ rtc: ia64: allow other architectures to use EFI RTC
+
+ Currently, the rtc-efi driver is restricted to ia64 only.
+ Newer architectures with EFI support may want to also use
+ that driver. This patch moves the platform device setup
+ from ia64 into drivers/rtc and allows any architecture with
+ CONFIG_EFI=y to use the rtc-efi driver.
+
+ Signed-off-by: Mark Salter
+
+commit 7362bb3ff47a277d57e2547b463dac40c51ee09b
+Author: Don Dutile
+Date: Tue Mar 25 20:22:26 2014 -0400
+
+ pmu: Adding support for Xgene PMUs
+
+ Message-id: <1395778948-47814-2-git-send-email-ddutile@redhat.com>
+ Patchwork-id: 78602
+ O-Subject: [PATCH 1/3] pmu: Adding support for Xgene PMUs
+ Bugzilla: 1079110
+
+ Backport of these two posted (but not upstream) patches.
+ Combined into a single patch due to gic-patch dependency.
+
+ Signed-off-by: Donald Dutile
+
+commit fa07a775e9c349106913e3931ad8c79a629d52a6
+Author: Mark Salter
+Date: Sun Jun 15 09:06:55 2014 -0400
+
+ arm64: fix up APM Mustang devicetree
+
+ These are changes needed when loading a device tree blob built with
+ the kernel, i.e. with grub. They are not needed when using a devicetree
+ from Tianocore, which will be fixed up at Tianocore runtime.
+
+ Signed-off-by: Mark Salter
+
+commit 039c600b601646a609356c379f9180499bc1fc06
+Author: Kyle McMartin
+Date: Tue May 13 22:25:26 2014 -0400
+
+ arm64: don't set READ_IMPLIES_EXEC for EM_AARCH64 ELF objects
+
+ Message-id: <20140513222526.GC26038@redacted.bos.redhat.com>
+ Patchwork-id: 79789
+ O-Subject: [ACADIA PATCH] arm64: don't set READ_IMPLIES_EXEC for EM_AARCH64 ELF objects
+ Bugzilla: 1085528
+
+ BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1085528
+ Upstream: submitted soon
+
+ [Sadly this isn't (yet) sufficient... but it fixes at least one issue
+ here... cat /proc/$$/personality shows READ_IMPLIES_EXEC before. I'll
+ try to figure the rest out tomorrow.]
+
+ Currently, we're accidentally ending up with executable stacks on
+ AArch64 when the ABI says we shouldn't be, and relying on glibc to fix
+ things up for us when we're loaded. However, SELinux will deny us
+ mucking with the stack, and hit us with execmem AVCs.
+
+ The reason this is happening is somewhat complex:
+
+ fs/binfmt_elf.c:load_elf_binary()
+ - initializes executable_stack = EXSTACK_DEFAULT implying the
+ architecture should make up its mind.
+ - does a pile of loading goo
+ - runs through the program headers, looking for PT_GNU_STACK
+ and setting (or unsetting) executable_stack if it finds it.
+
+ This is our first problem, we won't generate these unless an
+ executable stack is explicitly requested.
+
+ - more ELF loading goo
+ - sets whether we're a compat task or not (TIF_32BIT) based on compat.h
+ - for compat reasons (pre-GNU_STACK) checks if the READ_IMPLIES_EXEC
+ flag should be set for ancient toolchains
+
+ Here's our second problem, we test if read_implies_exec based on
+ stk != EXSTACK_DISABLE_X, which is true since stk == EXSTACK_DEFAULT.
+
+ So we set current->personality |= READ_IMPLIES_EXEC like a broken
+ legacy toolchain would want.
+
+ - Now we call setup_arg_pages to set up the stack...
+
+ fs/exec.c:setup_arg_pages()
+ - lots of magic happens here
+ - vm_flags gets initialized to VM_STACK_FLAGS
+
+ Here's our third problem, VM_STACK_FLAGS on arm64 is
+ VM_DATA_DEFAULT_FLAGS which tests READ_IMPLIES_EXEC and sets VM_EXEC
+ if it's true. So we end up with an executable stack mapping, since we
+ don't have executable_stack set (it's still EXSTACK_DEFAULT at this
+ point) to unset it anywhere.
+
+ Bang. execstack AVC when the program starts running.
+
+ The easiest way I can see to fix this is to test if we're a legacy task
+ and fix it up there. But that's not as simple as it sounds, because
+ the 32-bit ABI depends on what revision of the CPU we've enabled (not
+ that it matters since we're ARMv8...) Regardless, in the compat case,
+ set READ_IMPLIES_EXEC if we've found a GNU_STACK header which explicitly
+ requested it as in arch/arm/kernel/elf.c:arm_elf_read_implies_exec().
+
+ Signed-off-by: Kyle McMartin
+ Signed-off-by: Donald Dutile
+
+commit 36988493876f40bfcde0f3ed20c7386792297d6e
+Author: Mark Salter
+Date: Fri Jun 13 00:37:11 2014 -0400
+
+ arm64: fix soft lockup due to large tlb flush range
+
+ Under certain loads, this soft lockup has been observed:
+
+ BUG: soft lockup - CPU#2 stuck for 22s! [ip6tables:1016]
+ Modules linked in: ip6t_rpfilter ip6t_REJECT cfg80211 rfkill xt_conntrack ebtable_nat ebtable_broute bridge stp llc ebtable_filter ebtables ip6table_nat nf_conntrack_ipv6 nf_defrag_ipv6 nf_nat_ipv6 ip6table_mangle ip6table_security ip6table_raw ip6table_filter ip6_tables iptable_nat nf_conntrack_ipv4 nf_defrag_ipv4 nf_nat_ipv4 nf_nat nf_conntrack iptable_mangle iptable_security iptable_raw vfat fat efivarfs xfs libcrc32c
+
+ CPU: 2 PID: 1016 Comm: ip6tables Not tainted 3.13.0-0.rc7.30.sa2.aarch64 #1
+ task: fffffe03e81d1400 ti: fffffe03f01f8000 task.ti: fffffe03f01f8000
+ PC is at __cpu_flush_kern_tlb_range+0xc/0x40
+ LR is at __purge_vmap_area_lazy+0x28c/0x3ac
+ pc : [] lr : [] pstate: 80000145
+ sp : fffffe03f01fbb70
+ x29: fffffe03f01fbb70 x28: fffffe03f01f8000
+ x27: fffffe0000b19000 x26: 00000000000000d0
+ x25: 000000000000001c x24: fffffe03f01fbc50
+ x23: fffffe03f01fbc58 x22: fffffe03f01fbc10
+ x21: fffffe0000b2a3f8 x20: 0000000000000802
+ x19: fffffe0000b2a3c8 x18: 000003fffdf52710
+ x17: 000003ff9d8bb910 x16: fffffe000050fbfc
+ x15: 0000000000005735 x14: 000003ff9d7e1a5c
+ x13: 0000000000000000 x12: 000003ff9d7e1a5c
+ x11: 0000000000000007 x10: fffffe0000c09af0
+ x9 : fffffe0000ad1000 x8 : 000000000000005c
+ x7 : fffffe03e8624000 x6 : 0000000000000000
+ x5 : 0000000000000000 x4 : 0000000000000000
+ x3 : fffffe0000c09cc8 x2 : 0000000000000000
+ x1 : 000fffffdfffca80 x0 : 000fffffcd742150
+
+ The __cpu_flush_kern_tlb_range() function looks like:
+
+ ENTRY(__cpu_flush_kern_tlb_range)
+ dsb sy
+ lsr x0, x0, #12
+ lsr x1, x1, #12
+ 1: tlbi vaae1is, x0
+ add x0, x0, #1
+ cmp x0, x1
+ b.lo 1b
+ dsb sy
+ isb
+ ret
+ ENDPROC(__cpu_flush_kern_tlb_range)
+
+ The above soft lockup shows the PC at tlbi insn with:
+
+ x0 = 0x000fffffcd742150
+ x1 = 0x000fffffdfffca80
+
+ So __cpu_flush_kern_tlb_range has 0x128ba930 tlbi flushes left
+ after it has already been looping for 23 seconds!
+
+ Looking up one frame at __purge_vmap_area_lazy(), there is:
+
+ ...
+ list_for_each_entry_rcu(va, &vmap_area_list, list) {
+ if (va->flags & VM_LAZY_FREE) {
+ if (va->va_start < *start)
+ *start = va->va_start;
+ if (va->va_end > *end)
+ *end = va->va_end;
+ nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
+ list_add_tail(&va->purge_list, &valist);
+ va->flags |= VM_LAZY_FREEING;
+ va->flags &= ~VM_LAZY_FREE;
+ }
+ }
+ ...
+ if (nr || force_flush)
+ flush_tlb_kernel_range(*start, *end);
+
+ So if two areas are being freed, the range passed to
+ flush_tlb_kernel_range() may be as large as the vmalloc
+ space. For arm64, this is ~240GB for 4k pagesize and ~2TB
+ for 64k page size.
+
+ This patch works around this problem by adding a loop limit.
+ If the range is larger than the limit, use flush_tlb_all()
+ rather than flushing based on individual pages. The limit
+ chosen is arbitrary and would be better if based on the
+ actual size of the tlb. I looked through the ARM ARM but
+ didn't see any easy way to get the actual tlb size, so for
+ now the arbitrary limit is better than the soft lockup.
+
+ Signed-off-by: Mark Salter
+
+commit 6443ca61dca1a50a86bb3a1678799a9227a83335
+Author: Mark Salter
+Date: Tue Jun 24 09:50:28 2014 -0400
+
+ arm64: use EFI as last resort for reboot and poweroff
+
+ Wire in support for EFI reboot and poweroff functions. We use these
+ only if no other mechanism has been registered with arm_pm_restart
+ and/or pm_power_off respectively.
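+
+ The "last resort" ordering is visible in the process.c and efi.c hunks
+ below; condensed (not verbatim) into C, the pattern is simply:
+
+	void machine_restart(char *cmd)
+	{
+		...
+		/* Prefer whatever restart hook the platform registered */
+		if (arm_pm_restart)
+			arm_pm_restart(reboot_mode, cmd);
+
+		/* If all else fails, try EFI */
+		efi_reboot(reboot_mode, cmd);
+
+		printk("Reboot failed -- System halted\n");
+	}
+
+	/* and for poweroff, EFI claims pm_power_off only if it is still free */
+	bool efi_poweroff_required(void)
+	{
+		return pm_power_off == NULL;
+	}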
+
+ Signed-off-by: Mark Salter
+
+commit 06191eb6c796a0678be663ce77e3abeb18b0b3f7
+Author: Matt Fleming
+Date: Thu Jun 19 14:40:25 2014 +0100
+
+ x86/reboot: Add EFI reboot quirk for ACPI Hardware Reduced flag
+
+ It appears that the BayTrail-T class of hardware requires EFI in order
+ to power down and reboot and no other reliable method exists.
+
+ This quirk is generally applicable to all hardware that has the ACPI
+ Hardware Reduced bit set, since usually ACPI would be the preferred
+ method.
+
+ Cc: Len Brown
+ Cc: Mark Salter
+ Cc: "Rafael J. Wysocki"
+ Signed-off-by: Matt Fleming
+
+commit f9fbfac6e78f4772e9ea83fe98b9d65a04b66d7b
+Author: Matt Fleming
+Date: Thu Jun 19 14:40:24 2014 +0100
+
+ efi/reboot: Allow powering off machines using EFI
+
+ Not only can EfiResetSystem() be used to reboot, it can also be used to
+ power down machines.
+
+ By and large, this functionality doesn't work very well across the range
+ of EFI machines in the wild, so it should definitely only be used as a
+ last resort. In an ideal world, this wouldn't be needed at all.
+
+ Unfortunately, we're starting to see machines where EFI is the *only*
+ reliable way to power down, and nothing else, not PCI, not ACPI, works.
+
+ efi_poweroff_required() should be implemented on a per-architecture
+ basis, since exactly when we should be using EFI runtime services is a
+ platform-specific decision. There's no analogue for reboot because each
+ architecture handles reboot very differently - the x86 code in
+ particular is pretty complex.
+
+ Patches to enable this for specific classes of hardware will be
+ submitted separately.
+
+ Cc: Mark Salter
+ Signed-off-by: Matt Fleming
+
+commit 3ab8d8d210f5e819438e197bc95d44aeb216a772
+Author: Matt Fleming
+Date: Thu Jun 19 14:40:23 2014 +0100
+
+ efi/reboot: Add generic wrapper around EfiResetSystem()
+
+ Implement efi_reboot(), which is really just a wrapper around the
+ EfiResetSystem() EFI runtime service, but it does at least allow us to
+ funnel all callers through a single location.
+
+ It also simplifies the callsites since users no longer need to check to
+ see whether EFI_RUNTIME_SERVICES are enabled.
+
+ Cc: Tony Luck
+ Cc: Mark Salter
+ Signed-off-by: Matt Fleming
+
+commit 42218bfddcbe38f0b92674723ebd9de2fb7e8c4e
+Author: Michal Nazarewicz
+Date: Mon Jun 23 21:40:47 2014 +0200
+
+ mm: page_alloc: fix CMA area initialisation when pageblock > MAX_ORDER
+
+ With a kernel configured with ARM64_64K_PAGES && !TRANSPARENT_HUGEPAGE,
+ the following is triggered at early boot:
+
+ SMP: Total of 8 processors activated.
+ devtmpfs: initialized
+ Unable to handle kernel NULL pointer dereference at virtual address 00000008
+ pgd = fffffe0000050000
+ [00000008] *pgd=00000043fba00003, *pmd=00000043fba00003, *pte=00e0000078010407
+ Internal error: Oops: 96000006 [#1] SMP
+ Modules linked in:
+ CPU: 0 PID: 1 Comm: swapper/0 Not tainted 3.15.0-rc864k+ #44
+ task: fffffe03bc040000 ti: fffffe03bc080000 task.ti: fffffe03bc080000
+ PC is at __list_add+0x10/0xd4
+ LR is at free_one_page+0x270/0x638
+ ...
+ Call trace:
+ [] __list_add+0x10/0xd4
+ [] free_one_page+0x26c/0x638
+ [] __free_pages_ok.part.52+0x84/0xbc
+ [] __free_pages+0x74/0xbc
+ [] init_cma_reserved_pageblock+0xe8/0x104
+ [] cma_init_reserved_areas+0x190/0x1e4
+ [] do_one_initcall+0xc4/0x154
+ [] kernel_init_freeable+0x204/0x2a8
+ [] kernel_init+0xc/0xd4
+
+ This happens because init_cma_reserved_pageblock() calls
+ __free_one_page() with pageblock_order as page order but it is bigger
+ than MAX_ORDER. This in turn causes accesses past zone->free_list[].
+
+ Fix the problem by changing init_cma_reserved_pageblock() such that it
+ splits pageblock into individual MAX_ORDER pages if pageblock is
+ bigger than a MAX_ORDER page (see the sketch below).
+
+ In cases where !CONFIG_HUGETLB_PAGE_SIZE_VARIABLE, which is all
+ architectures except for ia64, powerpc and tile at the moment, the
+ “pageblock_order > MAX_ORDER” condition will be optimised out since
+ both sides of the operator are constants. In cases where pageblock
+ size is variable, the performance degradation should not be
+ significant anyway since init_cma_reserved_pageblock() is called
+ only at boot time at most MAX_CMA_AREAS times which by default is
+ eight.
+
+ Cc: stable@vger.kernel.org
+ Signed-off-by: Michal Nazarewicz
+ Reported-by: Mark Salter
+ Tested-by: Christopher Covington
+
+commit 7e20b29ffff9de89d22779bcf8891b2a6bf3ab63
+Author: Suman Tripathi
+Date: Thu Jun 19 06:51:32 2014 -0400
+
+ ata: Fix the dma state machine lockup for the IDENTIFY DEVICE PIO mode command.
+
+ This patch fixes the dma state machine lockup due to the processing
+ of the IDENTIFY DEVICE PIO mode command. The X-Gene AHCI controller
+ has an erratum in which it cannot clear the BSY bit after
+ receiving the PIO setup FIS, which causes the dma state machine to go
+ into the CMFatalErrorUpdate state, resulting in the dma state
+ machine lockup. This patch also removes the dma restart workaround
+ from the read_id function, as the read_id function is only called by
+ the libata layer for ATA_INTERNAL commands. But in some cases, e.g.
+ PORT MULTIPLIER and udev, the framework will enumerate using SCSI
+ commands and will not call the read_id function.
+
+ Signed-off-by: Loc Ho
+ Signed-off-by: Suman Tripathi
+
+commit 2494fae7825c244a6f173241c52e8ab7a38006e6
+Author: Suman Tripathi
+Date: Thu Jun 19 06:50:08 2014 -0400
+
+ libahci: Implement the function ahci_restart_engine to restart the port dma engine.
+
+ This patch adds a function to restart the port dma engine.
+
+ Signed-off-by: Loc Ho
+ Signed-off-by: Suman Tripathi
+
+commit 3b3bef5b10473f9986de45022ae8cc528bfc8464
+Author: Iyappan Subramanian
+Date: Fri Jun 20 16:18:16 2014 -0700
+
+ drivers: net: Add APM X-Gene SoC ethernet driver support.
+
+ This patch adds a network driver for APM X-Gene SoC ethernet.
+
+ Signed-off-by: Iyappan Subramanian
+ Signed-off-by: Ravi Patel
+ Signed-off-by: Keyur Chudgar
+
+commit b4ef14e44cda920313a6fa63382b82e2bd1964e1
+Author: Iyappan Subramanian
+Date: Fri Jun 20 16:18:15 2014 -0700
+
+ dts: Add bindings for APM X-Gene SoC ethernet driver
+
+ This patch adds bindings for the APM X-Gene SoC ethernet driver.
+
+ Signed-off-by: Iyappan Subramanian
+ Signed-off-by: Ravi Patel
+ Signed-off-by: Keyur Chudgar
+
+commit 1173f314654d6edb5072d4f47908520cf7fcc9c4
+Author: Iyappan Subramanian
+Date: Fri Jun 20 16:18:14 2014 -0700
+
+ Documentation: dts: Add bindings for APM X-Gene SoC ethernet driver
+
+ This patch adds documentation for the APM X-Gene SoC ethernet DTS binding.
+
+ Signed-off-by: Iyappan Subramanian
+ Signed-off-by: Ravi Patel
+ Signed-off-by: Keyur Chudgar
+
+commit 0931546a1d4a3e89072fcb9f3a3755adf49fb99c
+Author: Iyappan Subramanian
+Date: Fri Jun 20 16:18:13 2014 -0700
+
+ MAINTAINERS: Add entry for APM X-Gene SoC ethernet driver
+
+ This patch adds a MAINTAINERS entry for the APM X-Gene SoC
+ ethernet driver.
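+
+ For reference, the pageblock split described in the CMA initialisation
+ fix above amounts to the following (a sketch based on that commit's
+ description; the actual mm/page_alloc.c hunk is close to this):
+
+	void __init init_cma_reserved_pageblock(struct page *page)
+	{
+		unsigned i = pageblock_nr_pages;
+		struct page *p = page;
+		...
+		if (pageblock_order >= MAX_ORDER) {
+			/* Free in MAX_ORDER-1 chunks: an order of
+			 * pageblock_order would index past zone->free_list[]. */
+			do {
+				set_page_refcounted(p);
+				__free_pages(p, MAX_ORDER - 1);
+				p += MAX_ORDER_NR_PAGES;
+			} while (i -= MAX_ORDER_NR_PAGES);
+		} else {
+			set_page_refcounted(page);
+			__free_pages(page, pageblock_order);
+		}
+	}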
+ + Signed-off-by: Iyappan Subramanian + Signed-off-by: Ravi Patel + Signed-off-by: Keyur Chudgar + +diff --git a/Documentation/arm64/booting.txt b/Documentation/arm64/booting.txt +index 37fc4f6..e28ccec 100644 +--- a/Documentation/arm64/booting.txt ++++ b/Documentation/arm64/booting.txt +@@ -141,6 +141,12 @@ Before jumping into the kernel, the following conditions must be met: + the kernel image will be entered must be initialised by software at a + higher exception level to prevent execution in an UNKNOWN state. + ++ For systems with a GICv3 interrupt controller, it is expected that: ++ - If EL3 is present, it must program ICC_SRE_EL3.Enable (bit 3) to ++ 0b1 and ICC_SRE_EL3.SRE (bit 0) to 0b1. ++ - If the kernel is entered at EL1, EL2 must set ICC_SRE_EL2.Enable ++ (bit 3) to 0b1 and ICC_SRE_EL2.SRE (bit 0) to 0b1. ++ + The requirements described above for CPU mode, caches, MMUs, architected + timers, coherency and system registers apply to all CPUs. All CPUs must + enter the kernel in the same exception level. +diff --git a/Documentation/devicetree/bindings/arm/gic-v3.txt b/Documentation/devicetree/bindings/arm/gic-v3.txt +new file mode 100644 +index 0000000..33cd05e +--- /dev/null ++++ b/Documentation/devicetree/bindings/arm/gic-v3.txt +@@ -0,0 +1,79 @@ ++* ARM Generic Interrupt Controller, version 3 ++ ++AArch64 SMP cores are often associated with a GICv3, providing Private ++Peripheral Interrupts (PPI), Shared Peripheral Interrupts (SPI), ++Software Generated Interrupts (SGI), and Locality-specific Peripheral ++Interrupts (LPI). ++ ++Main node required properties: ++ ++- compatible : should at least contain "arm,gic-v3". ++- interrupt-controller : Identifies the node as an interrupt controller ++- #interrupt-cells : Specifies the number of cells needed to encode an ++ interrupt source. Must be a single cell with a value of at least 3. ++ ++ The 1st cell is the interrupt type; 0 for SPI interrupts, 1 for PPI ++ interrupts. Other values are reserved for future use. ++ ++ The 2nd cell contains the interrupt number for the interrupt type. ++ SPI interrupts are in the range [0-987]. PPI interrupts are in the ++ range [0-15]. ++ ++ The 3rd cell is the flags, encoded as follows: ++ bits[3:0] trigger type and level flags. ++ 1 = edge triggered ++ 4 = level triggered ++ ++ Cells 4 and beyond are reserved for future use. When the 1st cell ++ has a value of 0 or 1, cells 4 and beyond act as padding, and may be ++ ignored. It is recommended that padding cells have a value of 0. ++ ++- reg : Specifies base physical address(s) and size of the GIC ++ registers, in the following order: ++ - GIC Distributor interface (GICD) ++ - GIC Redistributors (GICR), one range per redistributor region ++ - GIC CPU interface (GICC) ++ - GIC Hypervisor interface (GICH) ++ - GIC Virtual CPU interface (GICV) ++ ++ GICC, GICH and GICV are optional. ++ ++- interrupts : Interrupt source of the VGIC maintenance interrupt. ++ ++Optional ++ ++- redistributor-stride : If using padding pages, specifies the stride ++ of consecutive redistributors. Must be a multiple of 64kB. ++ ++- #redistributor-regions: The number of independent contiguous regions ++ occupied by the redistributors. Required if more than one such ++ region is present. 
++ ++Examples: ++ ++ gic: interrupt-controller@2cf00000 { ++ compatible = "arm,gic-v3"; ++ #interrupt-cells = <3>; ++ interrupt-controller; ++ reg = <0x0 0x2f000000 0 0x10000>, // GICD ++ <0x0 0x2f100000 0 0x200000>, // GICR ++ <0x0 0x2c000000 0 0x2000>, // GICC ++ <0x0 0x2c010000 0 0x2000>, // GICH ++ <0x0 0x2c020000 0 0x2000>; // GICV ++ interrupts = <1 9 4>; ++ }; ++ ++ gic: interrupt-controller@2c010000 { ++ compatible = "arm,gic-v3"; ++ #interrupt-cells = <3>; ++ interrupt-controller; ++ redistributor-stride = <0x0 0x40000>; // 256kB stride ++ #redistributor-regions = <2>; ++ reg = <0x0 0x2c010000 0 0x10000>, // GICD ++ <0x0 0x2d000000 0 0x800000>, // GICR 1: CPUs 0-31 ++ <0x0 0x2e000000 0 0x800000>; // GICR 2: CPUs 32-63 ++ <0x0 0x2c040000 0 0x2000>, // GICC ++ <0x0 0x2c060000 0 0x2000>, // GICH ++ <0x0 0x2c080000 0 0x2000>; // GICV ++ interrupts = <1 9 4>; ++ }; +diff --git a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt +new file mode 100644 +index 0000000..3e2a295 +--- /dev/null ++++ b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt +@@ -0,0 +1,72 @@ ++APM X-Gene SoC Ethernet nodes ++ ++Ethernet nodes are defined to describe on-chip ethernet interfaces in ++APM X-Gene SoC. ++ ++Required properties: ++- compatible: Should be "apm,xgene-enet" ++- reg: Address and length of the register set for the device. It contains the ++ information of registers in the same order as described by reg-names ++- reg-names: Should contain the register set names ++ "enet_csr": Ethernet control and status register address space ++ "ring_csr": Descriptor ring control and status register address space ++ "ring_cmd": Descriptor ring command register address space ++- interrupts: Ethernet main interrupt ++- clocks: Reference to the clock entry. ++- local-mac-address: MAC address assigned to this device ++- phy-connection-type: Interface type between ethernet device and PHY device ++- phy-handle: Reference to a PHY node connected to this device ++ ++- mdio: Device tree subnode with the following required ++ properties: ++ ++ - compatible: Must be "apm,xgene-mdio". ++ - #address-cells: Must be <1>. ++ - #size-cells: Must be <0>. ++ ++ For the phy on the mdio bus, there must be a node with the following ++ fields: ++ ++ - compatible: PHY identifier. Please refer ./phy.txt for the format. ++ - reg: The ID number for the phy. ++ ++Optional properties: ++- status : Should be "ok" or "disabled" for enabled/disabled. ++ Default is "ok". 
++ ++ ++Example: ++ menetclk: menetclk { ++ compatible = "apm,xgene-device-clock"; ++ clock-output-names = "menetclk"; ++ status = "ok"; ++ }; ++ ++ menet: ethernet@17020000 { ++ compatible = "apm,xgene-enet"; ++ status = "disabled"; ++ reg = <0x0 0x17020000 0x0 0xd100>, ++ <0x0 0X17030000 0x0 0X400>, ++ <0x0 0X10000000 0x0 0X200>; ++ reg-names = "enet_csr", "ring_csr", "ring_cmd"; ++ interrupts = <0x0 0x3c 0x4>; ++ clocks = <&menetclk 0>; ++ local-mac-address = [00 01 73 00 00 01]; ++ phy-connection-type = "rgmii"; ++ phy-handle = <&menetphy>; ++ mdio { ++ compatible = "apm,xgene-mdio"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ menetphy: menetphy@3 { ++ compatible = "ethernet-phy-id001c.c915"; ++ reg = <0x3>; ++ }; ++ ++ }; ++ }; ++ ++/* Board-specific peripheral configurations */ ++&menet { ++ status = "ok"; ++}; +diff --git a/MAINTAINERS b/MAINTAINERS +index 3cc94ff..45a142e 100644 +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -700,6 +700,14 @@ S: Maintained + F: drivers/net/appletalk/ + F: net/appletalk/ + ++APPLIED MICRO (APM) X-GENE SOC ETHERNET DRIVER ++M: Iyappan Subramanian ++M: Keyur Chudgar ++M: Ravi Patel ++S: Supported ++F: drivers/net/ethernet/apm/xgene/ ++F: Documentation/devicetree/bindings/net/apm-xgene-enet.txt ++ + APTINA CAMERA SENSOR PLL + M: Laurent Pinchart + L: linux-media@vger.kernel.org +diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h +index 193ceaf..d6d5227 100644 +--- a/arch/arm/include/asm/kvm_host.h ++++ b/arch/arm/include/asm/kvm_host.h +@@ -225,6 +225,11 @@ static inline int kvm_arch_dev_ioctl_check_extension(long ext) + return 0; + } + ++static inline void vgic_arch_setup(const struct vgic_params *vgic) ++{ ++ BUG_ON(vgic->type != VGIC_V2); ++} ++ + int kvm_perf_init(void); + int kvm_perf_teardown(void); + +diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c +index 85598b5..713e807 100644 +--- a/arch/arm/kernel/asm-offsets.c ++++ b/arch/arm/kernel/asm-offsets.c +@@ -182,13 +182,13 @@ int main(void) + DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.fault.hyp_pc)); + #ifdef CONFIG_KVM_ARM_VGIC + DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu)); +- DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr)); +- DEFINE(VGIC_CPU_VMCR, offsetof(struct vgic_cpu, vgic_vmcr)); +- DEFINE(VGIC_CPU_MISR, offsetof(struct vgic_cpu, vgic_misr)); +- DEFINE(VGIC_CPU_EISR, offsetof(struct vgic_cpu, vgic_eisr)); +- DEFINE(VGIC_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_elrsr)); +- DEFINE(VGIC_CPU_APR, offsetof(struct vgic_cpu, vgic_apr)); +- DEFINE(VGIC_CPU_LR, offsetof(struct vgic_cpu, vgic_lr)); ++ DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr)); ++ DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr)); ++ DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr)); ++ DEFINE(VGIC_V2_CPU_EISR, offsetof(struct vgic_cpu, vgic_v2.vgic_eisr)); ++ DEFINE(VGIC_V2_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr)); ++ DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr)); ++ DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr)); + DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr)); + #ifdef CONFIG_KVM_ARM_TIMER + DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl)); +diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile +index 789bca9..f7057ed 100644 +--- a/arch/arm/kvm/Makefile ++++ b/arch/arm/kvm/Makefile +@@ -21,4 +21,5 @@ obj-y += kvm-arm.o init.o interrupts.o + obj-y += arm.o 
handle_exit.o guest.o mmu.o emulate.o reset.o + obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o + obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o ++obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o + obj-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o +diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S +index 76af9302..e4eaf30 100644 +--- a/arch/arm/kvm/interrupts_head.S ++++ b/arch/arm/kvm/interrupts_head.S +@@ -421,14 +421,14 @@ vcpu .req r0 @ vcpu pointer always in r0 + ldr r9, [r2, #GICH_ELRSR1] + ldr r10, [r2, #GICH_APR] + +- str r3, [r11, #VGIC_CPU_HCR] +- str r4, [r11, #VGIC_CPU_VMCR] +- str r5, [r11, #VGIC_CPU_MISR] +- str r6, [r11, #VGIC_CPU_EISR] +- str r7, [r11, #(VGIC_CPU_EISR + 4)] +- str r8, [r11, #VGIC_CPU_ELRSR] +- str r9, [r11, #(VGIC_CPU_ELRSR + 4)] +- str r10, [r11, #VGIC_CPU_APR] ++ str r3, [r11, #VGIC_V2_CPU_HCR] ++ str r4, [r11, #VGIC_V2_CPU_VMCR] ++ str r5, [r11, #VGIC_V2_CPU_MISR] ++ str r6, [r11, #VGIC_V2_CPU_EISR] ++ str r7, [r11, #(VGIC_V2_CPU_EISR + 4)] ++ str r8, [r11, #VGIC_V2_CPU_ELRSR] ++ str r9, [r11, #(VGIC_V2_CPU_ELRSR + 4)] ++ str r10, [r11, #VGIC_V2_CPU_APR] + + /* Clear GICH_HCR */ + mov r5, #0 +@@ -436,7 +436,7 @@ vcpu .req r0 @ vcpu pointer always in r0 + + /* Save list registers */ + add r2, r2, #GICH_LR0 +- add r3, r11, #VGIC_CPU_LR ++ add r3, r11, #VGIC_V2_CPU_LR + ldr r4, [r11, #VGIC_CPU_NR_LR] + 1: ldr r6, [r2], #4 + str r6, [r3], #4 +@@ -463,9 +463,9 @@ vcpu .req r0 @ vcpu pointer always in r0 + add r11, vcpu, #VCPU_VGIC_CPU + + /* We only restore a minimal set of registers */ +- ldr r3, [r11, #VGIC_CPU_HCR] +- ldr r4, [r11, #VGIC_CPU_VMCR] +- ldr r8, [r11, #VGIC_CPU_APR] ++ ldr r3, [r11, #VGIC_V2_CPU_HCR] ++ ldr r4, [r11, #VGIC_V2_CPU_VMCR] ++ ldr r8, [r11, #VGIC_V2_CPU_APR] + + str r3, [r2, #GICH_HCR] + str r4, [r2, #GICH_VMCR] +@@ -473,7 +473,7 @@ vcpu .req r0 @ vcpu pointer always in r0 + + /* Restore list registers */ + add r2, r2, #GICH_LR0 +- add r3, r11, #VGIC_CPU_LR ++ add r3, r11, #VGIC_V2_CPU_LR + ldr r4, [r11, #VGIC_CPU_NR_LR] + 1: ldr r6, [r3], #4 + str r6, [r2], #4 +diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig +index a474de34..7fc6e2e 100644 +--- a/arch/arm64/Kconfig ++++ b/arch/arm64/Kconfig +@@ -10,6 +10,7 @@ config ARM64 + select ARM_AMBA + select ARM_ARCH_TIMER + select ARM_GIC ++ select ARM_GIC_V3 + select BUILDTIME_EXTABLE_SORT + select CLONE_BACKWARDS + select COMMON_CLK +diff --git a/arch/arm64/boot/dts/apm-mustang.dts b/arch/arm64/boot/dts/apm-mustang.dts +index 6541962..b2f5622 100644 +--- a/arch/arm64/boot/dts/apm-mustang.dts ++++ b/arch/arm64/boot/dts/apm-mustang.dts +@@ -28,3 +28,7 @@ + &serial0 { + status = "ok"; + }; ++ ++&menet { ++ status = "ok"; ++}; +diff --git a/arch/arm64/boot/dts/apm-storm.dtsi b/arch/arm64/boot/dts/apm-storm.dtsi +index 40aa96c..846ee3a 100644 +--- a/arch/arm64/boot/dts/apm-storm.dtsi ++++ b/arch/arm64/boot/dts/apm-storm.dtsi +@@ -24,56 +24,56 @@ + compatible = "apm,potenza", "arm,armv8"; + reg = <0x0 0x000>; + enable-method = "spin-table"; +- cpu-release-addr = <0x1 0x0000fff8>; ++ cpu-release-addr = <0x40 0x0000f000>; + }; + cpu@001 { + device_type = "cpu"; + compatible = "apm,potenza", "arm,armv8"; + reg = <0x0 0x001>; + enable-method = "spin-table"; +- cpu-release-addr = <0x1 0x0000fff8>; ++ cpu-release-addr = <0x40 0x0000f000>; + }; + cpu@100 { + device_type = "cpu"; + compatible = "apm,potenza", "arm,armv8"; + reg = <0x0 0x100>; + enable-method = "spin-table"; +- cpu-release-addr = <0x1 0x0000fff8>; ++ cpu-release-addr = <0x40 
0x0000f000>; + }; + cpu@101 { + device_type = "cpu"; + compatible = "apm,potenza", "arm,armv8"; + reg = <0x0 0x101>; + enable-method = "spin-table"; +- cpu-release-addr = <0x1 0x0000fff8>; ++ cpu-release-addr = <0x40 0x0000f000>; + }; + cpu@200 { + device_type = "cpu"; + compatible = "apm,potenza", "arm,armv8"; + reg = <0x0 0x200>; + enable-method = "spin-table"; +- cpu-release-addr = <0x1 0x0000fff8>; ++ cpu-release-addr = <0x40 0x0000f000>; + }; + cpu@201 { + device_type = "cpu"; + compatible = "apm,potenza", "arm,armv8"; + reg = <0x0 0x201>; + enable-method = "spin-table"; +- cpu-release-addr = <0x1 0x0000fff8>; ++ cpu-release-addr = <0x40 0x0000f000>; + }; + cpu@300 { + device_type = "cpu"; + compatible = "apm,potenza", "arm,armv8"; + reg = <0x0 0x300>; + enable-method = "spin-table"; +- cpu-release-addr = <0x1 0x0000fff8>; ++ cpu-release-addr = <0x40 0x0000f000>; + }; + cpu@301 { + device_type = "cpu"; + compatible = "apm,potenza", "arm,armv8"; + reg = <0x0 0x301>; + enable-method = "spin-table"; +- cpu-release-addr = <0x1 0x0000fff8>; ++ cpu-release-addr = <0x40 0x0000f000>; + }; + }; + +@@ -97,6 +97,11 @@ + clock-frequency = <50000000>; + }; + ++ pmu { ++ compatible = "arm,armv8-pmuv3"; ++ interrupts = <1 12 0xff04>; ++ }; ++ + soc { + compatible = "simple-bus"; + #address-cells = <2>; +@@ -167,14 +172,13 @@ + clock-output-names = "ethclk"; + }; + +- eth8clk: eth8clk { ++ menetclk: menetclk { + compatible = "apm,xgene-device-clock"; + #clock-cells = <1>; + clocks = <ðclk 0>; +- clock-names = "eth8clk"; + reg = <0x0 0x1702C000 0x0 0x1000>; + reg-names = "csr-reg"; +- clock-output-names = "eth8clk"; ++ clock-output-names = "menetclk"; + }; + + sataphy1clk: sataphy1clk@1f21c000 { +@@ -278,7 +282,7 @@ + compatible = "ns16550a"; + reg = <0 0x1c020000 0x0 0x1000>; + reg-shift = <2>; +- clock-frequency = <10000000>; /* Updated by bootloader */ ++ clock-frequency = <50000000>; /* Updated by bootloader */ + interrupt-parent = <&gic>; + interrupts = <0x0 0x4c 0x4>; + }; +@@ -397,5 +401,30 @@ + #clock-cells = <1>; + clocks = <&rtcclk 0>; + }; ++ ++ menet: ethernet@17020000 { ++ compatible = "apm,xgene-enet"; ++ status = "disabled"; ++ reg = <0x0 0x17020000 0x0 0xd100>, ++ <0x0 0X17030000 0x0 0X400>, ++ <0x0 0X10000000 0x0 0X200>; ++ reg-names = "enet_csr", "ring_csr", "ring_cmd"; ++ interrupts = <0x0 0x3c 0x4>; ++ dma-coherent; ++ clocks = <&menetclk 0>; ++ local-mac-address = [00 00 00 00 00 00]; ++ phy-connection-type = "rgmii"; ++ phy-handle = <&menetphy>; ++ mdio { ++ compatible = "apm,xgene-mdio"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ menetphy: menetphy@3 { ++ compatible = "ethernet-phy-id001c.c915"; ++ reg = <0x3>; ++ }; ++ ++ }; ++ }; + }; + }; +diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h +index 01d3aab..8186df6 100644 +--- a/arch/arm64/include/asm/elf.h ++++ b/arch/arm64/include/asm/elf.h +@@ -114,7 +114,8 @@ typedef struct user_fpsimd_state elf_fpregset_t; + */ + #define elf_check_arch(x) ((x)->e_machine == EM_AARCH64) + +-#define elf_read_implies_exec(ex,stk) (stk != EXSTACK_DISABLE_X) ++#define elf_read_implies_exec(ex,stk) (test_thread_flag(TIF_32BIT) \ ++ ? 
(stk == EXSTACK_ENABLE_X) : 0) + + #define CORE_DUMP_USE_REGSET + #define ELF_EXEC_PAGESIZE PAGE_SIZE +diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h +index 3d69030..cc83520 100644 +--- a/arch/arm64/include/asm/kvm_arm.h ++++ b/arch/arm64/include/asm/kvm_arm.h +@@ -76,9 +76,10 @@ + */ + #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \ + HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \ +- HCR_AMO | HCR_IMO | HCR_FMO | \ +- HCR_SWIO | HCR_TIDCP | HCR_RW) ++ HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW) + #define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF) ++#define HCR_INT_OVERRIDE (HCR_FMO | HCR_IMO) ++ + + /* Hyp System Control Register (SCTLR_EL2) bits */ + #define SCTLR_EL2_EE (1 << 25) +diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h +index 9fcd54b..a28c35b 100644 +--- a/arch/arm64/include/asm/kvm_asm.h ++++ b/arch/arm64/include/asm/kvm_asm.h +@@ -18,6 +18,8 @@ + #ifndef __ARM_KVM_ASM_H__ + #define __ARM_KVM_ASM_H__ + ++#include ++ + /* + * 0 is reserved as an invalid value. + * Order *must* be kept in sync with the hyp switch code. +@@ -96,13 +98,21 @@ extern char __kvm_hyp_init_end[]; + + extern char __kvm_hyp_vector[]; + +-extern char __kvm_hyp_code_start[]; +-extern char __kvm_hyp_code_end[]; ++#define __kvm_hyp_code_start __hyp_text_start ++#define __kvm_hyp_code_end __hyp_text_end + + extern void __kvm_flush_vm_context(void); + extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); + + extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu); ++ ++extern u64 __vgic_v3_get_ich_vtr_el2(void); ++ ++extern char __save_vgic_v2_state[]; ++extern char __restore_vgic_v2_state[]; ++extern char __save_vgic_v3_state[]; ++extern char __restore_vgic_v3_state[]; ++ + #endif + + #endif /* __ARM_KVM_ASM_H__ */ +diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h +index 92242ce..4ae9213 100644 +--- a/arch/arm64/include/asm/kvm_host.h ++++ b/arch/arm64/include/asm/kvm_host.h +@@ -200,4 +200,32 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr, + hyp_stack_ptr, vector_ptr); + } + ++struct vgic_sr_vectors { ++ void *save_vgic; ++ void *restore_vgic; ++}; ++ ++static inline void vgic_arch_setup(const struct vgic_params *vgic) ++{ ++ extern struct vgic_sr_vectors __vgic_sr_vectors; ++ ++ switch(vgic->type) ++ { ++ case VGIC_V2: ++ __vgic_sr_vectors.save_vgic = __save_vgic_v2_state; ++ __vgic_sr_vectors.restore_vgic = __restore_vgic_v2_state; ++ break; ++ ++#ifdef CONFIG_ARM_GIC_V3 ++ case VGIC_V3: ++ __vgic_sr_vectors.save_vgic = __save_vgic_v3_state; ++ __vgic_sr_vectors.restore_vgic = __restore_vgic_v3_state; ++ break; ++#endif ++ ++ default: ++ BUG(); ++ } ++} ++ + #endif /* __ARM64_KVM_HOST_H__ */ +diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h +index b9349c4..e0f37ef 100644 +--- a/arch/arm64/include/asm/tlbflush.h ++++ b/arch/arm64/include/asm/tlbflush.h +@@ -98,8 +98,8 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, + dsb(ish); + } + +-static inline void flush_tlb_range(struct vm_area_struct *vma, +- unsigned long start, unsigned long end) ++static inline void __flush_tlb_range(struct vm_area_struct *vma, ++ unsigned long start, unsigned long end) + { + unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48; + unsigned long addr; +@@ -112,7 +112,9 @@ static inline void flush_tlb_range(struct vm_area_struct *vma, + dsb(ish); + } + +-static inline void flush_tlb_kernel_range(unsigned long start, 
unsigned long end) ++#define MAX_TLB_LOOP 128 ++ ++static inline void __flush_tlb_kernel_range(unsigned long start, unsigned long end) + { + unsigned long addr; + start >>= 12; +@@ -124,6 +126,23 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end + dsb(ish); + } + ++static inline void flush_tlb_range(struct vm_area_struct *vma, ++ unsigned long start, unsigned long end) ++{ ++ if (((end - start) >> PAGE_SHIFT) < MAX_TLB_LOOP) ++ __flush_tlb_range(vma, start, end); ++ else ++ flush_tlb_mm(vma->vm_mm); ++} ++ ++static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end) ++{ ++ if (((end - start) >> PAGE_SHIFT) < MAX_TLB_LOOP) ++ __flush_tlb_kernel_range(start, end); ++ else ++ flush_tlb_all(); ++} ++ + /* + * On AArch64, the cache coherency is handled via the set_pte_at() function. + */ +diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h +index 215ad46..7a5df52 100644 +--- a/arch/arm64/include/asm/virt.h ++++ b/arch/arm64/include/asm/virt.h +@@ -50,6 +50,10 @@ static inline bool is_hyp_mode_mismatched(void) + return __boot_cpu_mode[0] != __boot_cpu_mode[1]; + } + ++/* The section containing the hypervisor text */ ++extern char __hyp_text_start[]; ++extern char __hyp_text_end[]; ++ + #endif /* __ASSEMBLY__ */ + + #endif /* ! __ASM__VIRT_H */ +diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c +index 646f888..e74654c 100644 +--- a/arch/arm64/kernel/asm-offsets.c ++++ b/arch/arm64/kernel/asm-offsets.c +@@ -129,13 +129,24 @@ int main(void) + DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled)); + DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); + DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu)); +- DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr)); +- DEFINE(VGIC_CPU_VMCR, offsetof(struct vgic_cpu, vgic_vmcr)); +- DEFINE(VGIC_CPU_MISR, offsetof(struct vgic_cpu, vgic_misr)); +- DEFINE(VGIC_CPU_EISR, offsetof(struct vgic_cpu, vgic_eisr)); +- DEFINE(VGIC_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_elrsr)); +- DEFINE(VGIC_CPU_APR, offsetof(struct vgic_cpu, vgic_apr)); +- DEFINE(VGIC_CPU_LR, offsetof(struct vgic_cpu, vgic_lr)); ++ DEFINE(VGIC_SAVE_FN, offsetof(struct vgic_sr_vectors, save_vgic)); ++ DEFINE(VGIC_RESTORE_FN, offsetof(struct vgic_sr_vectors, restore_vgic)); ++ DEFINE(VGIC_SR_VECTOR_SZ, sizeof(struct vgic_sr_vectors)); ++ DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr)); ++ DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr)); ++ DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr)); ++ DEFINE(VGIC_V2_CPU_EISR, offsetof(struct vgic_cpu, vgic_v2.vgic_eisr)); ++ DEFINE(VGIC_V2_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr)); ++ DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr)); ++ DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr)); ++ DEFINE(VGIC_V3_CPU_HCR, offsetof(struct vgic_cpu, vgic_v3.vgic_hcr)); ++ DEFINE(VGIC_V3_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v3.vgic_vmcr)); ++ DEFINE(VGIC_V3_CPU_MISR, offsetof(struct vgic_cpu, vgic_v3.vgic_misr)); ++ DEFINE(VGIC_V3_CPU_EISR, offsetof(struct vgic_cpu, vgic_v3.vgic_eisr)); ++ DEFINE(VGIC_V3_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v3.vgic_elrsr)); ++ DEFINE(VGIC_V3_CPU_AP0R, offsetof(struct vgic_cpu, vgic_v3.vgic_ap0r)); ++ DEFINE(VGIC_V3_CPU_AP1R, offsetof(struct vgic_cpu, vgic_v3.vgic_ap1r)); ++ DEFINE(VGIC_V3_CPU_LR, offsetof(struct vgic_cpu, vgic_v3.vgic_lr)); + DEFINE(VGIC_CPU_NR_LR, 
offsetof(struct vgic_cpu, nr_lr)); + DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr)); + DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base)); +diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c +index 14db1f6..453b7f8 100644 +--- a/arch/arm64/kernel/efi.c ++++ b/arch/arm64/kernel/efi.c +@@ -467,3 +467,14 @@ static int __init arm64_enter_virtual_mode(void) + return 0; + } + early_initcall(arm64_enter_virtual_mode); ++ ++/* ++ * If nothing else is handling pm_power_off, use EFI ++ * ++ * This is called from a late_initcall after other mechanisms ++ * have had a chance to register a handler. ++ */ ++bool efi_poweroff_required(void) ++{ ++ return pm_power_off == NULL; ++} +diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S +index a96d3a6..871b4ee 100644 +--- a/arch/arm64/kernel/head.S ++++ b/arch/arm64/kernel/head.S +@@ -22,6 +22,7 @@ + + #include + #include ++#include + + #include + #include +@@ -296,6 +297,23 @@ CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1 + msr cnthctl_el2, x0 + msr cntvoff_el2, xzr // Clear virtual offset + ++#ifdef CONFIG_ARM_GIC_V3 ++ /* GICv3 system register access */ ++ mrs x0, id_aa64pfr0_el1 ++ ubfx x0, x0, #24, #4 ++ cmp x0, #1 ++ b.ne 3f ++ ++ mrs x0, ICC_SRE_EL2 ++ orr x0, x0, #1 // Set ICC_SRE_EL2.SRE==1 ++ orr x0, x0, #(1 << 3) // Set ICC_SRE_EL2.Enable==1 ++ msr ICC_SRE_EL2, x0 ++ isb // Make sure SRE is now 1 ++ msr ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults ++ ++3: ++#endif ++ + /* Populate ID registers. */ + mrs x0, midr_el1 + mrs x1, mpidr_el1 +diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S +index 0959611..a272f33 100644 +--- a/arch/arm64/kernel/hyp-stub.S ++++ b/arch/arm64/kernel/hyp-stub.S +@@ -19,6 +19,7 @@ + + #include + #include ++#include + + #include + #include +diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c +index 43b7c34..ec5cbbe 100644 +--- a/arch/arm64/kernel/process.c ++++ b/arch/arm64/kernel/process.c +@@ -43,6 +43,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -176,6 +177,11 @@ void machine_restart(char *cmd) + arm_pm_restart(reboot_mode, cmd); + + /* ++ * If all else fails, try EFI ++ */ ++ efi_reboot(reboot_mode, cmd); ++ ++ /* + * Whoops - the architecture was unable to reboot. 
+ */ + printk("Reboot failed -- System halted\n"); +diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile +index 72a9fd5..32a0961 100644 +--- a/arch/arm64/kvm/Makefile ++++ b/arch/arm64/kvm/Makefile +@@ -20,4 +20,8 @@ kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o + kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o + + kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o ++kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o ++kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v2-switch.o ++kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3.o ++kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v3-switch.o + kvm-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o +diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S +index b0d1512..5945f3b 100644 +--- a/arch/arm64/kvm/hyp.S ++++ b/arch/arm64/kvm/hyp.S +@@ -16,7 +16,6 @@ + */ + + #include +-#include + + #include + #include +@@ -36,9 +35,6 @@ + .pushsection .hyp.text, "ax" + .align PAGE_SHIFT + +-__kvm_hyp_code_start: +- .globl __kvm_hyp_code_start +- + .macro save_common_regs + // x2: base address for cpu context + // x3: tmp register +@@ -339,11 +335,8 @@ __kvm_hyp_code_start: + .endm + + .macro activate_traps +- ldr x2, [x0, #VCPU_IRQ_LINES] +- ldr x1, [x0, #VCPU_HCR_EL2] +- orr x2, x2, x1 +- msr hcr_el2, x2 +- ++ ldr x2, [x0, #VCPU_HCR_EL2] ++ msr hcr_el2, x2 + ldr x2, =(CPTR_EL2_TTA) + msr cptr_el2, x2 + +@@ -379,100 +372,33 @@ __kvm_hyp_code_start: + .endm + + /* +- * Save the VGIC CPU state into memory +- * x0: Register pointing to VCPU struct +- * Do not corrupt x1!!! ++ * Call into the vgic backend for state saving + */ + .macro save_vgic_state +- /* Get VGIC VCTRL base into x2 */ +- ldr x2, [x0, #VCPU_KVM] +- kern_hyp_va x2 +- ldr x2, [x2, #KVM_VGIC_VCTRL] +- kern_hyp_va x2 +- cbz x2, 2f // disabled +- +- /* Compute the address of struct vgic_cpu */ +- add x3, x0, #VCPU_VGIC_CPU +- +- /* Save all interesting registers */ +- ldr w4, [x2, #GICH_HCR] +- ldr w5, [x2, #GICH_VMCR] +- ldr w6, [x2, #GICH_MISR] +- ldr w7, [x2, #GICH_EISR0] +- ldr w8, [x2, #GICH_EISR1] +- ldr w9, [x2, #GICH_ELRSR0] +- ldr w10, [x2, #GICH_ELRSR1] +- ldr w11, [x2, #GICH_APR] +-CPU_BE( rev w4, w4 ) +-CPU_BE( rev w5, w5 ) +-CPU_BE( rev w6, w6 ) +-CPU_BE( rev w7, w7 ) +-CPU_BE( rev w8, w8 ) +-CPU_BE( rev w9, w9 ) +-CPU_BE( rev w10, w10 ) +-CPU_BE( rev w11, w11 ) +- +- str w4, [x3, #VGIC_CPU_HCR] +- str w5, [x3, #VGIC_CPU_VMCR] +- str w6, [x3, #VGIC_CPU_MISR] +- str w7, [x3, #VGIC_CPU_EISR] +- str w8, [x3, #(VGIC_CPU_EISR + 4)] +- str w9, [x3, #VGIC_CPU_ELRSR] +- str w10, [x3, #(VGIC_CPU_ELRSR + 4)] +- str w11, [x3, #VGIC_CPU_APR] +- +- /* Clear GICH_HCR */ +- str wzr, [x2, #GICH_HCR] +- +- /* Save list registers */ +- add x2, x2, #GICH_LR0 +- ldr w4, [x3, #VGIC_CPU_NR_LR] +- add x3, x3, #VGIC_CPU_LR +-1: ldr w5, [x2], #4 +-CPU_BE( rev w5, w5 ) +- str w5, [x3], #4 +- sub w4, w4, #1 +- cbnz w4, 1b +-2: ++ adr x24, __vgic_sr_vectors ++ ldr x24, [x24, VGIC_SAVE_FN] ++ kern_hyp_va x24 ++ blr x24 ++ mrs x24, hcr_el2 ++ mov x25, #HCR_INT_OVERRIDE ++ neg x25, x25 ++ and x24, x24, x25 ++ msr hcr_el2, x24 + .endm + + /* +- * Restore the VGIC CPU state from memory +- * x0: Register pointing to VCPU struct ++ * Call into the vgic backend for state restoring + */ + .macro restore_vgic_state +- /* Get VGIC VCTRL base into x2 */ +- ldr x2, [x0, #VCPU_KVM] +- kern_hyp_va x2 +- ldr x2, [x2, #KVM_VGIC_VCTRL] +- kern_hyp_va x2 +- cbz x2, 2f // disabled +- +- /* Compute the address of struct vgic_cpu */ +- add x3, x0, #VCPU_VGIC_CPU +- +- /* We only restore a minimal 
set of registers */ +- ldr w4, [x3, #VGIC_CPU_HCR] +- ldr w5, [x3, #VGIC_CPU_VMCR] +- ldr w6, [x3, #VGIC_CPU_APR] +-CPU_BE( rev w4, w4 ) +-CPU_BE( rev w5, w5 ) +-CPU_BE( rev w6, w6 ) +- +- str w4, [x2, #GICH_HCR] +- str w5, [x2, #GICH_VMCR] +- str w6, [x2, #GICH_APR] +- +- /* Restore list registers */ +- add x2, x2, #GICH_LR0 +- ldr w4, [x3, #VGIC_CPU_NR_LR] +- add x3, x3, #VGIC_CPU_LR +-1: ldr w5, [x3], #4 +-CPU_BE( rev w5, w5 ) +- str w5, [x2], #4 +- sub w4, w4, #1 +- cbnz w4, 1b +-2: ++ mrs x24, hcr_el2 ++ ldr x25, [x0, #VCPU_IRQ_LINES] ++ orr x24, x24, #HCR_INT_OVERRIDE ++ orr x24, x24, x25 ++ msr hcr_el2, x24 ++ adr x24, __vgic_sr_vectors ++ ldr x24, [x24, #VGIC_RESTORE_FN] ++ kern_hyp_va x24 ++ blr x24 + .endm + + .macro save_timer_state +@@ -653,6 +579,12 @@ ENTRY(__kvm_flush_vm_context) + ret + ENDPROC(__kvm_flush_vm_context) + ++ // struct vgic_sr_vectors __vgi_sr_vectors; ++ .align 3 ++ENTRY(__vgic_sr_vectors) ++ .skip VGIC_SR_VECTOR_SZ ++ENDPROC(__vgic_sr_vectors) ++ + __kvm_hyp_panic: + // Guess the context by looking at VTTBR: + // If zero, then we're already a host. +@@ -880,7 +812,4 @@ ENTRY(__kvm_hyp_vector) + ventry el1_error_invalid // Error 32-bit EL1 + ENDPROC(__kvm_hyp_vector) + +-__kvm_hyp_code_end: +- .globl __kvm_hyp_code_end +- + .popsection +diff --git a/arch/arm64/kvm/vgic-v2-switch.S b/arch/arm64/kvm/vgic-v2-switch.S +new file mode 100644 +index 0000000..ae21177 +--- /dev/null ++++ b/arch/arm64/kvm/vgic-v2-switch.S +@@ -0,0 +1,133 @@ ++/* ++ * Copyright (C) 2012,2013 - ARM Ltd ++ * Author: Marc Zyngier ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see . ++ */ ++ ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++ .text ++ .pushsection .hyp.text, "ax" ++ ++/* ++ * Save the VGIC CPU state into memory ++ * x0: Register pointing to VCPU struct ++ * Do not corrupt x1!!! 
++ */ ++ENTRY(__save_vgic_v2_state) ++__save_vgic_v2_state: ++ /* Get VGIC VCTRL base into x2 */ ++ ldr x2, [x0, #VCPU_KVM] ++ kern_hyp_va x2 ++ ldr x2, [x2, #KVM_VGIC_VCTRL] ++ kern_hyp_va x2 ++ cbz x2, 2f // disabled ++ ++ /* Compute the address of struct vgic_cpu */ ++ add x3, x0, #VCPU_VGIC_CPU ++ ++ /* Save all interesting registers */ ++ ldr w4, [x2, #GICH_HCR] ++ ldr w5, [x2, #GICH_VMCR] ++ ldr w6, [x2, #GICH_MISR] ++ ldr w7, [x2, #GICH_EISR0] ++ ldr w8, [x2, #GICH_EISR1] ++ ldr w9, [x2, #GICH_ELRSR0] ++ ldr w10, [x2, #GICH_ELRSR1] ++ ldr w11, [x2, #GICH_APR] ++CPU_BE( rev w4, w4 ) ++CPU_BE( rev w5, w5 ) ++CPU_BE( rev w6, w6 ) ++CPU_BE( rev w7, w7 ) ++CPU_BE( rev w8, w8 ) ++CPU_BE( rev w9, w9 ) ++CPU_BE( rev w10, w10 ) ++CPU_BE( rev w11, w11 ) ++ ++ str w4, [x3, #VGIC_V2_CPU_HCR] ++ str w5, [x3, #VGIC_V2_CPU_VMCR] ++ str w6, [x3, #VGIC_V2_CPU_MISR] ++ str w7, [x3, #VGIC_V2_CPU_EISR] ++ str w8, [x3, #(VGIC_V2_CPU_EISR + 4)] ++ str w9, [x3, #VGIC_V2_CPU_ELRSR] ++ str w10, [x3, #(VGIC_V2_CPU_ELRSR + 4)] ++ str w11, [x3, #VGIC_V2_CPU_APR] ++ ++ /* Clear GICH_HCR */ ++ str wzr, [x2, #GICH_HCR] ++ ++ /* Save list registers */ ++ add x2, x2, #GICH_LR0 ++ ldr w4, [x3, #VGIC_CPU_NR_LR] ++ add x3, x3, #VGIC_V2_CPU_LR ++1: ldr w5, [x2], #4 ++CPU_BE( rev w5, w5 ) ++ str w5, [x3], #4 ++ sub w4, w4, #1 ++ cbnz w4, 1b ++2: ++ ret ++ENDPROC(__save_vgic_v2_state) ++ ++/* ++ * Restore the VGIC CPU state from memory ++ * x0: Register pointing to VCPU struct ++ */ ++ENTRY(__restore_vgic_v2_state) ++__restore_vgic_v2_state: ++ /* Get VGIC VCTRL base into x2 */ ++ ldr x2, [x0, #VCPU_KVM] ++ kern_hyp_va x2 ++ ldr x2, [x2, #KVM_VGIC_VCTRL] ++ kern_hyp_va x2 ++ cbz x2, 2f // disabled ++ ++ /* Compute the address of struct vgic_cpu */ ++ add x3, x0, #VCPU_VGIC_CPU ++ ++ /* We only restore a minimal set of registers */ ++ ldr w4, [x3, #VGIC_V2_CPU_HCR] ++ ldr w5, [x3, #VGIC_V2_CPU_VMCR] ++ ldr w6, [x3, #VGIC_V2_CPU_APR] ++CPU_BE( rev w4, w4 ) ++CPU_BE( rev w5, w5 ) ++CPU_BE( rev w6, w6 ) ++ ++ str w4, [x2, #GICH_HCR] ++ str w5, [x2, #GICH_VMCR] ++ str w6, [x2, #GICH_APR] ++ ++ /* Restore list registers */ ++ add x2, x2, #GICH_LR0 ++ ldr w4, [x3, #VGIC_CPU_NR_LR] ++ add x3, x3, #VGIC_V2_CPU_LR ++1: ldr w5, [x3], #4 ++CPU_BE( rev w5, w5 ) ++ str w5, [x2], #4 ++ sub w4, w4, #1 ++ cbnz w4, 1b ++2: ++ ret ++ENDPROC(__restore_vgic_v2_state) ++ ++ .popsection +diff --git a/arch/arm64/kvm/vgic-v3-switch.S b/arch/arm64/kvm/vgic-v3-switch.S +new file mode 100644 +index 0000000..4ede9d8 +--- /dev/null ++++ b/arch/arm64/kvm/vgic-v3-switch.S +@@ -0,0 +1,266 @@ ++/* ++ * Copyright (C) 2012,2013 - ARM Ltd ++ * Author: Marc Zyngier ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see . ++ */ ++ ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++ .text ++ .pushsection .hyp.text, "ax" ++ ++/* ++ * We store LRs in reverse order to let the CPU deal with streaming ++ * access. Use this macro to make it look saner... 
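The __save_vgic_v2_state/__restore_vgic_v2_state pair in vgic-v2-switch.S above is the code lifted out of hyp.S essentially unchanged. For orientation, the save half in C looks roughly like this, which is approximately what the shared $(KVM)/arm/vgic-v2.c backend added to the Makefile earlier does for 32-bit ARM; the vgic_cpu field names are assumptions of this sketch, and readl_relaxed() absorbs the byte swaps the assembly spells out with CPU_BE(rev):

#include <linux/irqchip/arm-gic.h>

/* Rough C rendering of __save_vgic_v2_state; layout assumed, not quoted. */
static void save_vgic_v2_sketch(struct vgic_cpu *cpu, void __iomem *base)
{
        int i;

        if (!base)      /* vgic disabled */
                return;

        cpu->vgic_v2.vgic_hcr      = readl_relaxed(base + GICH_HCR);
        cpu->vgic_v2.vgic_vmcr     = readl_relaxed(base + GICH_VMCR);
        cpu->vgic_v2.vgic_misr     = readl_relaxed(base + GICH_MISR);
        cpu->vgic_v2.vgic_eisr[0]  = readl_relaxed(base + GICH_EISR0);
        cpu->vgic_v2.vgic_eisr[1]  = readl_relaxed(base + GICH_EISR1);
        cpu->vgic_v2.vgic_elrsr[0] = readl_relaxed(base + GICH_ELRSR0);
        cpu->vgic_v2.vgic_elrsr[1] = readl_relaxed(base + GICH_ELRSR1);
        cpu->vgic_v2.vgic_apr      = readl_relaxed(base + GICH_APR);

        writel_relaxed(0, base + GICH_HCR);     /* quiesce the interface */

        for (i = 0; i < cpu->nr_lr; i++)
                cpu->vgic_v2.vgic_lr[i] =
                        readl_relaxed(base + GICH_LR0 + 4 * i);
}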
++ */ ++#define LR_OFFSET(n) (VGIC_V3_CPU_LR + (15 - n) * 8) ++ ++/* ++ * Save the VGIC CPU state into memory ++ * x0: Register pointing to VCPU struct ++ * Do not corrupt x1!!! ++ */ ++.macro save_vgic_v3_state ++ // Compute the address of struct vgic_cpu ++ add x3, x0, #VCPU_VGIC_CPU ++ ++ // Make sure stores to the GIC via the memory mapped interface ++ // are now visible to the system register interface ++ dsb st ++ ++ // Save all interesting registers ++ mrs x4, ICH_HCR_EL2 ++ mrs x5, ICH_VMCR_EL2 ++ mrs x6, ICH_MISR_EL2 ++ mrs x7, ICH_EISR_EL2 ++ mrs x8, ICH_ELSR_EL2 ++ ++ str w4, [x3, #VGIC_V3_CPU_HCR] ++ str w5, [x3, #VGIC_V3_CPU_VMCR] ++ str w6, [x3, #VGIC_V3_CPU_MISR] ++ str w7, [x3, #VGIC_V3_CPU_EISR] ++ str w8, [x3, #VGIC_V3_CPU_ELRSR] ++ ++ msr ICH_HCR_EL2, xzr ++ ++ mrs x21, ICH_VTR_EL2 ++ mvn w22, w21 ++ ubfiz w23, w22, 2, 4 // w23 = (15 - ListRegs) * 4 ++ ++ adr x24, 1f ++ add x24, x24, x23 ++ br x24 ++ ++1: ++ mrs x20, ICH_LR15_EL2 ++ mrs x19, ICH_LR14_EL2 ++ mrs x18, ICH_LR13_EL2 ++ mrs x17, ICH_LR12_EL2 ++ mrs x16, ICH_LR11_EL2 ++ mrs x15, ICH_LR10_EL2 ++ mrs x14, ICH_LR9_EL2 ++ mrs x13, ICH_LR8_EL2 ++ mrs x12, ICH_LR7_EL2 ++ mrs x11, ICH_LR6_EL2 ++ mrs x10, ICH_LR5_EL2 ++ mrs x9, ICH_LR4_EL2 ++ mrs x8, ICH_LR3_EL2 ++ mrs x7, ICH_LR2_EL2 ++ mrs x6, ICH_LR1_EL2 ++ mrs x5, ICH_LR0_EL2 ++ ++ adr x24, 1f ++ add x24, x24, x23 ++ br x24 ++ ++1: ++ str x20, [x3, #LR_OFFSET(15)] ++ str x19, [x3, #LR_OFFSET(14)] ++ str x18, [x3, #LR_OFFSET(13)] ++ str x17, [x3, #LR_OFFSET(12)] ++ str x16, [x3, #LR_OFFSET(11)] ++ str x15, [x3, #LR_OFFSET(10)] ++ str x14, [x3, #LR_OFFSET(9)] ++ str x13, [x3, #LR_OFFSET(8)] ++ str x12, [x3, #LR_OFFSET(7)] ++ str x11, [x3, #LR_OFFSET(6)] ++ str x10, [x3, #LR_OFFSET(5)] ++ str x9, [x3, #LR_OFFSET(4)] ++ str x8, [x3, #LR_OFFSET(3)] ++ str x7, [x3, #LR_OFFSET(2)] ++ str x6, [x3, #LR_OFFSET(1)] ++ str x5, [x3, #LR_OFFSET(0)] ++ ++ tbnz w21, #29, 6f // 6 bits ++ tbz w21, #30, 5f // 5 bits ++ // 7 bits ++ mrs x20, ICH_AP0R3_EL2 ++ str w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)] ++ mrs x19, ICH_AP0R2_EL2 ++ str w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)] ++6: mrs x18, ICH_AP0R1_EL2 ++ str w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)] ++5: mrs x17, ICH_AP0R0_EL2 ++ str w17, [x3, #VGIC_V3_CPU_AP0R] ++ ++ tbnz w21, #29, 6f // 6 bits ++ tbz w21, #30, 5f // 5 bits ++ // 7 bits ++ mrs x20, ICH_AP1R3_EL2 ++ str w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)] ++ mrs x19, ICH_AP1R2_EL2 ++ str w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)] ++6: mrs x18, ICH_AP1R1_EL2 ++ str w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)] ++5: mrs x17, ICH_AP1R0_EL2 ++ str w17, [x3, #VGIC_V3_CPU_AP1R] ++ ++ // Restore SRE_EL1 access and re-enable SRE at EL1. ++ mrs x5, ICC_SRE_EL2 ++ orr x5, x5, #ICC_SRE_EL2_ENABLE ++ msr ICC_SRE_EL2, x5 ++ isb ++ mov x5, #1 ++ msr ICC_SRE_EL1, x5 ++.endm ++ ++/* ++ * Restore the VGIC CPU state from memory ++ * x0: Register pointing to VCPU struct ++ */ ++.macro restore_vgic_v3_state ++ // Disable SRE_EL1 access. Necessary, otherwise ++ // ICH_VMCR_EL2.VFIQEn becomes one, and FIQ happens... 
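Two tricks in the save macro above are worth spelling out: list registers are stored to memory in reverse order (the LR_OFFSET macro) so the str ladder can stream descending offsets, and both the mrs and the str ladders are entered part-way through so only the LRs the hardware implements are touched. The entry-point arithmetic done by the mvn/ubfiz pair, restated in C:

/*
 * ICH_VTR_EL2[3:0] ("ListRegs") holds the number of implemented list
 * registers minus one; every skipped register corresponds to one
 * 4-byte instruction of the ladder to jump over.
 */
static unsigned int vgic_v3_ladder_skip(u64 ich_vtr_el2)
{
        unsigned int nr_lr = (ich_vtr_el2 & 0xf) + 1;

        return (16 - nr_lr) * 4;        /* == ((~vtr & 0xf) << 2) */
}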
++ msr ICC_SRE_EL1, xzr ++ isb ++ ++ // Compute the address of struct vgic_cpu ++ add x3, x0, #VCPU_VGIC_CPU ++ ++ // Restore all interesting registers ++ ldr w4, [x3, #VGIC_V3_CPU_HCR] ++ ldr w5, [x3, #VGIC_V3_CPU_VMCR] ++ ++ msr ICH_HCR_EL2, x4 ++ msr ICH_VMCR_EL2, x5 ++ ++ mrs x21, ICH_VTR_EL2 ++ ++ tbnz w21, #29, 6f // 6 bits ++ tbz w21, #30, 5f // 5 bits ++ // 7 bits ++ ldr w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)] ++ msr ICH_AP1R3_EL2, x20 ++ ldr w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)] ++ msr ICH_AP1R2_EL2, x19 ++6: ldr w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)] ++ msr ICH_AP1R1_EL2, x18 ++5: ldr w17, [x3, #VGIC_V3_CPU_AP1R] ++ msr ICH_AP1R0_EL2, x17 ++ ++ tbnz w21, #29, 6f // 6 bits ++ tbz w21, #30, 5f // 5 bits ++ // 7 bits ++ ldr w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)] ++ msr ICH_AP0R3_EL2, x20 ++ ldr w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)] ++ msr ICH_AP0R2_EL2, x19 ++6: ldr w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)] ++ msr ICH_AP0R1_EL2, x18 ++5: ldr w17, [x3, #VGIC_V3_CPU_AP0R] ++ msr ICH_AP0R0_EL2, x17 ++ ++ and w22, w21, #0xf ++ mvn w22, w21 ++ ubfiz w23, w22, 2, 4 // w23 = (15 - ListRegs) * 4 ++ ++ adr x24, 1f ++ add x24, x24, x23 ++ br x24 ++ ++1: ++ ldr x20, [x3, #LR_OFFSET(15)] ++ ldr x19, [x3, #LR_OFFSET(14)] ++ ldr x18, [x3, #LR_OFFSET(13)] ++ ldr x17, [x3, #LR_OFFSET(12)] ++ ldr x16, [x3, #LR_OFFSET(11)] ++ ldr x15, [x3, #LR_OFFSET(10)] ++ ldr x14, [x3, #LR_OFFSET(9)] ++ ldr x13, [x3, #LR_OFFSET(8)] ++ ldr x12, [x3, #LR_OFFSET(7)] ++ ldr x11, [x3, #LR_OFFSET(6)] ++ ldr x10, [x3, #LR_OFFSET(5)] ++ ldr x9, [x3, #LR_OFFSET(4)] ++ ldr x8, [x3, #LR_OFFSET(3)] ++ ldr x7, [x3, #LR_OFFSET(2)] ++ ldr x6, [x3, #LR_OFFSET(1)] ++ ldr x5, [x3, #LR_OFFSET(0)] ++ ++ adr x24, 1f ++ add x24, x24, x23 ++ br x24 ++ ++1: ++ msr ICH_LR15_EL2, x20 ++ msr ICH_LR14_EL2, x19 ++ msr ICH_LR13_EL2, x18 ++ msr ICH_LR12_EL2, x17 ++ msr ICH_LR11_EL2, x16 ++ msr ICH_LR10_EL2, x15 ++ msr ICH_LR9_EL2, x14 ++ msr ICH_LR8_EL2, x13 ++ msr ICH_LR7_EL2, x12 ++ msr ICH_LR6_EL2, x11 ++ msr ICH_LR5_EL2, x10 ++ msr ICH_LR4_EL2, x9 ++ msr ICH_LR3_EL2, x8 ++ msr ICH_LR2_EL2, x7 ++ msr ICH_LR1_EL2, x6 ++ msr ICH_LR0_EL2, x5 ++ ++ // Ensure that the above will be visible via the memory-mapped ++ // view of the CPU interface (GICV). ++ isb ++ dsb sy ++ ++ // Prevent the guest from touching the GIC system registers ++ mrs x5, ICC_SRE_EL2 ++ and x5, x5, #~ICC_SRE_EL2_ENABLE ++ msr ICC_SRE_EL2, x5 ++.endm ++ ++ENTRY(__save_vgic_v3_state) ++ save_vgic_v3_state ++ ret ++ENDPROC(__save_vgic_v3_state) ++ ++ENTRY(__restore_vgic_v3_state) ++ restore_vgic_v3_state ++ ret ++ENDPROC(__restore_vgic_v3_state) ++ ++ENTRY(__vgic_v3_get_ich_vtr_el2) ++ mrs x0, ICH_VTR_EL2 ++ ret ++ENDPROC(__vgic_v3_get_ich_vtr_el2) ++ ++ .popsection +diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c +index f43db8a..05d6079 100644 +--- a/arch/arm64/mm/init.c ++++ b/arch/arm64/mm/init.c +@@ -145,8 +145,17 @@ void __init arm64_memblock_init(void) + early_init_fdt_scan_reserved_mem(); + + /* 4GB maximum for 32-bit only capable devices */ +- if (IS_ENABLED(CONFIG_ZONE_DMA)) ++ if (IS_ENABLED(CONFIG_ZONE_DMA)) { + dma_phys_limit = dma_to_phys(NULL, DMA_BIT_MASK(32)) + 1; ++ /* ++ * If platform doesn't have DRAM within the dma_phys_limit, ++ * remove the limit altogether. This allows one kernel (with ++ * CONFIG_ZONE_DMA defined) to support platforms with 32-bit ++ * only devices and platforms with no 32-bit DRAM. 
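The arm64_memblock_init() change above is easiest to see with concrete numbers; the addresses below are made up for illustration:

/*
 * dma_phys_limit = dma_to_phys(NULL, DMA_BIT_MASK(32)) + 1, i.e. the
 * first byte above the 32-bit window (0x1_0000_0000 with a 1:1 mapping).
 *
 *   DRAM starting at 0x8000_0000 (2 GiB): start < limit, so the CMA
 *   buffer stays 32-bit addressable, as before.
 *
 *   DRAM starting at 0x80_0000_0000: nothing below 4 GiB, so
 *   memblock_start_of_DRAM() >= limit, the limit is dropped to 0 and
 *   dma_contiguous_reserve() places CMA without restriction.
 */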
++ */ ++ if (dma_phys_limit <= memblock_start_of_DRAM()) ++ dma_phys_limit = 0; ++ } + dma_contiguous_reserve(dma_phys_limit); + + memblock_allow_resize(); +diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c +index 55d4ba4..deed6fa 100644 +--- a/arch/ia64/kernel/process.c ++++ b/arch/ia64/kernel/process.c +@@ -662,7 +662,7 @@ void + machine_restart (char *restart_cmd) + { + (void) notify_die(DIE_MACHINE_RESTART, restart_cmd, NULL, 0, 0, 0); +- (*efi.reset_system)(EFI_RESET_WARM, 0, 0, NULL); ++ efi_reboot(REBOOT_WARM, NULL); + } + + void +diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c +index 71c52bc..a149c67 100644 +--- a/arch/ia64/kernel/time.c ++++ b/arch/ia64/kernel/time.c +@@ -384,21 +384,6 @@ static struct irqaction timer_irqaction = { + .name = "timer" + }; + +-static struct platform_device rtc_efi_dev = { +- .name = "rtc-efi", +- .id = -1, +-}; +- +-static int __init rtc_init(void) +-{ +- if (platform_device_register(&rtc_efi_dev) < 0) +- printk(KERN_ERR "unable to register rtc device...\n"); +- +- /* not necessarily an error */ +- return 0; +-} +-module_init(rtc_init); +- + void read_persistent_clock(struct timespec *ts) + { + efi_gettimeofday(ts); +diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h +index 1eb5f64..da50c586a 100644 +--- a/arch/x86/include/asm/efi.h ++++ b/arch/x86/include/asm/efi.h +@@ -156,6 +156,8 @@ static inline efi_status_t efi_thunk_set_virtual_address_map( + return EFI_SUCCESS; + } + #endif /* CONFIG_EFI_MIXED */ ++ ++extern bool efi_reboot_required(void); + #else + /* + * IF EFI is not configured, have the EFI calls return -ENOSYS. +@@ -168,6 +170,10 @@ static inline efi_status_t efi_thunk_set_virtual_address_map( + #define efi_call5(_f, _a1, _a2, _a3, _a4, _a5) (-ENOSYS) + #define efi_call6(_f, _a1, _a2, _a3, _a4, _a5, _a6) (-ENOSYS) + static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {} ++static inline bool efi_reboot_required(void) ++{ ++ return false; ++} + #endif /* CONFIG_EFI */ + + #endif /* _ASM_X86_EFI_H */ +diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c +index 52b1157..17962e6 100644 +--- a/arch/x86/kernel/reboot.c ++++ b/arch/x86/kernel/reboot.c +@@ -28,6 +28,7 @@ + #include + #include + #include ++#include + + /* + * Power off function, if any +@@ -401,12 +402,25 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { + + static int __init reboot_init(void) + { ++ int rv; ++ + /* + * Only do the DMI check if reboot_type hasn't been overridden + * on the command line + */ +- if (reboot_default) +- dmi_check_system(reboot_dmi_table); ++ if (!reboot_default) ++ return 0; ++ ++ /* ++ * The DMI quirks table takes precedence. If no quirks entry ++ * matches and the ACPI Hardware Reduced bit is set, force EFI ++ * reboot. ++ */ ++ rv = dmi_check_system(reboot_dmi_table); ++ ++ if (!rv && efi_reboot_required()) ++ reboot_type = BOOT_EFI; ++ + return 0; + } + core_initcall(reboot_init); +@@ -528,11 +542,7 @@ static void native_machine_emergency_restart(void) + break; + + case BOOT_EFI: +- if (efi_enabled(EFI_RUNTIME_SERVICES)) +- efi.reset_system(reboot_mode == REBOOT_WARM ? 
+- EFI_RESET_WARM :
+- EFI_RESET_COLD,
+- EFI_SUCCESS, 0, NULL);
++ efi_reboot(reboot_mode, NULL);
+ reboot_type = BOOT_BIOS;
+ break;
+ 
+diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
+index 05882e4..1db67a7 100644
+--- a/drivers/ata/ahci.h
++++ b/drivers/ata/ahci.h
+@@ -373,6 +373,8 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
+ 
+ int ahci_stop_engine(struct ata_port *ap);
+ void ahci_start_engine(struct ata_port *ap);
++int ahci_restart_engine(struct ata_port *ap);
++void ahci_sw_activity(struct ata_link *link);
+ int ahci_check_ready(struct ata_link *link);
+ int ahci_kick_engine(struct ata_port *ap);
+ int ahci_port_resume(struct ata_port *ap);
+diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
+index 042a9bb..81fbdc9 100644
+--- a/drivers/ata/ahci_xgene.c
++++ b/drivers/ata/ahci_xgene.c
+@@ -78,6 +78,7 @@
+ struct xgene_ahci_context {
+ struct ahci_host_priv *hpriv;
+ struct device *dev;
++ u8 last_cmd[MAX_AHCI_CHN_PERCTR]; /* tracking the last command */
+ void __iomem *csr_core; /* Core CSR address of IP */
+ void __iomem *csr_diag; /* Diag CSR address of IP */
+ void __iomem *csr_axi; /* AXI CSR address of IP */
+@@ -98,20 +99,72 @@ static int xgene_ahci_init_memram(struct xgene_ahci_context *ctx)
+ }
+ 
+ /**
++ * xgene_ahci_qc_issue - Issue commands to the device
++ * @qc: Command to issue
++ *
++ * Due to a H/W errata, for the IDENTIFY DEVICE command the
++ * controller is unable to clear the BSY bit after receiving
++ * the PIO setup FIS, which sends the DMA state machine into
++ * the CMFatalErrorUpdate state and locks it up. Restarting
++ * the DMA engine brings the controller out of the locked-up
++ * state.
++ */
++static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc)
++{
++ struct ata_port *ap = qc->ap;
++ void __iomem *port_mmio = ahci_port_base(ap);
++ struct ahci_port_priv *pp = ap->private_data;
++ struct ahci_host_priv *hpriv = ap->host->private_data;
++ struct xgene_ahci_context *ctx = hpriv->plat_data;
++
++ /* Keep track of the currently active link. It will be used
++ * in the completion path to determine whether an NCQ phase is
++ * in progress.
++ */
++ pp->active_link = qc->dev->link;
++
++ /*
++ * Restart the dma engine if the last command issued
++ * was the IDENTIFY DEVICE command
++ */
++ if (unlikely(ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA))
++ ahci_restart_engine(ap);
++
++ if (qc->tf.protocol == ATA_PROT_NCQ)
++ writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
++
++ if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) {
++ u32 fbs = readl(port_mmio + PORT_FBS);
++ fbs &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
++ fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
++ writel(fbs, port_mmio + PORT_FBS);
++ pp->fbs_last_dev = qc->dev->link->pmp;
++ }
++
++ writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
++
++ /* Save the last command issued */
++ ctx->last_cmd[ap->port_no] = qc->tf.command;
++
++ ahci_sw_activity(qc->dev->link);
++
++ return 0;
++}
++
++/**
+  * xgene_ahci_read_id - Read ID data from the specified device
+  * @dev: device
+  * @tf: proposed taskfile
+  * @id: data buffer
+  *
+  * This custom read ID function is required due to the fact that the HW
+- * does not support DEVSLP and the controller state machine may get stuck
+- * after processing the ID query command.
++ * does not support DEVSLP.
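Distilled, the workaround above is a single per-port rule applied before every command; the helper name below is hypothetical, the real check is inlined in xgene_ahci_qc_issue():

/* Kick the port back to life before the next command if the previous
 * one was IDENTIFY DEVICE, the command that can wedge the DMA engine.
 */
static void xgene_ahci_unwedge_port(struct ata_port *ap,
                                    struct xgene_ahci_context *ctx)
{
        if (unlikely(ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA))
                ahci_restart_engine(ap);
}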
+ */ + static unsigned int xgene_ahci_read_id(struct ata_device *dev, + struct ata_taskfile *tf, u16 *id) + { + u32 err_mask; +- void __iomem *port_mmio = ahci_port_base(dev->link->ap); + + err_mask = ata_do_dev_read_id(dev, tf, id); + if (err_mask) +@@ -133,16 +186,6 @@ static unsigned int xgene_ahci_read_id(struct ata_device *dev, + */ + id[ATA_ID_FEATURE_SUPP] &= ~(1 << 8); + +- /* +- * Due to HW errata, restart the port if no other command active. +- * Otherwise the controller may get stuck. +- */ +- if (!readl(port_mmio + PORT_CMD_ISSUE)) { +- writel(PORT_CMD_FIS_RX, port_mmio + PORT_CMD); +- readl(port_mmio + PORT_CMD); /* Force a barrier */ +- writel(PORT_CMD_FIS_RX | PORT_CMD_START, port_mmio + PORT_CMD); +- readl(port_mmio + PORT_CMD); /* Force a barrier */ +- } + return 0; + } + +@@ -300,6 +343,7 @@ static struct ata_port_operations xgene_ahci_ops = { + .host_stop = xgene_ahci_host_stop, + .hardreset = xgene_ahci_hardreset, + .read_id = xgene_ahci_read_id, ++ .qc_issue = xgene_ahci_qc_issue, + }; + + static const struct ata_port_info xgene_ahci_port_info = { +diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c +index 40ea583..3ec5dc7 100644 +--- a/drivers/ata/libahci.c ++++ b/drivers/ata/libahci.c +@@ -747,6 +747,18 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, + return 0; + } + ++int ahci_restart_engine(struct ata_port *ap) ++{ ++ struct ahci_host_priv *hpriv = ap->host->private_data; ++ ++ ahci_stop_engine(ap); ++ ahci_start_fis_rx(ap); ++ hpriv->start_engine(ap); ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(ahci_restart_engine); ++ + #ifdef CONFIG_PM + static void ahci_power_down(struct ata_port *ap) + { +@@ -886,7 +898,7 @@ int ahci_reset_controller(struct ata_host *host) + } + EXPORT_SYMBOL_GPL(ahci_reset_controller); + +-static void ahci_sw_activity(struct ata_link *link) ++void ahci_sw_activity(struct ata_link *link) + { + struct ata_port *ap = link->ap; + struct ahci_port_priv *pp = ap->private_data; +@@ -899,6 +911,7 @@ static void ahci_sw_activity(struct ata_link *link) + if (!timer_pending(&emp->timer)) + mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10)); + } ++EXPORT_SYMBOL_GPL(ahci_sw_activity); + + static void ahci_sw_activity_blink(unsigned long arg) + { +diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile +index 9553496..c135154 100644 +--- a/drivers/firmware/efi/Makefile ++++ b/drivers/firmware/efi/Makefile +@@ -1,7 +1,7 @@ + # + # Makefile for linux kernel + # +-obj-$(CONFIG_EFI) += efi.o vars.o ++obj-$(CONFIG_EFI) += efi.o vars.o reboot.o + obj-$(CONFIG_EFI_VARS) += efivars.o + obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o + obj-$(CONFIG_UEFI_CPER) += cper.o +diff --git a/drivers/firmware/efi/reboot.c b/drivers/firmware/efi/reboot.c +new file mode 100644 +index 0000000..f94fb95 +--- /dev/null ++++ b/drivers/firmware/efi/reboot.c +@@ -0,0 +1,55 @@ ++/* ++ * Copyright (C) 2014 Intel Corporation; author Matt Fleming ++ */ ++#include ++#include ++ ++int efi_reboot_quirk_mode = -1; ++ ++void efi_reboot(enum reboot_mode reboot_mode, const char *__unused) ++{ ++ int efi_mode; ++ ++ if (!efi_enabled(EFI_RUNTIME_SERVICES)) ++ return; ++ ++ switch (reboot_mode) { ++ case REBOOT_WARM: ++ case REBOOT_SOFT: ++ efi_mode = EFI_RESET_WARM; ++ break; ++ default: ++ efi_mode = EFI_RESET_COLD; ++ break; ++ } ++ ++ /* ++ * If a quirk forced an EFI reset mode, always use that. 
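efi_reboot_quirk_mode is meant to be set from platform quirk code before efi_reboot() runs. A hypothetical consumer sketch (the DMI callback is made up, and an extern declaration visible to it is assumed):

#include <linux/dmi.h>
#include <linux/efi.h>

static int __init example_force_cold_efi_reset(const struct dmi_system_id *d)
{
        /* Pin the EFI reset mode regardless of what reboot= asked for. */
        efi_reboot_quirk_mode = EFI_RESET_COLD;
        return 0;
}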
++ */ ++ if (efi_reboot_quirk_mode != -1) ++ efi_mode = efi_reboot_quirk_mode; ++ ++ efi.reset_system(efi_mode, EFI_SUCCESS, 0, NULL); ++} ++ ++bool __weak efi_poweroff_required(void) ++{ ++ return false; ++} ++ ++static void efi_power_off(void) ++{ ++ efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL); ++} ++ ++static int __init efi_shutdown_init(void) ++{ ++ if (!efi_enabled(EFI_RUNTIME_SERVICES)) ++ return -ENODEV; ++ ++ if (efi_poweroff_required()) ++ pm_power_off = efi_power_off; ++ ++ return 0; ++} ++late_initcall(efi_shutdown_init); +diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig +index bbb746e..7f0c2a3 100644 +--- a/drivers/irqchip/Kconfig ++++ b/drivers/irqchip/Kconfig +@@ -10,6 +10,11 @@ config ARM_GIC + config GIC_NON_BANKED + bool + ++config ARM_GIC_V3 ++ bool ++ select IRQ_DOMAIN ++ select MULTI_IRQ_HANDLER ++ + config ARM_NVIC + bool + select IRQ_DOMAIN +diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile +index 62a13e5..c57e642 100644 +--- a/drivers/irqchip/Makefile ++++ b/drivers/irqchip/Makefile +@@ -15,7 +15,8 @@ obj-$(CONFIG_ORION_IRQCHIP) += irq-orion.o + obj-$(CONFIG_ARCH_SUNXI) += irq-sun4i.o + obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi-nmi.o + obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o +-obj-$(CONFIG_ARM_GIC) += irq-gic.o ++obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o ++obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o + obj-$(CONFIG_ARM_NVIC) += irq-nvic.o + obj-$(CONFIG_ARM_VIC) += irq-vic.o + obj-$(CONFIG_IMGPDC_IRQ) += irq-imgpdc.o +diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c +new file mode 100644 +index 0000000..60ac704 +--- /dev/null ++++ b/drivers/irqchip/irq-gic-common.c +@@ -0,0 +1,115 @@ ++/* ++ * Copyright (C) 2002 ARM Limited, All Rights Reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see . ++ */ ++ ++#include ++#include ++#include ++#include ++ ++#include "irq-gic-common.h" ++ ++void gic_configure_irq(unsigned int irq, unsigned int type, ++ void __iomem *base, void (*sync_access)(void)) ++{ ++ u32 enablemask = 1 << (irq % 32); ++ u32 enableoff = (irq / 32) * 4; ++ u32 confmask = 0x2 << ((irq % 16) * 2); ++ u32 confoff = (irq / 16) * 4; ++ bool enabled = false; ++ u32 val; ++ ++ /* ++ * Read current configuration register, and insert the config ++ * for "irq", depending on "type". ++ */ ++ val = readl_relaxed(base + GIC_DIST_CONFIG + confoff); ++ if (type == IRQ_TYPE_LEVEL_HIGH) ++ val &= ~confmask; ++ else if (type == IRQ_TYPE_EDGE_RISING) ++ val |= confmask; ++ ++ /* ++ * As recommended by the spec, disable the interrupt before changing ++ * the configuration ++ */ ++ if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) { ++ writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff); ++ if (sync_access) ++ sync_access(); ++ enabled = true; ++ } ++ ++ /* ++ * Write back the new configuration, and possibly re-enable ++ * the interrupt. 
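Both GIC drivers funnel trigger-type changes through this shared helper; the only per-driver difference is the sync_access callback, as the irq-gic.c and irq-gic-v3.c hunks further down show. A minimal caller sketch (the two calls illustrate the two styles, not a real sequence):

static void example_make_edge(unsigned int irq, void __iomem *dist_base)
{
        /* GICv2 style: MMIO ordering suffices, no callback needed. */
        gic_configure_irq(irq, IRQ_TYPE_EDGE_RISING, dist_base, NULL);

        /* GICv3 style: wait for GICD_CTLR.RWP after the write. */
        gic_configure_irq(irq, IRQ_TYPE_EDGE_RISING, dist_base,
                          gic_dist_wait_for_rwp);
}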
++ */ ++ writel_relaxed(val, base + GIC_DIST_CONFIG + confoff); ++ ++ if (enabled) ++ writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff); ++ ++ if (sync_access) ++ sync_access(); ++} ++ ++void __init gic_dist_config(void __iomem *base, int gic_irqs, ++ void (*sync_access)(void)) ++{ ++ unsigned int i; ++ ++ /* ++ * Set all global interrupts to be level triggered, active low. ++ */ ++ for (i = 32; i < gic_irqs; i += 16) ++ writel_relaxed(0, base + GIC_DIST_CONFIG + i / 4); ++ ++ /* ++ * Set priority on all global interrupts. ++ */ ++ for (i = 32; i < gic_irqs; i += 4) ++ writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i); ++ ++ /* ++ * Disable all interrupts. Leave the PPI and SGIs alone ++ * as they are enabled by redistributor registers. ++ */ ++ for (i = 32; i < gic_irqs; i += 32) ++ writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i / 8); ++ ++ if (sync_access) ++ sync_access(); ++} ++ ++void gic_cpu_config(void __iomem *base, void (*sync_access)(void)) ++{ ++ int i; ++ ++ /* ++ * Deal with the banked PPI and SGI interrupts - disable all ++ * PPI interrupts, ensure all SGI interrupts are enabled. ++ */ ++ writel_relaxed(0xffff0000, base + GIC_DIST_ENABLE_CLEAR); ++ writel_relaxed(0x0000ffff, base + GIC_DIST_ENABLE_SET); ++ ++ /* ++ * Set priority on PPI and SGI interrupts ++ */ ++ for (i = 0; i < 32; i += 4) ++ writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4); ++ ++ if (sync_access) ++ sync_access(); ++} +diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h +new file mode 100644 +index 0000000..b41f024 +--- /dev/null ++++ b/drivers/irqchip/irq-gic-common.h +@@ -0,0 +1,29 @@ ++/* ++ * Copyright (C) 2002 ARM Limited, All Rights Reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see . ++ */ ++ ++#ifndef _IRQ_GIC_COMMON_H ++#define _IRQ_GIC_COMMON_H ++ ++#include ++#include ++ ++void gic_configure_irq(unsigned int irq, unsigned int type, ++ void __iomem *base, void (*sync_access)(void)); ++void gic_dist_config(void __iomem *base, int gic_irqs, ++ void (*sync_access)(void)); ++void gic_cpu_config(void __iomem *base, void (*sync_access)(void)); ++ ++#endif /* _IRQ_GIC_COMMON_H */ +diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c +new file mode 100644 +index 0000000..c3dd8ad +--- /dev/null ++++ b/drivers/irqchip/irq-gic-v3.c +@@ -0,0 +1,690 @@ ++/* ++ * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved. ++ * Author: Marc Zyngier ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. 
If not, see . ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include ++#include ++#include ++ ++#include "irq-gic-common.h" ++#include "irqchip.h" ++ ++struct gic_chip_data { ++ void __iomem *dist_base; ++ void __iomem **redist_base; ++ void __percpu __iomem **rdist; ++ struct irq_domain *domain; ++ u64 redist_stride; ++ u32 redist_regions; ++ unsigned int irq_nr; ++}; ++ ++static struct gic_chip_data gic_data __read_mostly; ++ ++#define gic_data_rdist() (this_cpu_ptr(gic_data.rdist)) ++#define gic_data_rdist_rd_base() (*gic_data_rdist()) ++#define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K) ++ ++#define DEFAULT_PMR_VALUE 0xf0 ++ ++static inline unsigned int gic_irq(struct irq_data *d) ++{ ++ return d->hwirq; ++} ++ ++static inline int gic_irq_in_rdist(struct irq_data *d) ++{ ++ return gic_irq(d) < 32; ++} ++ ++static inline void __iomem *gic_dist_base(struct irq_data *d) ++{ ++ if (gic_irq_in_rdist(d)) /* SGI+PPI -> SGI_base for this CPU */ ++ return gic_data_rdist_sgi_base(); ++ ++ if (d->hwirq <= 1023) /* SPI -> dist_base */ ++ return gic_data.dist_base; ++ ++ if (d->hwirq >= 8192) ++ BUG(); /* LPI Detected!!! */ ++ ++ return NULL; ++} ++ ++static void gic_do_wait_for_rwp(void __iomem *base) ++{ ++ u32 count = 1000000; /* 1s! */ ++ ++ while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) { ++ count--; ++ if (!count) { ++ pr_err_ratelimited("RWP timeout, gone fishing\n"); ++ return; ++ } ++ cpu_relax(); ++ udelay(1); ++ }; ++} ++ ++/* Wait for completion of a distributor change */ ++static void gic_dist_wait_for_rwp(void) ++{ ++ gic_do_wait_for_rwp(gic_data.dist_base); ++} ++ ++/* Wait for completion of a redistributor change */ ++static void gic_redist_wait_for_rwp(void) ++{ ++ gic_do_wait_for_rwp(gic_data_rdist_rd_base()); ++} ++ ++/* Low level accessors */ ++static u64 gic_read_iar(void) ++{ ++ u64 irqstat; ++ ++ asm volatile("mrs %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat)); ++ return irqstat; ++} ++ ++static void gic_write_pmr(u64 val) ++{ ++ asm volatile("msr " __stringify(ICC_PMR_EL1) ", %0" : : "r" (val)); ++} ++ ++static void gic_write_ctlr(u64 val) ++{ ++ asm volatile("msr " __stringify(ICC_CTLR_EL1) ", %0" : : "r" (val)); ++ isb(); ++} ++ ++static void gic_write_grpen1(u64 val) ++{ ++ asm volatile("msr " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" (val)); ++ isb(); ++} ++ ++static void gic_write_sgi1r(u64 val) ++{ ++ asm volatile("msr " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val)); ++} ++ ++static void gic_enable_sre(void) ++{ ++ u64 val; ++ ++ asm volatile("mrs %0, " __stringify(ICC_SRE_EL1) : "=r" (val)); ++ val |= ICC_SRE_EL1_SRE; ++ asm volatile("msr " __stringify(ICC_SRE_EL1) ", %0" : : "r" (val)); ++ isb(); ++ ++ /* ++ * Need to check that the SRE bit has actually been set. If ++ * not, it means that SRE is disabled at EL2. We're going to ++ * die painfully, and there is nothing we can do about it. ++ * ++ * Kindly inform the luser. ++ */ ++ asm volatile("mrs %0, " __stringify(ICC_SRE_EL1) : "=r" (val)); ++ if (!(val & ICC_SRE_EL1_SRE)) ++ pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n"); ++} ++ ++static void gic_enable_redist(void) ++{ ++ void __iomem *rbase; ++ u32 count = 1000000; /* 1s! 
*/ ++ u32 val; ++ ++ rbase = gic_data_rdist_rd_base(); ++ ++ /* Wake up this CPU redistributor */ ++ val = readl_relaxed(rbase + GICR_WAKER); ++ val &= ~GICR_WAKER_ProcessorSleep; ++ writel_relaxed(val, rbase + GICR_WAKER); ++ ++ while (readl_relaxed(rbase + GICR_WAKER) & GICR_WAKER_ChildrenAsleep) { ++ count--; ++ if (!count) { ++ pr_err_ratelimited("redist didn't wake up...\n"); ++ return; ++ } ++ cpu_relax(); ++ udelay(1); ++ }; ++} ++ ++/* ++ * Routines to acknowledge, disable and enable interrupts ++ */ ++static void gic_poke_irq(struct irq_data *d, u32 offset) ++{ ++ u32 mask = 1 << (gic_irq(d) % 32); ++ void (*rwp_wait)(void); ++ void __iomem *base; ++ ++ if (gic_irq_in_rdist(d)) { ++ base = gic_data_rdist_sgi_base(); ++ rwp_wait = gic_redist_wait_for_rwp; ++ } else { ++ base = gic_data.dist_base; ++ rwp_wait = gic_dist_wait_for_rwp; ++ } ++ ++ writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4); ++ rwp_wait(); ++} ++ ++static int gic_peek_irq(struct irq_data *d, u32 offset) ++{ ++ u32 mask = 1 << (gic_irq(d) % 32); ++ void __iomem *base; ++ ++ if (gic_irq_in_rdist(d)) ++ base = gic_data_rdist_sgi_base(); ++ else ++ base = gic_data.dist_base; ++ ++ return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask); ++} ++ ++static void gic_mask_irq(struct irq_data *d) ++{ ++ gic_poke_irq(d, GICD_ICENABLER); ++} ++ ++static void gic_unmask_irq(struct irq_data *d) ++{ ++ gic_poke_irq(d, GICD_ISENABLER); ++} ++ ++static void gic_eoi_irq(struct irq_data *d) ++{ ++ gic_write_eoir(gic_irq(d)); ++} ++ ++static int gic_set_type(struct irq_data *d, unsigned int type) ++{ ++ unsigned int irq = gic_irq(d); ++ void (*rwp_wait)(void); ++ void __iomem *base; ++ ++ /* Interrupt configuration for SGIs can't be changed */ ++ if (irq < 16) ++ return -EINVAL; ++ ++ if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING) ++ return -EINVAL; ++ ++ if (gic_irq_in_rdist(d)) { ++ base = gic_data_rdist_sgi_base(); ++ rwp_wait = gic_redist_wait_for_rwp; ++ } else { ++ base = gic_data.dist_base; ++ rwp_wait = gic_dist_wait_for_rwp; ++ } ++ ++ gic_configure_irq(irq, type, base, rwp_wait); ++ ++ return 0; ++} ++ ++static u64 gic_mpidr_to_affinity(u64 mpidr) ++{ ++ u64 aff; ++ ++ aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 | ++ MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | ++ MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | ++ MPIDR_AFFINITY_LEVEL(mpidr, 0)) & ~GICD_IROUTER_SPI_MODE_ANY; ++ ++ return aff; ++} ++ ++static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) ++{ ++ u64 irqnr; ++ ++ do { ++ irqnr = gic_read_iar(); ++ ++ if (likely(irqnr > 15 && irqnr < 1020)) { ++ u64 irq = irq_find_mapping(gic_data.domain, irqnr); ++ if (likely(irq)) { ++ handle_IRQ(irq, regs); ++ continue; ++ } ++ ++ WARN_ONCE(true, "Unexpected SPI received!\n"); ++ gic_write_eoir(irqnr); ++ } ++ if (irqnr < 16) { ++ gic_write_eoir(irqnr); ++#ifdef CONFIG_SMP ++ handle_IPI(irqnr, regs); ++#else ++ WARN_ONCE(true, "Unexpected SGI received!\n"); ++#endif ++ continue; ++ } ++ } while (irqnr != 0x3ff); ++} ++ ++static void __init gic_dist_init(void) ++{ ++ unsigned int i; ++ u64 affinity; ++ void __iomem *base = gic_data.dist_base; ++ ++ /* Disable the distributor */ ++ writel_relaxed(0, base + GICD_CTLR); ++ gic_dist_wait_for_rwp(); ++ ++ gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp); ++ ++ /* Enable distributor with ARE, Group1 */ ++ writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1, ++ base + GICD_CTLR); ++ ++ /* ++ * Set all global interrupts to the boot CPU only. 
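A quick worked example of the packing done by gic_mpidr_to_affinity() above:

/*
 * MPIDR_EL1 = 0x0102 (Aff1 = 1, Aff0 = 2) packs to (1 << 8) | 2 = 0x102
 * in the GICD_IROUTER layout (Aff3 at [39:32], Aff2 at [23:16], Aff1 at
 * [15:8], Aff0 at [7:0]); clearing GICD_IROUTER_SPI_MODE_ANY keeps the
 * routing targeted at that one PE instead of "any participating PE".
 */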
ARE must be ++ * enabled. ++ */ ++ affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id())); ++ for (i = 32; i < gic_data.irq_nr; i++) ++ writeq_relaxed(affinity, base + GICD_IROUTER + i * 8); ++} ++ ++static int gic_populate_rdist(void) ++{ ++ u64 mpidr = cpu_logical_map(smp_processor_id()); ++ u64 typer; ++ u32 aff; ++ int i; ++ ++ /* ++ * Convert affinity to a 32bit value that can be matched to ++ * GICR_TYPER bits [63:32]. ++ */ ++ aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 | ++ MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | ++ MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | ++ MPIDR_AFFINITY_LEVEL(mpidr, 0)); ++ ++ for (i = 0; i < gic_data.redist_regions; i++) { ++ void __iomem *ptr = gic_data.redist_base[i]; ++ u32 reg; ++ ++ reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK; ++ if (reg != 0x30 && reg != 0x40) { /* We're in trouble... */ ++ pr_warn("No redistributor present @%p\n", ptr); ++ break; ++ } ++ ++ do { ++ typer = readq_relaxed(ptr + GICR_TYPER); ++ if ((typer >> 32) == aff) { ++ gic_data_rdist_rd_base() = ptr; ++ pr_info("CPU%d: found redistributor %llx @%p\n", ++ smp_processor_id(), ++ (unsigned long long)mpidr, ptr); ++ return 0; ++ } ++ ++ if (gic_data.redist_stride) { ++ ptr += gic_data.redist_stride; ++ } else { ++ ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */ ++ if (typer & GICR_TYPER_VLPIS) ++ ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */ ++ } ++ } while (!(typer & GICR_TYPER_LAST)); ++ } ++ ++ /* We couldn't even deal with ourselves... */ ++ WARN(true, "CPU%d: mpidr %llx has no re-distributor!\n", ++ smp_processor_id(), (unsigned long long)mpidr); ++ return -ENODEV; ++} ++ ++static void gic_cpu_init(void) ++{ ++ void __iomem *rbase; ++ ++ /* Register ourselves with the rest of the world */ ++ if (gic_populate_rdist()) ++ return; ++ ++ gic_enable_redist(); ++ ++ rbase = gic_data_rdist_sgi_base(); ++ ++ gic_cpu_config(rbase, gic_redist_wait_for_rwp); ++ ++ /* Enable system registers */ ++ gic_enable_sre(); ++ ++ /* Set priority mask register */ ++ gic_write_pmr(DEFAULT_PMR_VALUE); ++ ++ /* EOI deactivates interrupt too (mode 0) */ ++ gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir); ++ ++ /* ... and let's hit the road... */ ++ gic_write_grpen1(1); ++} ++ ++#ifdef CONFIG_SMP ++static int gic_secondary_init(struct notifier_block *nfb, ++ unsigned long action, void *hcpu) ++{ ++ if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) ++ gic_cpu_init(); ++ return NOTIFY_OK; ++} ++ ++/* ++ * Notifier for enabling the GIC CPU interface. Set an arbitrarily high ++ * priority because the GIC needs to be up before the ARM generic timers. ++ */ ++static struct notifier_block gic_cpu_notifier = { ++ .notifier_call = gic_secondary_init, ++ .priority = 100, ++}; ++ ++static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, ++ u64 cluster_id) ++{ ++ int cpu = *base_cpu; ++ u64 mpidr = cpu_logical_map(cpu); ++ u16 tlist = 0; ++ ++ while (cpu < nr_cpu_ids) { ++ /* ++ * If we ever get a cluster of more than 16 CPUs, just ++ * scream and skip that CPU. 
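For reference, the frame stepping used by gic_populate_rdist() earlier in this file when the device tree supplies no redistributor-stride property:

/*
 * Each redistributor occupies RD_base + SGI_base, i.e. 2 x 64K.  When
 * GICR_TYPER.VLPIS is set, VLPI_base plus a reserved page follow, i.e.
 * 2 more x 64K.  The walk therefore advances 128K or 256K per CPU until
 * GICR_TYPER.Last marks the end of the region.
 */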
++ */ ++ if (WARN_ON((mpidr & 0xff) >= 16)) ++ goto out; ++ ++ tlist |= 1 << (mpidr & 0xf); ++ ++ cpu = cpumask_next(cpu, mask); ++ if (cpu == nr_cpu_ids) ++ goto out; ++ ++ mpidr = cpu_logical_map(cpu); ++ ++ if (cluster_id != (mpidr & ~0xffUL)) { ++ cpu--; ++ goto out; ++ } ++ } ++out: ++ *base_cpu = cpu; ++ return tlist; ++} ++ ++static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq) ++{ ++ u64 val; ++ ++ val = (MPIDR_AFFINITY_LEVEL(cluster_id, 3) << 48 | ++ MPIDR_AFFINITY_LEVEL(cluster_id, 2) << 32 | ++ irq << 24 | ++ MPIDR_AFFINITY_LEVEL(cluster_id, 1) << 16 | ++ tlist); ++ ++ pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val); ++ gic_write_sgi1r(val); ++} ++ ++static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) ++{ ++ int cpu; ++ ++ if (WARN_ON(irq >= 16)) ++ return; ++ ++ /* ++ * Ensure that stores to Normal memory are visible to the ++ * other CPUs before issuing the IPI. ++ */ ++ smp_wmb(); ++ ++ for_each_cpu_mask(cpu, *mask) { ++ u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL; ++ u16 tlist; ++ ++ tlist = gic_compute_target_list(&cpu, mask, cluster_id); ++ gic_send_sgi(cluster_id, tlist, irq); ++ } ++ ++ /* Force the above writes to ICC_SGI1R_EL1 to be executed */ ++ isb(); ++} ++ ++static void gic_smp_init(void) ++{ ++ set_smp_cross_call(gic_raise_softirq); ++ register_cpu_notifier(&gic_cpu_notifier); ++} ++ ++static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, ++ bool force) ++{ ++ unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask); ++ void __iomem *reg; ++ int enabled; ++ u64 val; ++ ++ if (gic_irq_in_rdist(d)) ++ return -EINVAL; ++ ++ /* If interrupt was enabled, disable it first */ ++ enabled = gic_peek_irq(d, GICD_ISENABLER); ++ if (enabled) ++ gic_mask_irq(d); ++ ++ reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8); ++ val = gic_mpidr_to_affinity(cpu_logical_map(cpu)); ++ ++ writeq_relaxed(val, reg); ++ ++ /* ++ * If the interrupt was enabled, enabled it again. Otherwise, ++ * just wait for the distributor to have digested our changes. 
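A worked example of the ICC_SGI1R_EL1 value built by gic_send_sgi() above:

/*
 * SGI 5 aimed at CPUs 0 and 1 of cluster 0 (Aff3.Aff2.Aff1 = 0.0.0):
 * tlist = 0b0011, so val = (5 << 24) | 0x3 = 0x05000003.  Non-zero
 * cluster affinities land at bits [23:16] (Aff1), [39:32] (Aff2) and
 * [55:48] (Aff3).
 */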
++ */ ++ if (enabled) ++ gic_unmask_irq(d); ++ else ++ gic_dist_wait_for_rwp(); ++ ++ return IRQ_SET_MASK_OK; ++} ++#else ++#define gic_set_affinity NULL ++#define gic_smp_init() do { } while(0) ++#endif ++ ++static struct irq_chip gic_chip = { ++ .name = "GICv3", ++ .irq_mask = gic_mask_irq, ++ .irq_unmask = gic_unmask_irq, ++ .irq_eoi = gic_eoi_irq, ++ .irq_set_type = gic_set_type, ++ .irq_set_affinity = gic_set_affinity, ++}; ++ ++static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, ++ irq_hw_number_t hw) ++{ ++ /* SGIs are private to the core kernel */ ++ if (hw < 16) ++ return -EPERM; ++ /* PPIs */ ++ if (hw < 32) { ++ irq_set_percpu_devid(irq); ++ irq_set_chip_and_handler(irq, &gic_chip, ++ handle_percpu_devid_irq); ++ set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN); ++ } ++ /* SPIs */ ++ if (hw >= 32 && hw < gic_data.irq_nr) { ++ irq_set_chip_and_handler(irq, &gic_chip, ++ handle_fasteoi_irq); ++ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); ++ } ++ irq_set_chip_data(irq, d->host_data); ++ return 0; ++} ++ ++static int gic_irq_domain_xlate(struct irq_domain *d, ++ struct device_node *controller, ++ const u32 *intspec, unsigned int intsize, ++ unsigned long *out_hwirq, unsigned int *out_type) ++{ ++ if (d->of_node != controller) ++ return -EINVAL; ++ if (intsize < 3) ++ return -EINVAL; ++ ++ switch(intspec[0]) { ++ case 0: /* SPI */ ++ *out_hwirq = intspec[1] + 32; ++ break; ++ case 1: /* PPI */ ++ *out_hwirq = intspec[1] + 16; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK; ++ return 0; ++} ++ ++static const struct irq_domain_ops gic_irq_domain_ops = { ++ .map = gic_irq_domain_map, ++ .xlate = gic_irq_domain_xlate, ++}; ++ ++static int __init gic_of_init(struct device_node *node, struct device_node *parent) ++{ ++ void __iomem *dist_base; ++ void __iomem **redist_base; ++ u64 redist_stride; ++ u32 redist_regions; ++ u32 reg; ++ int gic_irqs; ++ int err; ++ int i; ++ ++ dist_base = of_iomap(node, 0); ++ if (!dist_base) { ++ pr_err("%s: unable to map gic dist registers\n", ++ node->full_name); ++ return -ENXIO; ++ } ++ ++ reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK; ++ if (reg != 0x30 && reg != 0x40) { ++ pr_err("%s: no distributor detected, giving up\n", ++ node->full_name); ++ err = -ENODEV; ++ goto out_unmap_dist; ++ } ++ ++ if (of_property_read_u32(node, "#redistributor-regions", &redist_regions)) ++ redist_regions = 1; ++ ++ redist_base = kzalloc(sizeof(*redist_base) * redist_regions, GFP_KERNEL); ++ if (!redist_base) { ++ err = -ENOMEM; ++ goto out_unmap_dist; ++ } ++ ++ for (i = 0; i < redist_regions; i++) { ++ redist_base[i] = of_iomap(node, 1 + i); ++ if (!redist_base[i]) { ++ pr_err("%s: couldn't map region %d\n", ++ node->full_name, i); ++ err = -ENODEV; ++ goto out_unmap_rdist; ++ } ++ } ++ ++ if (of_property_read_u64(node, "redistributor-stride", &redist_stride)) ++ redist_stride = 0; ++ ++ gic_data.dist_base = dist_base; ++ gic_data.redist_base = redist_base; ++ gic_data.redist_regions = redist_regions; ++ gic_data.redist_stride = redist_stride; ++ ++ /* ++ * Find out how many interrupts are supported. 
++ * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI) ++ */ ++ gic_irqs = readl_relaxed(gic_data.dist_base + GICD_TYPER) & 0x1f; ++ gic_irqs = (gic_irqs + 1) * 32; ++ if (gic_irqs > 1020) ++ gic_irqs = 1020; ++ gic_data.irq_nr = gic_irqs; ++ ++ gic_data.domain = irq_domain_add_tree(node, &gic_irq_domain_ops, ++ &gic_data); ++ gic_data.rdist = alloc_percpu(typeof(*gic_data.rdist)); ++ ++ if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdist)) { ++ err = -ENOMEM; ++ goto out_free; ++ } ++ ++ set_handle_irq(gic_handle_irq); ++ ++ gic_smp_init(); ++ gic_dist_init(); ++ gic_cpu_init(); ++ ++ return 0; ++ ++out_free: ++ if (gic_data.domain) ++ irq_domain_remove(gic_data.domain); ++ free_percpu(gic_data.rdist); ++out_unmap_rdist: ++ for (i = 0; i < redist_regions; i++) ++ if (redist_base[i]) ++ iounmap(redist_base[i]); ++ kfree(redist_base); ++out_unmap_dist: ++ iounmap(dist_base); ++ return err; ++} ++ ++IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init); +diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c +index 7e11c9d..5a75b97 100644 +--- a/drivers/irqchip/irq-gic.c ++++ b/drivers/irqchip/irq-gic.c +@@ -46,6 +46,7 @@ + #include + #include + ++#include "irq-gic-common.h" + #include "irqchip.h" + + union gic_base { +@@ -188,12 +189,6 @@ static int gic_set_type(struct irq_data *d, unsigned int type) + { + void __iomem *base = gic_dist_base(d); + unsigned int gicirq = gic_irq(d); +- u32 enablemask = 1 << (gicirq % 32); +- u32 enableoff = (gicirq / 32) * 4; +- u32 confmask = 0x2 << ((gicirq % 16) * 2); +- u32 confoff = (gicirq / 16) * 4; +- bool enabled = false; +- u32 val; + + /* Interrupt configuration for SGIs can't be changed */ + if (gicirq < 16) +@@ -207,25 +202,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type) + if (gic_arch_extn.irq_set_type) + gic_arch_extn.irq_set_type(d, type); + +- val = readl_relaxed(base + GIC_DIST_CONFIG + confoff); +- if (type == IRQ_TYPE_LEVEL_HIGH) +- val &= ~confmask; +- else if (type == IRQ_TYPE_EDGE_RISING) +- val |= confmask; +- +- /* +- * As recommended by the spec, disable the interrupt before changing +- * the configuration +- */ +- if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) { +- writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff); +- enabled = true; +- } +- +- writel_relaxed(val, base + GIC_DIST_CONFIG + confoff); +- +- if (enabled) +- writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff); ++ gic_configure_irq(gicirq, type, base, NULL); + + raw_spin_unlock(&irq_controller_lock); + +@@ -387,12 +364,6 @@ static void __init gic_dist_init(struct gic_chip_data *gic) + writel_relaxed(0, base + GIC_DIST_CTRL); + + /* +- * Set all global interrupts to be level triggered, active low. +- */ +- for (i = 32; i < gic_irqs; i += 16) +- writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16); +- +- /* + * Set all global interrupts to this CPU only. + */ + cpumask = gic_get_cpumask(gic); +@@ -401,18 +372,7 @@ static void __init gic_dist_init(struct gic_chip_data *gic) + for (i = 32; i < gic_irqs; i += 4) + writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4); + +- /* +- * Set priority on all global interrupts. +- */ +- for (i = 32; i < gic_irqs; i += 4) +- writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4); +- +- /* +- * Disable all interrupts. Leave the PPI and SGIs alone +- * as these enables are banked registers. 
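The GICD_TYPER computation in gic_of_init() above, worked through:

/*
 * GICD_TYPER.ITLinesNumber = 5 gives (5 + 1) * 32 = 192 interrupt IDs;
 * a field of 0x1f would give 1024, which the cap trims back to the
 * architectural maximum of 1020 usable IDs (IDs 1020-1023 are special).
 */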
+- */ +- for (i = 32; i < gic_irqs; i += 32) +- writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32); ++ gic_dist_config(base, gic_irqs, NULL); + + writel_relaxed(1, base + GIC_DIST_CTRL); + } +@@ -422,6 +382,7 @@ static void gic_cpu_init(struct gic_chip_data *gic) + void __iomem *dist_base = gic_data_dist_base(gic); + void __iomem *base = gic_data_cpu_base(gic); + unsigned int cpu_mask, cpu = smp_processor_id(); ++ unsigned int ctrl_mask; + int i; + + /* +@@ -439,27 +400,32 @@ static void gic_cpu_init(struct gic_chip_data *gic) + if (i != cpu) + gic_cpu_map[i] &= ~cpu_mask; + +- /* +- * Deal with the banked PPI and SGI interrupts - disable all +- * PPI interrupts, ensure all SGI interrupts are enabled. +- */ +- writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR); +- writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET); +- +- /* +- * Set priority on PPI and SGI interrupts +- */ +- for (i = 0; i < 32; i += 4) +- writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4); ++ gic_cpu_config(dist_base, NULL); + + writel_relaxed(0xf0, base + GIC_CPU_PRIMASK); +- writel_relaxed(1, base + GIC_CPU_CTRL); ++ ++ ctrl_mask = readl(base + GIC_CPU_CTRL); ++ ++ /* Mask out the gic v2 bypass bits */ ++ ctrl_mask &= 0x1e0; ++ ++ /* Enable group 0 */ ++ ctrl_mask |= 0x1; ++ writel_relaxed(ctrl_mask, base + GIC_CPU_CTRL); + } + + void gic_cpu_if_down(void) + { ++ unsigned int ctrl_mask; + void __iomem *cpu_base = gic_data_cpu_base(&gic_data[0]); +- writel_relaxed(0, cpu_base + GIC_CPU_CTRL); ++ ++ ctrl_mask = readl(cpu_base + GIC_CPU_CTRL); ++ /* ++ * Disable grp enable bit, leave the bypass bits alone as changing ++ * them could leave the system unstable ++ */ ++ ctrl_mask &= 0x1e0; ++ writel_relaxed(ctrl_mask, cpu_base + GIC_CPU_CTRL); + } + + #ifdef CONFIG_CPU_PM +@@ -570,6 +536,7 @@ static void gic_cpu_restore(unsigned int gic_nr) + { + int i; + u32 *ptr; ++ unsigned int ctrl_mask; + void __iomem *dist_base; + void __iomem *cpu_base; + +@@ -594,7 +561,15 @@ static void gic_cpu_restore(unsigned int gic_nr) + writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4); + + writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK); +- writel_relaxed(1, cpu_base + GIC_CPU_CTRL); ++ ++ ctrl_mask = readl(cpu_base + GIC_CPU_CTRL); ++ ++ /* Mask out the gic v2 bypass bits */ ++ ctrl_mask &= 0x1e0; ++ ++ /* Enable group 0 */ ++ ctrl_mask |= 0x1; ++ writel_relaxed(ctrl_mask, cpu_base + GIC_CPU_CTRL); + } + + static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v) +diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig +index edb7186..dc7406c 100644 +--- a/drivers/net/ethernet/Kconfig ++++ b/drivers/net/ethernet/Kconfig +@@ -24,6 +24,7 @@ source "drivers/net/ethernet/allwinner/Kconfig" + source "drivers/net/ethernet/alteon/Kconfig" + source "drivers/net/ethernet/altera/Kconfig" + source "drivers/net/ethernet/amd/Kconfig" ++source "drivers/net/ethernet/apm/Kconfig" + source "drivers/net/ethernet/apple/Kconfig" + source "drivers/net/ethernet/arc/Kconfig" + source "drivers/net/ethernet/atheros/Kconfig" +diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile +index 58de333..224a018 100644 +--- a/drivers/net/ethernet/Makefile ++++ b/drivers/net/ethernet/Makefile +@@ -10,6 +10,7 @@ obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/ + obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/ + obj-$(CONFIG_ALTERA_TSE) += altera/ + obj-$(CONFIG_NET_VENDOR_AMD) += amd/ ++obj-$(CONFIG_NET_XGENE) += apm/ + obj-$(CONFIG_NET_VENDOR_APPLE) += apple/ + 
obj-$(CONFIG_NET_VENDOR_ARC) += arc/ + obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/ +diff --git a/drivers/net/ethernet/apm/Kconfig b/drivers/net/ethernet/apm/Kconfig +new file mode 100644 +index 0000000..ec63d70 +--- /dev/null ++++ b/drivers/net/ethernet/apm/Kconfig +@@ -0,0 +1 @@ ++source "drivers/net/ethernet/apm/xgene/Kconfig" +diff --git a/drivers/net/ethernet/apm/Makefile b/drivers/net/ethernet/apm/Makefile +new file mode 100644 +index 0000000..65ce32a +--- /dev/null ++++ b/drivers/net/ethernet/apm/Makefile +@@ -0,0 +1,5 @@ ++# ++# Makefile for APM X-GENE Ethernet driver. ++# ++ ++obj-$(CONFIG_NET_XGENE) += xgene/ +diff --git a/drivers/net/ethernet/apm/xgene/Kconfig b/drivers/net/ethernet/apm/xgene/Kconfig +new file mode 100644 +index 0000000..616dff6 +--- /dev/null ++++ b/drivers/net/ethernet/apm/xgene/Kconfig +@@ -0,0 +1,9 @@ ++config NET_XGENE ++ tristate "APM X-Gene SoC Ethernet Driver" ++ select PHYLIB ++ help ++ This is the Ethernet driver for the on-chip ethernet interface on the ++ APM X-Gene SoC. ++ ++ To compile this driver as a module, choose M here. This module will ++ be called xgene_enet. +diff --git a/drivers/net/ethernet/apm/xgene/Makefile b/drivers/net/ethernet/apm/xgene/Makefile +new file mode 100644 +index 0000000..c643e8a +--- /dev/null ++++ b/drivers/net/ethernet/apm/xgene/Makefile +@@ -0,0 +1,6 @@ ++# ++# Makefile for APM X-Gene Ethernet Driver. ++# ++ ++xgene-enet-objs := xgene_enet_hw.o xgene_enet_main.o xgene_enet_ethtool.o ++obj-$(CONFIG_NET_XGENE) += xgene-enet.o +diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c +new file mode 100644 +index 0000000..63f2aa5 +--- /dev/null ++++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c +@@ -0,0 +1,125 @@ ++/* Applied Micro X-Gene SoC Ethernet Driver ++ * ++ * Copyright (c) 2014, Applied Micro Circuits Corporation ++ * Authors: Iyappan Subramanian ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see . 
++ */ ++ ++#include ++#include "xgene_enet_main.h" ++ ++struct xgene_gstrings_stats { ++ char name[ETH_GSTRING_LEN]; ++ int offset; ++}; ++ ++#define XGENE_STAT(m) { #m, offsetof(struct xgene_enet_pdata, stats.m) } ++ ++static const struct xgene_gstrings_stats gstrings_stats[] = { ++ XGENE_STAT(rx_packets), ++ XGENE_STAT(tx_packets), ++ XGENE_STAT(rx_bytes), ++ XGENE_STAT(tx_bytes), ++ XGENE_STAT(rx_errors), ++ XGENE_STAT(tx_errors), ++ XGENE_STAT(rx_length_errors), ++ XGENE_STAT(rx_crc_errors), ++ XGENE_STAT(rx_frame_errors), ++ XGENE_STAT(rx_fifo_errors) ++}; ++ ++#define XGENE_STATS_LEN ARRAY_SIZE(gstrings_stats) ++ ++static void xgene_get_drvinfo(struct net_device *ndev, ++ struct ethtool_drvinfo *info) ++{ ++ struct xgene_enet_pdata *pdata = netdev_priv(ndev); ++ struct platform_device *pdev = pdata->pdev; ++ ++ strcpy(info->driver, "xgene_enet"); ++ strcpy(info->version, XGENE_DRV_VERSION); ++ snprintf(info->fw_version, ETHTOOL_FWVERS_LEN, "N/A"); ++ sprintf(info->bus_info, "%s", pdev->name); ++} ++ ++static int xgene_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd) ++{ ++ struct xgene_enet_pdata *pdata = netdev_priv(ndev); ++ struct phy_device *phydev = pdata->phy_dev; ++ ++ if (phydev == NULL) ++ return -ENODEV; ++ ++ return phy_ethtool_gset(phydev, cmd); ++} ++ ++static int xgene_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd) ++{ ++ struct xgene_enet_pdata *pdata = netdev_priv(ndev); ++ struct phy_device *phydev = pdata->phy_dev; ++ ++ if (phydev == NULL) ++ return -ENODEV; ++ ++ return phy_ethtool_sset(phydev, cmd); ++} ++ ++static void xgene_get_strings(struct net_device *ndev, u32 stringset, u8 *data) ++{ ++ int i; ++ u8 *p = data; ++ ++ if (stringset != ETH_SS_STATS) ++ return; ++ ++ for (i = 0; i < XGENE_STATS_LEN; i++) { ++ memcpy(p, gstrings_stats[i].name, ETH_GSTRING_LEN); ++ p += ETH_GSTRING_LEN; ++ } ++} ++ ++static int xgene_get_sset_count(struct net_device *ndev, int sset) ++{ ++ if (sset != ETH_SS_STATS) ++ return -EINVAL; ++ ++ return XGENE_STATS_LEN; ++} ++ ++static void xgene_get_ethtool_stats(struct net_device *ndev, ++ struct ethtool_stats *dummy, ++ u64 *data) ++{ ++ void *pdata = netdev_priv(ndev); ++ int i; ++ ++ for (i = 0; i < XGENE_STATS_LEN; i++) ++ *data++ = *(u64 *)(pdata + gstrings_stats[i].offset); ++} ++ ++static const struct ethtool_ops xgene_ethtool_ops = { ++ .get_drvinfo = xgene_get_drvinfo, ++ .get_settings = xgene_get_settings, ++ .set_settings = xgene_set_settings, ++ .get_link = ethtool_op_get_link, ++ .get_strings = xgene_get_strings, ++ .get_sset_count = xgene_get_sset_count, ++ .get_ethtool_stats = xgene_get_ethtool_stats ++}; ++ ++void xgene_enet_set_ethtool_ops(struct net_device *ndev) ++{ ++ ndev->ethtool_ops = &xgene_ethtool_ops; ++} +diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +new file mode 100644 +index 0000000..6c4a484 +--- /dev/null ++++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +@@ -0,0 +1,848 @@ ++/* Applied Micro X-Gene SoC Ethernet Driver ++ * ++ * Copyright (c) 2014, Applied Micro Circuits Corporation ++ * Authors: Iyappan Subramanian ++ * Ravi Patel ++ * Keyur Chudgar ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. 
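Because the XGENE_STAT table above pairs each name with an offsetof() into struct xgene_enet_pdata, xgene_get_ethtool_stats() is pure pointer arithmetic and new counters are one-line additions. An illustrative extension, assuming the field exists in the embedded rtnl_link_stats64:

static const struct xgene_gstrings_stats extra_stats[] = {
        XGENE_STAT(rx_packets),
        XGENE_STAT(multicast),  /* new entry; name and offset come for free */
};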
++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see . ++ */ ++ ++#include "xgene_enet_main.h" ++#include "xgene_enet_hw.h" ++ ++u64 xgene_prepare_eth_work_msg(u8 l4hlen, u8 l3hlen, u8 ethhdr, ++ u8 csum_enable, u8 proto) ++{ ++ u64 hopinfo; ++ ++ hopinfo = (l4hlen & TCPHDR_MASK) | ++ ((l3hlen << IPHDR_POS) & IPHDR_MASK) | ++ (ethhdr << ETHHDR_POS) | ++ ((csum_enable << EC_POS) & EC_MASK) | ++ ((proto << IS_POS) & IS_MASK) | ++ INSERT_CRC | ++ TYPE_ETH_WORK_MESSAGE; ++ ++ return hopinfo; ++} ++ ++/* Tx descriptor raw write */ ++void xgene_set_tx_desc(struct xgene_enet_desc_ring *ring, ++ struct xgene_enet_raw_desc *raw_desc) ++{ ++ raw_desc->m0 = ring->desc.userinfo; ++ raw_desc->m1 = (ring->desc.dataaddr & DATAADDR_MASK) | ++ (((u64)ring->desc.bufdatalen << BUFDATALEN_POS) & ++ BUFDATALEN_MASK) | COHERENT_MASK; ++ raw_desc->m3 = (((u64)ring->desc.henqnum << HENQNUM_POS) & ++ HENQNUM_MASK) | ++ ring->desc.hopinfo_lsb; ++} ++ ++/* descriptor raw read */ ++void xgene_get_desc(struct xgene_enet_desc_ring *ring, ++ struct xgene_enet_raw_desc *raw_desc) ++{ ++ struct xgene_enet_desc *desc = &ring->desc; ++ ++ desc->dataaddr = raw_desc->m1 & DATAADDR_MASK; ++ desc->bufdatalen = (raw_desc->m1 & BUFDATALEN_MASK) >> BUFDATALEN_POS; ++ desc->userinfo = raw_desc->m0 & USERINFO_MASK; ++ desc->fpqnum = (raw_desc->m0 & FPQNUM_MASK) >> FPQNUM_POS; ++ desc->status = (raw_desc->m0 & LERR_MASK) >> LERR_POS; ++} ++ ++/* Bufpool descriptor raw write common fields */ ++void xgene_set_init_bufpool_desc(struct xgene_enet_desc_ring *ring, ++ struct xgene_enet_raw_desc16 *raw_desc) ++{ ++ raw_desc->m0 = (ring->desc.userinfo) | ++ (((u64)ring->desc.fpqnum << FPQNUM_POS) & FPQNUM_MASK) | ++ STASHING_MASK; ++} ++ ++/* Bufpool descriptor raw write */ ++void xgene_set_refill_bufpool_desc(struct xgene_enet_desc_ring *ring, ++ struct xgene_enet_raw_desc16 *raw_desc) ++{ ++ raw_desc->m1 = (ring->desc.dataaddr & DATAADDR_MASK) | ++ (((u64)ring->desc.bufdatalen << BUFDATALEN_POS) & ++ BUFDATALEN_MASK) | ++ COHERENT_MASK; ++} ++ ++/* Bufpool descriptor raw read */ ++void xgene_get_bufpool_desc(struct xgene_enet_desc_ring *ring, ++ struct xgene_enet_raw_desc16 *raw_desc) ++{ ++ struct xgene_enet_desc *desc = &ring->desc; ++ ++ desc->userinfo = raw_desc->m0 & USERINFO_MASK; ++} ++ ++static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring) ++{ ++ u32 *ring_cfg = ring->state; ++ u64 addr = ring->dma; ++ enum xgene_enet_ring_cfgsize cfgsize = ring->cfgsize; ++ ++ ring_cfg[4] |= (1 << SELTHRSH_POS) & ++ CREATE_MASK(SELTHRSH_POS, SELTHRSH_LEN); ++ ring_cfg[3] |= ACCEPTLERR; ++ ring_cfg[2] |= QCOHERENT; ++ ++ addr >>= 8; ++ ring_cfg[2] |= (addr << RINGADDRL_POS) & ++ CREATE_MASK_ULL(RINGADDRL_POS, RINGADDRL_LEN); ++ addr >>= RINGADDRL_LEN; ++ ring_cfg[3] |= addr & CREATE_MASK_ULL(RINGADDRH_POS, RINGADDRH_LEN); ++ ring_cfg[3] |= ((u32) cfgsize << RINGSIZE_POS) & ++ CREATE_MASK(RINGSIZE_POS, RINGSIZE_LEN); ++} ++ ++static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring) ++{ ++ u32 *ring_cfg = ring->state; ++ bool is_bufpool; ++ u32 val; ++ ++ is_bufpool = xgene_enet_is_bufpool(ring->id); ++ val = (is_bufpool) ? 
RING_BUFPOOL : RING_REGULAR; ++ ring_cfg[4] |= (val << RINGTYPE_POS) & ++ CREATE_MASK(RINGTYPE_POS, RINGTYPE_LEN); ++ ++ if (is_bufpool) { ++ ring_cfg[3] |= (BUFPOOL_MODE << RINGMODE_POS) & ++ CREATE_MASK(RINGMODE_POS, RINGMODE_LEN); ++ } ++} ++ ++static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring) ++{ ++ u32 *ring_cfg = ring->state; ++ ++ ring_cfg[3] |= RECOMBBUF; ++ ring_cfg[3] |= (0xf << RECOMTIMEOUTL_POS) & ++ CREATE_MASK(RECOMTIMEOUTL_POS, RECOMTIMEOUTL_LEN); ++ ring_cfg[4] |= 0x7 & CREATE_MASK(RECOMTIMEOUTH_POS, RECOMTIMEOUTH_LEN); ++} ++ ++static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring, ++ u32 offset, u32 data) ++{ ++ struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); ++ ++ iowrite32(data, pdata->ring_csr_addr + offset); ++} ++ ++static void xgene_enet_ring_rd32(struct xgene_enet_desc_ring *ring, ++ u32 offset, u32 *data) ++{ ++ struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); ++ ++ *data = ioread32(pdata->ring_csr_addr + offset); ++} ++ ++static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring) ++{ ++ int i; ++ ++ xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num); ++ for (i = 0; i < NUM_RING_CONFIG; i++) { ++ xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4), ++ ring->state[i]); ++ } ++} ++ ++static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring) ++{ ++ memset(ring->state, 0, sizeof(u32) * NUM_RING_CONFIG); ++ xgene_enet_write_ring_state(ring); ++} ++ ++static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring) ++{ ++ xgene_enet_ring_set_type(ring); ++ ++ if (xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH0) ++ xgene_enet_ring_set_recombbuf(ring); ++ ++ xgene_enet_ring_init(ring); ++ xgene_enet_write_ring_state(ring); ++} ++ ++static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring) ++{ ++ u32 ring_id_val, ring_id_buf; ++ bool is_bufpool; ++ ++ is_bufpool = xgene_enet_is_bufpool(ring->id); ++ ++ ring_id_val = ring->id & GENMASK(9, 0); ++ ring_id_val |= OVERWRITE; ++ ++ ring_id_buf = (ring->num << 9) & GENMASK(18, 9); ++ ring_id_buf |= PREFETCH_BUF_EN; ++ if (is_bufpool) ++ ring_id_buf |= IS_BUFFER_POOL; ++ ++ xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val); ++ xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf); ++} ++ ++static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring) ++{ ++ u32 ring_id; ++ ++ ring_id = ring->id | OVERWRITE; ++ xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id); ++ xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0); ++} ++ ++struct xgene_enet_desc_ring *xgene_enet_setup_ring( ++ struct xgene_enet_desc_ring *ring) ++{ ++ u32 size = ring->size; ++ u32 i, data; ++ u64 *desc; ++ bool is_bufpool; ++ ++ xgene_enet_clr_ring_state(ring); ++ xgene_enet_set_ring_state(ring); ++ xgene_enet_set_ring_id(ring); ++ ++ ring->slots = xgene_enet_get_numslots(ring->id, size); ++ ++ is_bufpool = xgene_enet_is_bufpool(ring->id); ++ if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU) ++ return ring; ++ ++ for (i = 0; i < ring->slots; i++) { ++ desc = (u64 *)&ring->raw_desc[i]; ++ desc[EMPTY_SLOT_INDEX] = EMPTY_SLOT; ++ } ++ ++ xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data); ++ data |= BIT(31 - xgene_enet_ring_bufnum(ring->id)); ++ xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data); ++ ++ return ring; ++} ++ ++void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring) ++{ ++ u32 data; ++ bool is_bufpool; ++ ++ is_bufpool = xgene_enet_is_bufpool(ring->id); ++ if (is_bufpool || 
xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU) ++ goto out; ++ ++ xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data); ++ data &= ~BIT(31 - xgene_enet_ring_bufnum(ring->id)); ++ xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data); ++ ++out: ++ xgene_enet_clr_desc_ring_id(ring); ++ xgene_enet_clr_ring_state(ring); ++} ++ ++void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring, ++ struct xgene_enet_pdata *pdata, ++ enum xgene_enet_err_code status) ++{ ++ struct rtnl_link_stats64 *stats = &pdata->stats; ++ ++ switch (status) { ++ case INGRESS_CRC: ++ stats->rx_crc_errors++; ++ break; ++ case INGRESS_CHECKSUM: ++ case INGRESS_CHECKSUM_COMPUTE: ++ stats->rx_errors++; ++ break; ++ case INGRESS_TRUNC_FRAME: ++ stats->rx_frame_errors++; ++ break; ++ case INGRESS_PKT_LEN: ++ stats->rx_length_errors++; ++ break; ++ case INGRESS_PKT_UNDER: ++ stats->rx_frame_errors++; ++ break; ++ case INGRESS_FIFO_OVERRUN: ++ stats->rx_fifo_errors++; ++ break; ++ default: ++ break; ++ } ++} ++ ++static void xgene_enet_wr_csr(struct xgene_enet_pdata *pdata, ++ u32 offset, u32 val) ++{ ++ void *addr = pdata->eth_csr_addr + offset; ++ ++ iowrite32(val, addr); ++} ++ ++static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *pdata, ++ u32 offset, u32 val) ++{ ++ void *addr = pdata->eth_ring_if_addr + offset; ++ ++ iowrite32(val, addr); ++} ++ ++static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *pdata, ++ u32 offset, u32 val) ++{ ++ void *addr = pdata->eth_diag_csr_addr + offset; ++ ++ iowrite32(val, addr); ++} ++ ++static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata, ++ u32 offset, u32 val) ++{ ++ void *addr = pdata->mcx_mac_csr_addr + offset; ++ ++ iowrite32(val, addr); ++} ++ ++static bool xgene_enet_wr_indirect(void *addr, void *wr, void *cmd, ++ void *cmd_done, u32 wr_addr, u32 wr_data) ++{ ++ u32 done; ++ u8 wait = 10; ++ ++ iowrite32(wr_addr, addr); ++ iowrite32(wr_data, wr); ++ iowrite32(XGENE_ENET_WR_CMD, cmd); ++ ++ /* wait for write command to complete */ ++ while (!(done = ioread32(cmd_done)) && wait--) ++ udelay(1); ++ ++ if (!done) ++ return false; ++ ++ iowrite32(0, cmd); ++ ++ return true; ++} ++ ++static void xgene_enet_wr_mcx_mac(struct xgene_enet_pdata *pdata, ++ u32 wr_addr, u32 wr_data) ++{ ++ void *addr, *wr, *cmd, *cmd_done; ++ bool ret; ++ ++ addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET; ++ wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET; ++ cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET; ++ cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET; ++ ++ ret = xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data); ++ if (!ret) ++ netdev_err(pdata->ndev, "MCX mac write failed, addr: %04x\n", ++ wr_addr); ++} ++ ++static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata, ++ u32 offset, u32 *val) ++{ ++ void *addr = pdata->eth_csr_addr + offset; ++ ++ *val = ioread32(addr); ++} ++ ++static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata *pdata, ++ u32 offset, u32 *val) ++{ ++ void *addr = pdata->eth_diag_csr_addr + offset; ++ ++ *val = ioread32(addr); ++} ++ ++static void xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *pdata, ++ u32 offset, u32 *val) ++{ ++ void *addr = pdata->mcx_mac_csr_addr + offset; ++ ++ *val = ioread32(addr); ++} ++ ++static bool xgene_enet_rd_indirect(void *addr, void *rd, void *cmd, ++ void *cmd_done, u32 rd_addr, u32 *rd_data) ++{ ++ u32 done; ++ u8 wait = 10; ++ ++ iowrite32(rd_addr, addr); ++ iowrite32(XGENE_ENET_RD_CMD, cmd); ++ ++ /* wait for read command to complete */ ++ while (!(done = 
ioread32(cmd_done)) && wait--) ++ udelay(1); ++ ++ if (!done) ++ return false; ++ ++ *rd_data = ioread32(rd); ++ iowrite32(0, cmd); ++ ++ return true; ++} ++ ++static void xgene_enet_rd_mcx_mac(struct xgene_enet_pdata *pdata, ++ u32 rd_addr, u32 *rd_data) ++{ ++ void *addr, *rd, *cmd, *cmd_done; ++ bool ret; ++ ++ addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET; ++ rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET; ++ cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET; ++ cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET; ++ ++ ret = xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data); ++ if (!ret) ++ netdev_err(pdata->ndev, "MCX mac read failed, addr: %04x\n", ++ rd_addr); ++} ++ ++static int xgene_mii_phy_write(struct xgene_enet_pdata *pdata, int phy_id, ++ u32 reg, u16 data) ++{ ++ u32 addr = 0, wr_data = 0; ++ u32 done; ++ u8 wait = 10; ++ ++ PHY_ADDR_SET(&addr, phy_id); ++ REG_ADDR_SET(&addr, reg); ++ xgene_enet_wr_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, addr); ++ ++ PHY_CONTROL_SET(&wr_data, data); ++ xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONTROL_ADDR, wr_data); ++ do { ++ usleep_range(5, 10); ++ xgene_enet_rd_mcx_mac(pdata, MII_MGMT_INDICATORS_ADDR, &done); ++ } while ((done & BUSY_MASK) && wait--); ++ ++ if (done & BUSY_MASK) { ++ netdev_err(pdata->ndev, "MII_MGMT write failed\n"); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++static int xgene_mii_phy_read(struct xgene_enet_pdata *pdata, ++ u8 phy_id, u32 reg) ++{ ++ u32 addr = 0; ++ u32 data, done; ++ u8 wait = 10; ++ ++ PHY_ADDR_SET(&addr, phy_id); ++ REG_ADDR_SET(&addr, reg); ++ xgene_enet_wr_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, addr); ++ xgene_enet_wr_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK); ++ do { ++ usleep_range(5, 10); ++ xgene_enet_rd_mcx_mac(pdata, MII_MGMT_INDICATORS_ADDR, &done); ++ } while ((done & BUSY_MASK) && wait--); ++ ++ if (done & BUSY_MASK) { ++ netdev_err(pdata->ndev, "MII_MGMT read failed\n"); ++ return -1; ++ } ++ ++ xgene_enet_rd_mcx_mac(pdata, MII_MGMT_STATUS_ADDR, &data); ++ xgene_enet_wr_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, 0); ++ ++ return data; ++} ++ ++void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata) ++{ ++ u32 addr0, addr1; ++ u8 *dev_addr = pdata->ndev->dev_addr; ++ ++ addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | ++ (dev_addr[1] << 8) | dev_addr[0]; ++ addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16); ++ addr1 |= pdata->phy_addr & 0xFFFF; ++ ++ xgene_enet_wr_mcx_mac(pdata, STATION_ADDR0_ADDR, addr0); ++ xgene_enet_wr_mcx_mac(pdata, STATION_ADDR1_ADDR, addr1); ++} ++ ++static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata) ++{ ++ struct net_device *ndev = pdata->ndev; ++ u32 data; ++ u8 wait = 10; ++ ++ xgene_enet_wr_diag_csr(pdata, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0); ++ do { ++ usleep_range(100, 110); ++ xgene_enet_rd_diag_csr(pdata, ENET_BLOCK_MEM_RDY_ADDR, &data); ++ } while ((data != 0xffffffff) && wait--); ++ ++ if (data != 0xffffffff) { ++ netdev_err(ndev, "Failed to release memory from shutdown\n"); ++ return -ENODEV; ++ } ++ ++ return 0; ++} ++ ++static void xgene_gmac_phy_enable_scan_cycle(struct xgene_enet_pdata *pdata) ++{ ++ u32 val; ++ ++ xgene_enet_rd_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, &val); ++ SCAN_CYCLE_MASK_SET(&val, 1); ++ xgene_enet_wr_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, val); ++ ++ /* Program phy address start scan from 0 and register at address 0x1 */ ++ xgene_enet_rd_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, &val); ++ PHY_ADDR_SET(&val, pdata->phy_dev->addr); ++ REG_ADDR_SET(&val, MII_BMSR); ++ 
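++	/*
++	 * Presumably, with SCAN_CYCLE set above, the MII management block
++	 * keeps re-reading the BMSR of the PHY programmed here on every
++	 * scan cycle, so link status stays current without issuing an
++	 * explicit MDIO read each time.
++	 */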
xgene_enet_wr_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, val); ++} ++ ++void xgene_gmac_reset(struct xgene_enet_pdata *pdata) ++{ ++ xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET1); ++ xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, 0); ++} ++ ++void xgene_gmac_init(struct xgene_enet_pdata *pdata, int speed) ++{ ++ u32 value, mc2; ++ u32 intf_ctl, rgmii; ++ u32 icm0, icm2; ++ ++ xgene_gmac_reset(pdata); ++ ++ xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, &icm0); ++ xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, &icm2); ++ xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_2_ADDR, &mc2); ++ xgene_enet_rd_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, &intf_ctl); ++ xgene_enet_rd_csr(pdata, RGMII_REG_0_ADDR, &rgmii); ++ ++ switch (speed) { ++ case SPEED_10: ++ ENET_INTERFACE_MODE2_SET(&mc2, 1); ++ CFG_MACMODE_SET(&icm0, 0); ++ CFG_WAITASYNCRD_SET(&icm2, 500); ++ rgmii &= ~CFG_SPEED_1250; ++ break; ++ case SPEED_100: ++ ENET_INTERFACE_MODE2_SET(&mc2, 1); ++ intf_ctl |= ENET_LHD_MODE; ++ CFG_MACMODE_SET(&icm0, 1); ++ CFG_WAITASYNCRD_SET(&icm2, 80); ++ rgmii &= ~CFG_SPEED_1250; ++ break; ++ default: ++ ENET_INTERFACE_MODE2_SET(&mc2, 2); ++ intf_ctl |= ENET_GHD_MODE; ++ CFG_TXCLK_MUXSEL0_SET(&rgmii, 4); ++ xgene_enet_rd_csr(pdata, DEBUG_REG_ADDR, &value); ++ value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX; ++ xgene_enet_wr_csr(pdata, DEBUG_REG_ADDR, value); ++ break; ++ } ++ ++ mc2 |= FULL_DUPLEX2; ++ xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_2_ADDR, mc2); ++ xgene_enet_wr_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl); ++ ++ xgene_gmac_set_mac_addr(pdata); ++ ++ /* Adjust MDC clock frequency */ ++ xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &value); ++ MGMT_CLOCK_SEL_SET(&value, 7); ++ xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, value); ++ ++ /* Enable drop if bufpool not available */ ++ xgene_enet_rd_csr(pdata, RSIF_CONFIG_REG_ADDR, &value); ++ value |= CFG_RSIF_FPBUFF_TIMEOUT_EN; ++ xgene_enet_wr_csr(pdata, RSIF_CONFIG_REG_ADDR, value); ++ ++ /* Rtype should be copied from FP */ ++ xgene_enet_wr_csr(pdata, RSIF_RAM_DBG_REG0_ADDR, 0); ++ xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii); ++ ++ /* Rx-Tx traffic resume */ ++ xgene_enet_wr_csr(pdata, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0); ++ ++ xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, icm0); ++ xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, icm2); ++ ++ xgene_enet_rd_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, &value); ++ value &= ~TX_DV_GATE_EN0; ++ value &= ~RX_DV_GATE_EN0; ++ value |= RESUME_RX0; ++ xgene_enet_wr_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, value); ++ ++ xgene_enet_wr_csr(pdata, CFG_BYPASS_ADDR, RESUME_TX); ++} ++ ++static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata) ++{ ++ u32 val = 0xffffffff; ++ ++ xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, val); ++ xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPQASSOC_ADDR, val); ++ xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEWQASSOC_ADDR, val); ++ xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, val); ++} ++ ++void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata, ++ u32 dst_ring_num, u16 bufpool_id) ++{ ++ u32 cb; ++ u32 fpsel; ++ ++ fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20; ++ ++ xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb); ++ cb |= CFG_CLE_BYPASS_EN0; ++ CFG_CLE_IP_PROTOCOL0_SET(&cb, 3); ++ xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb); ++ ++ xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb); ++ CFG_CLE_DSTQID0_SET(&cb, dst_ring_num); ++ CFG_CLE_FPSEL0_SET(&cb, fpsel); ++ 
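++	/*
++	 * REG1 appears to pair the bypass destination with its buffer
++	 * source: frames skip the classifier, are queued to dst_ring_num,
++	 * and consume buffers from the free pool selected by fpsel.
++	 */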
xgene_enet_wr_csr(pdata, CLE_BYPASS_REG1_0_ADDR, cb); ++} ++ ++void xgene_gmac_rx_enable(struct xgene_enet_pdata *pdata) ++{ ++ u32 data; ++ ++ xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data); ++ xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | RX_EN); ++} ++ ++void xgene_gmac_tx_enable(struct xgene_enet_pdata *pdata) ++{ ++ u32 data; ++ ++ xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data); ++ xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | TX_EN); ++} ++ ++void xgene_gmac_rx_disable(struct xgene_enet_pdata *pdata) ++{ ++ u32 data; ++ ++ xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data); ++ xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~RX_EN); ++} ++ ++void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata) ++{ ++ u32 data; ++ ++ xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data); ++ xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN); ++} ++ ++void xgene_enet_reset(struct xgene_enet_pdata *pdata) ++{ ++ u32 val; ++ ++ clk_prepare_enable(pdata->clk); ++ clk_disable_unprepare(pdata->clk); ++ clk_prepare_enable(pdata->clk); ++ xgene_enet_ecc_init(pdata); ++ xgene_enet_config_ring_if_assoc(pdata); ++ ++ /* Enable auto-incr for scanning */ ++ xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &val); ++ val |= SCAN_AUTO_INCR; ++ MGMT_CLOCK_SEL_SET(&val, 1); ++ xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, val); ++} ++ ++void xgene_gport_shutdown(struct xgene_enet_pdata *pdata) ++{ ++ clk_disable_unprepare(pdata->clk); ++} ++ ++static int xgene_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) ++{ ++ struct xgene_enet_pdata *pdata = bus->priv; ++ u32 val; ++ ++ val = xgene_mii_phy_read(pdata, mii_id, regnum); ++ netdev_dbg(pdata->ndev, "mdio_rd: bus=%d reg=%d val=%x\n", ++ mii_id, regnum, val); ++ ++ return val; ++} ++ ++static int xgene_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, ++ u16 val) ++{ ++ struct xgene_enet_pdata *pdata = bus->priv; ++ int ret; ++ ++ netdev_dbg(pdata->ndev, "mdio_wr: bus=%d reg=%d val=%x\n", ++ mii_id, regnum, val); ++ ret = xgene_mii_phy_write(pdata, mii_id, regnum, val); ++ ++ return ret; ++} ++ ++static void xgene_enet_adjust_link(struct net_device *ndev) ++{ ++ struct xgene_enet_pdata *pdata = netdev_priv(ndev); ++ struct phy_device *phydev = pdata->phy_dev; ++ bool status_change = false; ++ ++ if (phydev->link && pdata->phy_speed != phydev->speed) { ++ xgene_gmac_init(pdata, phydev->speed); ++ pdata->phy_speed = phydev->speed; ++ status_change = true; ++ } ++ ++ if (pdata->phy_link != phydev->link) { ++ if (!phydev->link) ++ pdata->phy_speed = 0; ++ pdata->phy_link = phydev->link; ++ status_change = true; ++ } ++ ++ if (!status_change) ++ return; ++ ++ if (phydev->link) { ++ xgene_gmac_rx_enable(pdata); ++ xgene_gmac_tx_enable(pdata); ++ } else { ++ xgene_gmac_rx_disable(pdata); ++ xgene_gmac_tx_disable(pdata); ++ } ++ phy_print_status(phydev); ++} ++ ++static int xgene_enet_phy_connect(struct net_device *ndev) ++{ ++ struct xgene_enet_pdata *pdata = netdev_priv(ndev); ++ struct device_node *phy_np; ++ struct phy_device *phy_dev; ++ struct device *dev = &pdata->pdev->dev; ++ ++ phy_np = of_parse_phandle(dev->of_node, "phy-handle", 0); ++ if (!phy_np) { ++ netdev_dbg(ndev, "No phy-handle found\n"); ++ return -ENODEV; ++ } ++ ++ phy_dev = of_phy_connect(ndev, phy_np, &xgene_enet_adjust_link, ++ 0, pdata->phy_mode); ++ if (!phy_dev) { ++ netdev_err(ndev, "Could not connect to PHY\n"); ++ return -ENODEV; ++ } ++ ++ pdata->phy_link = 0; ++ pdata->phy_speed = 0; ++ pdata->phy_dev = 
phy_dev; ++ ++ return 0; ++} ++ ++int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata) ++{ ++ struct net_device *ndev = pdata->ndev; ++ struct device *dev = &pdata->pdev->dev; ++ struct device_node *child_np; ++ struct device_node *mdio_np = NULL; ++ struct mii_bus *mdio_bus; ++ int ret; ++ ++ for_each_child_of_node(dev->of_node, child_np) { ++ if (of_device_is_compatible(child_np, "apm,xgene-mdio")) { ++ mdio_np = child_np; ++ break; ++ } ++ } ++ ++ if (!mdio_np) { ++ netdev_dbg(ndev, "No mdio node in the dts\n"); ++ return -1; ++ } ++ ++ mdio_bus = mdiobus_alloc(); ++ if (!mdio_bus) ++ return -ENOMEM; ++ ++ mdio_bus->name = "APM X-Gene MDIO bus"; ++ mdio_bus->read = xgene_enet_mdio_read; ++ mdio_bus->write = xgene_enet_mdio_write; ++ snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "xgene-mii", ++ ndev->name); ++ ++ mdio_bus->irq = devm_kcalloc(dev, PHY_MAX_ADDR, sizeof(int), ++ GFP_KERNEL); ++ if (!mdio_bus->irq) { ++ ret = -ENOMEM; ++ goto err; ++ } ++ ++ mdio_bus->priv = pdata; ++ mdio_bus->parent = &ndev->dev; ++ ++ ret = of_mdiobus_register(mdio_bus, mdio_np); ++ if (ret) { ++ netdev_err(ndev, "Failed to register MDIO bus\n"); ++ goto err; ++ } ++ pdata->mdio_bus = mdio_bus; ++ ++ ret = xgene_enet_phy_connect(ndev); ++ if (ret) ++ goto err; ++ xgene_gmac_phy_enable_scan_cycle(pdata); ++ ++ return ret; ++ ++err: ++ if (mdio_bus->irq) ++ devm_kfree(dev, mdio_bus->irq); ++ mdiobus_free(mdio_bus); ++ ++ return ret; ++} ++ ++int xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata) ++{ ++ struct mii_bus *mdio_bus; ++ ++ mdio_bus = pdata->mdio_bus; ++ mdiobus_unregister(mdio_bus); ++ mdiobus_free(mdio_bus); ++ pdata->mdio_bus = NULL; ++ ++ return 0; ++} +diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h +new file mode 100644 +index 0000000..934baca +--- /dev/null ++++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h +@@ -0,0 +1,394 @@ ++/* Applied Micro X-Gene SoC Ethernet Driver ++ * ++ * Copyright (c) 2014, Applied Micro Circuits Corporation ++ * Authors: Iyappan Subramanian ++ * Ravi Patel ++ * Keyur Chudgar ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see . 
++ */ ++ ++#ifndef __XGENE_ENET_HW_H__ ++#define __XGENE_ENET_HW_H__ ++ ++#include "xgene_enet_main.h" ++ ++struct xgene_enet_pdata; ++struct xgene_enet_stats; ++ ++/* clears and then set bits */ ++static inline void xgene_set_bits(u32 *dst, u32 val, u32 start, u32 len) ++{ ++ u32 end = start + len - 1; ++ u32 mask = GENMASK(end, start); ++ ++ *dst &= ~mask; ++ *dst |= (val << start) & mask; ++} ++ ++static inline u32 xgene_get_bits(u32 val, u32 start, u32 end) ++{ ++ return (val & GENMASK(end, start)) >> start; ++} ++ ++#define CSR_RING_ID 0x0008 ++#define OVERWRITE BIT(31) ++#define IS_BUFFER_POOL BIT(20) ++#define PREFETCH_BUF_EN BIT(21) ++#define CSR_RING_ID_BUF 0x000c ++#define CSR_RING_NE_INT_MODE 0x017c ++#define CSR_RING_CONFIG 0x006c ++#define CSR_RING_WR_BASE 0x0070 ++#define NUM_RING_CONFIG 5 ++#define BUFPOOL_MODE 3 ++#define RM3 3 ++#define INC_DEC_CMD_ADDR 0x002c ++#define UDP_HDR_SIZE 2 ++#define BUF_LEN_CODE_2K 0x5000 ++ ++#define CREATE_MASK(pos, len) GENMASK((pos)+(len)-1, (pos)) ++#define CREATE_MASK_ULL(pos, len) GENMASK_ULL((pos)+(len)-1, (pos)) ++ ++/* Empty slot soft signature */ ++#define EMPTY_SLOT_INDEX 1 ++#define EMPTY_SLOT ~0ULL ++ ++#define WORK_DESC_SIZE 32 ++#define BUFPOOL_DESC_SIZE 16 ++ ++#define RING_OWNER_MASK GENMASK(9, 6) ++#define RING_BUFNUM_MASK GENMASK(5, 0) ++ ++#define SELTHRSH_POS 3 ++#define SELTHRSH_LEN 3 ++#define RINGADDRL_POS 5 ++#define RINGADDRL_LEN 27 ++#define RINGADDRH_POS 0 ++#define RINGADDRH_LEN 6 ++#define RINGSIZE_POS 23 ++#define RINGSIZE_LEN 3 ++#define RINGTYPE_POS 19 ++#define RINGTYPE_LEN 2 ++#define RINGMODE_POS 20 ++#define RINGMODE_LEN 3 ++#define RECOMTIMEOUTL_POS 28 ++#define RECOMTIMEOUTL_LEN 3 ++#define RECOMTIMEOUTH_POS 0 ++#define RECOMTIMEOUTH_LEN 2 ++#define NUMMSGSINQ_POS 1 ++#define NUMMSGSINQ_LEN 16 ++#define ACCEPTLERR BIT(19) ++#define QCOHERENT BIT(4) ++#define RECOMBBUF BIT(27) ++ ++#define BLOCK_ETH_CSR_OFFSET 0x2000 ++#define BLOCK_ETH_RING_IF_OFFSET 0x9000 ++#define BLOCK_ETH_CLKRST_CSR_OFFSET 0xC000 ++#define BLOCK_ETH_DIAG_CSR_OFFSET 0xD000 ++ ++#define BLOCK_ETH_MAC_OFFSET 0x0000 ++#define BLOCK_ETH_STATS_OFFSET 0x0014 ++#define BLOCK_ETH_MAC_CSR_OFFSET 0x2800 ++ ++#define MAC_ADDR_REG_OFFSET 0x00 ++#define MAC_COMMAND_REG_OFFSET 0x04 ++#define MAC_WRITE_REG_OFFSET 0x08 ++#define MAC_READ_REG_OFFSET 0x0c ++#define MAC_COMMAND_DONE_REG_OFFSET 0x10 ++ ++#define STAT_ADDR_REG_OFFSET 0x00 ++#define STAT_COMMAND_REG_OFFSET 0x04 ++#define STAT_WRITE_REG_OFFSET 0x08 ++#define STAT_READ_REG_OFFSET 0x0c ++#define STAT_COMMAND_DONE_REG_OFFSET 0x10 ++ ++#define MII_MGMT_CONFIG_ADDR 0x20 ++#define MII_MGMT_COMMAND_ADDR 0x24 ++#define MII_MGMT_ADDRESS_ADDR 0x28 ++#define MII_MGMT_CONTROL_ADDR 0x2c ++#define MII_MGMT_STATUS_ADDR 0x30 ++#define MII_MGMT_INDICATORS_ADDR 0x34 ++ ++#define BUSY_MASK BIT(0) ++#define READ_CYCLE_MASK BIT(0) ++#define PHY_CONTROL_SET(dst, val) xgene_set_bits(dst, val, 0, 16) ++ ++#define ENET_SPARE_CFG_REG_ADDR 0x0750 ++#define RSIF_CONFIG_REG_ADDR 0x0010 ++#define RSIF_RAM_DBG_REG0_ADDR 0x0048 ++#define RGMII_REG_0_ADDR 0x07e0 ++#define CFG_LINK_AGGR_RESUME_0_ADDR 0x07c8 ++#define DEBUG_REG_ADDR 0x0700 ++#define CFG_BYPASS_ADDR 0x0294 ++#define CLE_BYPASS_REG0_0_ADDR 0x0490 ++#define CLE_BYPASS_REG1_0_ADDR 0x0494 ++#define CFG_RSIF_FPBUFF_TIMEOUT_EN BIT(31) ++#define RESUME_TX BIT(0) ++#define CFG_SPEED_1250 BIT(24) ++#define TX_PORT0 BIT(0) ++#define CFG_BYPASS_UNISEC_TX BIT(2) ++#define CFG_BYPASS_UNISEC_RX BIT(1) ++#define CFG_CLE_BYPASS_EN0 BIT(31) ++#define 
CFG_TXCLK_MUXSEL0_SET(dst, val) xgene_set_bits(dst, val, 29, 3)
++
++#define CFG_CLE_IP_PROTOCOL0_SET(dst, val) xgene_set_bits(dst, val, 16, 2)
++#define CFG_CLE_DSTQID0_SET(dst, val) xgene_set_bits(dst, val, 0, 12)
++#define CFG_CLE_FPSEL0_SET(dst, val) xgene_set_bits(dst, val, 16, 4)
++#define CFG_MACMODE_SET(dst, val) xgene_set_bits(dst, val, 18, 2)
++#define CFG_WAITASYNCRD_SET(dst, val) xgene_set_bits(dst, val, 0, 16)
++#define ICM_CONFIG0_REG_0_ADDR 0x0400
++#define ICM_CONFIG2_REG_0_ADDR 0x0410
++#define RX_DV_GATE_REG_0_ADDR 0x05fc
++#define TX_DV_GATE_EN0 BIT(2)
++#define RX_DV_GATE_EN0 BIT(1)
++#define RESUME_RX0 BIT(0)
++#define ENET_CFGSSQMIWQASSOC_ADDR 0xe0
++#define ENET_CFGSSQMIFPQASSOC_ADDR 0xdc
++#define ENET_CFGSSQMIQMLITEFPQASSOC_ADDR 0xf0
++#define ENET_CFGSSQMIQMLITEWQASSOC_ADDR 0xf4
++#define ENET_CFG_MEM_RAM_SHUTDOWN_ADDR 0x70
++#define ENET_BLOCK_MEM_RDY_ADDR 0x74
++#define MAC_CONFIG_1_ADDR 0x00
++#define MAC_CONFIG_2_ADDR 0x04
++#define MAX_FRAME_LEN_ADDR 0x10
++#define INTERFACE_CONTROL_ADDR 0x38
++#define STATION_ADDR0_ADDR 0x40
++#define STATION_ADDR1_ADDR 0x44
++#define SCAN_CYCLE_MASK_SET(dst, val) xgene_set_bits(dst, val, 0, 1)
++#define PHY_ADDR_SET(dst, val) xgene_set_bits(dst, val, 8, 5)
++#define REG_ADDR_SET(dst, val) xgene_set_bits(dst, val, 0, 5)
++#define ENET_INTERFACE_MODE2_SET(dst, val) xgene_set_bits(dst, val, 8, 2)
++#define MGMT_CLOCK_SEL_SET(dst, val) xgene_set_bits(dst, val, 0, 3)
++#define SOFT_RESET1 BIT(31)
++#define TX_EN BIT(0)
++#define RX_EN BIT(2)
++#define ENET_LHD_MODE BIT(25)
++#define ENET_GHD_MODE BIT(26)
++#define FULL_DUPLEX2 BIT(0)
++#define SCAN_AUTO_INCR BIT(5)
++#define TBYT_ADDR 0x38
++#define TPKT_ADDR 0x39
++#define TDRP_ADDR 0x45
++#define TFCS_ADDR 0x47
++#define TUND_ADDR 0x4a
++
++#define TSO_IPPROTO_TCP 1
++#define TSO_IPPROTO_UDP 0
++#define FULL_DUPLEX 2
++
++#define USERINFO_POS 0
++#define USERINFO_LEN 32
++#define FPQNUM_POS 32
++#define FPQNUM_LEN 12
++#define LERR_POS 60
++#define LERR_LEN 3
++#define STASH_POS 52
++#define STASH_LEN 2
++#define BUFDATALEN_POS 48
++#define BUFDATALEN_LEN 12
++#define DATAADDR_POS 0
++#define DATAADDR_LEN 42
++#define COHERENT_POS 63
++#define HENQNUM_POS 48
++#define HENQNUM_LEN 12
++#define TYPESEL_POS 44
++#define TYPESEL_LEN 4
++#define ETHHDR_POS 12
++#define IC_POS 35 /* Insert CRC */
++#define TCPHDR_POS 0
++#define TCPHDR_LEN 6
++#define IPHDR_POS 6
++#define IPHDR_LEN 6
++#define EC_POS 22 /* Enable checksum */
++#define IS_POS 24 /* IP protocol select */
++
++#define DATAADDR_MASK CREATE_MASK_ULL(DATAADDR_POS, DATAADDR_LEN)
++#define BUFDATALEN_MASK CREATE_MASK_ULL(BUFDATALEN_POS, BUFDATALEN_LEN)
++#define USERINFO_MASK CREATE_MASK_ULL(USERINFO_POS, USERINFO_LEN)
++#define FPQNUM_MASK CREATE_MASK_ULL(FPQNUM_POS, FPQNUM_LEN)
++#define LERR_MASK CREATE_MASK_ULL(LERR_POS, LERR_LEN)
++#define STASHING_MASK CREATE_MASK_ULL(STASH_POS, STASH_LEN)
++#define COHERENT_MASK BIT_ULL(COHERENT_POS)
++#define HENQNUM_MASK CREATE_MASK_ULL(HENQNUM_POS, HENQNUM_LEN)
++#define TCPHDR_MASK CREATE_MASK(TCPHDR_POS, TCPHDR_LEN)
++#define IPHDR_MASK CREATE_MASK(IPHDR_POS, IPHDR_LEN)
++#define EC_MASK BIT(EC_POS)
++#define IS_MASK BIT(IS_POS)
++#define INSERT_CRC BIT_ULL(IC_POS)
++#define TYPE_ETH_WORK_MESSAGE BIT_ULL(44)
++
++struct xgene_enet_desc {
++ dma_addr_t dataaddr;
++ u16 bufdatalen;
++ u32 userinfo;
++ u64 hopinfo_lsb;
++ u16 henqnum;
++ u16 fpqnum;
++ u8 stash;
++ u8 status;
++};
++
++struct xgene_enet_raw_desc {
++ u64 m0;
++ u64 m1;
++ u64 m2;
++ u64 m3;
++};
++
++struct
xgene_enet_raw_desc16 { ++ u64 m0; ++ u64 m1; ++}; ++ ++static inline void xgene_enet_cpu_to_le64(void *desc_ptr, int count) ++{ ++ u64 *desc = desc_ptr; ++ int i; ++ ++ for (i = 0; i < count; i++) ++ desc[i] = cpu_to_le64(desc[i]); ++} ++ ++static inline void xgene_enet_le64_to_cpu(void *desc_ptr, int count) ++{ ++ u64 *desc = desc_ptr; ++ int i; ++ ++ for (i = 0; i < count; i++) ++ desc[i] = le64_to_cpu(desc[i]); ++} ++ ++static inline void xgene_enet_desc16_to_le64(void *desc_ptr) ++{ ++ u64 *desc; ++ ++ desc = desc_ptr; ++ desc[1] = cpu_to_le64(desc[1]); ++} ++ ++static inline void xgene_enet_le64_to_desc16(void *desc_ptr) ++{ ++ u64 *desc; ++ ++ desc = desc_ptr; ++ desc[1] = le64_to_cpu(desc[1]); ++} ++ ++enum xgene_enet_ring_cfgsize { ++ RING_CFGSIZE_512B, ++ RING_CFGSIZE_2KB, ++ RING_CFGSIZE_16KB, ++ RING_CFGSIZE_64KB, ++ RING_CFGSIZE_512KB, ++ RING_CFGSIZE_INVALID ++}; ++ ++enum xgene_enet_ring_type { ++ RING_DISABLED, ++ RING_REGULAR, ++ RING_BUFPOOL ++}; ++ ++enum xgene_ring_owner { ++ RING_OWNER_ETH0, ++ RING_OWNER_CPU = 15, ++ RING_OWNER_INVALID ++}; ++ ++enum xgene_enet_ring_bufnum { ++ RING_BUFNUM_REGULAR = 0x0, ++ RING_BUFNUM_BUFPOOL = 0x20, ++ RING_BUFNUM_INVALID ++}; ++ ++enum xgene_enet_cmd { ++ XGENE_ENET_WR_CMD = BIT(31), ++ XGENE_ENET_RD_CMD = BIT(30) ++}; ++ ++enum xgene_enet_err_code { ++ HBF_READ_DATA = 3, ++ HBF_LL_READ = 4, ++ BAD_WORK_MSG = 6, ++ BUFPOOL_TIMEOUT = 15, ++ INGRESS_CRC = 16, ++ INGRESS_CHECKSUM = 17, ++ INGRESS_TRUNC_FRAME = 18, ++ INGRESS_PKT_LEN = 19, ++ INGRESS_PKT_UNDER = 20, ++ INGRESS_FIFO_OVERRUN = 21, ++ INGRESS_CHECKSUM_COMPUTE = 26, ++ ERR_CODE_INVALID ++}; ++ ++static inline enum xgene_ring_owner xgene_enet_ring_owner(u16 id) ++{ ++ return (id & RING_OWNER_MASK) >> 6; ++} ++ ++static inline u8 xgene_enet_ring_bufnum(u16 id) ++{ ++ return id & RING_BUFNUM_MASK; ++} ++ ++static inline bool xgene_enet_is_bufpool(u16 id) ++{ ++ return ((id & RING_BUFNUM_MASK) >= 0x20) ? true : false; ++} ++ ++static inline u16 xgene_enet_get_numslots(u16 id, u32 size) ++{ ++ bool is_bufpool = xgene_enet_is_bufpool(id); ++ ++ return (is_bufpool) ? 
size / BUFPOOL_DESC_SIZE : ++ size / WORK_DESC_SIZE; ++} ++ ++struct xgene_enet_desc_ring *xgene_enet_setup_ring( ++ struct xgene_enet_desc_ring *ring); ++void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring); ++ ++void xgene_set_tx_desc(struct xgene_enet_desc_ring *ring, ++ struct xgene_enet_raw_desc *raw_desc); ++void xgene_get_desc(struct xgene_enet_desc_ring *ring, ++ struct xgene_enet_raw_desc *raw_desc); ++void xgene_set_init_bufpool_desc(struct xgene_enet_desc_ring *ring, ++ struct xgene_enet_raw_desc16 *raw_desc); ++void xgene_set_refill_bufpool_desc(struct xgene_enet_desc_ring *ring, ++ struct xgene_enet_raw_desc16 *raw_desc); ++void xgene_get_bufpool_desc(struct xgene_enet_desc_ring *ring, ++ struct xgene_enet_raw_desc16 *raw_desc); ++u64 xgene_prepare_eth_work_msg(u8 l4hlen, u8 l3hlen, u8 ethhdr, ++ u8 csum_enable, u8 proto); ++void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring, ++ struct xgene_enet_pdata *pdata, ++ enum xgene_enet_err_code status); ++ ++void xgene_enet_reset(struct xgene_enet_pdata *priv); ++void xgene_gmac_reset(struct xgene_enet_pdata *priv); ++void xgene_gmac_init(struct xgene_enet_pdata *priv, int speed); ++void xgene_gmac_tx_enable(struct xgene_enet_pdata *priv); ++void xgene_gmac_rx_enable(struct xgene_enet_pdata *priv); ++void xgene_gmac_tx_disable(struct xgene_enet_pdata *priv); ++void xgene_gmac_rx_disable(struct xgene_enet_pdata *priv); ++void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata); ++void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata, ++ u32 dst_ring_num, u16 bufpool_id); ++void xgene_gport_shutdown(struct xgene_enet_pdata *priv); ++void xgene_gmac_get_tx_stats(struct xgene_enet_pdata *pdata); ++ ++int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata); ++int xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata); ++ ++#endif /* __XGENE_ENET_HW_H__ */ +diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +new file mode 100644 +index 0000000..09881a0 +--- /dev/null ++++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +@@ -0,0 +1,939 @@ ++/* Applied Micro X-Gene SoC Ethernet Driver ++ * ++ * Copyright (c) 2014, Applied Micro Circuits Corporation ++ * Authors: Iyappan Subramanian ++ * Ravi Patel ++ * Keyur Chudgar ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see . 
++ */ ++ ++#include "xgene_enet_main.h" ++#include "xgene_enet_hw.h" ++ ++static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool) ++{ ++ struct xgene_enet_raw_desc16 *raw_desc; ++ int i; ++ ++ for (i = 0; i < buf_pool->slots; i++) { ++ raw_desc = &buf_pool->raw_desc16[i]; ++ ++ buf_pool->desc.userinfo = i; ++ buf_pool->desc.fpqnum = buf_pool->dst_ring_num; ++ buf_pool->desc.stash = 1; ++ ++ xgene_set_init_bufpool_desc(buf_pool, raw_desc); ++ ++ /* Hardware expects descriptor in little endian format */ ++ xgene_enet_cpu_to_le64(raw_desc, 4); ++ } ++} ++ ++static struct device *ndev_to_dev(struct net_device *ndev) ++{ ++ struct xgene_enet_pdata *pdata = netdev_priv(ndev); ++ ++ return &pdata->pdev->dev; ++} ++ ++static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool, ++ u32 nbuf) ++{ ++ struct sk_buff *skb; ++ struct xgene_enet_raw_desc16 *raw_desc; ++ struct net_device *ndev; ++ struct device *dev; ++ dma_addr_t dma_addr; ++ u32 tail = buf_pool->tail; ++ u32 slots = buf_pool->slots - 1; ++ u16 bufdatalen, len; ++ int i; ++ ++ ndev = buf_pool->ndev; ++ dev = ndev_to_dev(buf_pool->ndev); ++ bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0)); ++ len = XGENE_ENET_MAX_MTU; ++ ++ for (i = 0; i < nbuf; i++) { ++ raw_desc = &buf_pool->raw_desc16[tail]; ++ ++ skb = netdev_alloc_skb_ip_align(ndev, len); ++ if (unlikely(!skb)) ++ return -ENOMEM; ++ buf_pool->rx_skb[tail] = skb; ++ ++ dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE); ++ if (dma_mapping_error(dev, dma_addr)) { ++ netdev_err(ndev, "DMA mapping error\n"); ++ dev_kfree_skb_any(skb); ++ return -EINVAL; ++ } ++ ++ buf_pool->desc.dataaddr = dma_addr; ++ buf_pool->desc.bufdatalen = bufdatalen; ++ ++ xgene_set_refill_bufpool_desc(buf_pool, raw_desc); ++ ++ xgene_enet_desc16_to_le64(raw_desc); ++ tail = (tail + 1) & slots; ++ } ++ ++ iowrite32(nbuf, buf_pool->cmd); ++ buf_pool->tail = tail; ++ ++ return 0; ++} ++ ++static u16 xgene_enet_dst_ring_num(struct xgene_enet_desc_ring *ring) ++{ ++ struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); ++ ++ return ((u16)pdata->rm << 10) | ring->num; ++} ++ ++static u8 xgene_enet_hdr_len(const void *data) ++{ ++ const struct ethhdr *eth = data; ++ ++ return (eth->h_proto == htons(ETH_P_8021Q)) ? 
VLAN_ETH_HLEN : ETH_HLEN; ++} ++ ++static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring) ++{ ++ u32 *cmd_base = ring->cmd_base; ++ u32 ring_state, num_msgs; ++ ++ ring_state = ioread32(&cmd_base[1]); ++ num_msgs = ring_state & CREATE_MASK(NUMMSGSINQ_POS, NUMMSGSINQ_LEN); ++ ++ return num_msgs >> NUMMSGSINQ_POS; ++} ++ ++static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool) ++{ ++ struct xgene_enet_raw_desc16 *raw_desc; ++ u32 slots = buf_pool->slots - 1; ++ u32 tail = buf_pool->tail; ++ u32 userinfo; ++ int i, len; ++ ++ len = xgene_enet_ring_len(buf_pool); ++ for (i = 0; i < len; i++) { ++ tail = (tail - 1) & slots; ++ raw_desc = &buf_pool->raw_desc16[tail]; ++ ++ /* Hardware stores descriptor in little endian format */ ++ xgene_enet_le64_to_desc16(raw_desc); ++ ++ xgene_get_bufpool_desc(buf_pool, raw_desc); ++ userinfo = buf_pool->desc.userinfo; ++ dev_kfree_skb_any(buf_pool->rx_skb[userinfo]); ++ } ++ ++ iowrite32(-len, buf_pool->cmd); ++ buf_pool->tail = tail; ++} ++ ++irqreturn_t xgene_enet_rx_irq(const int irq, void *data) ++{ ++ struct xgene_enet_desc_ring *rx_ring = data; ++ ++ if (napi_schedule_prep(&rx_ring->napi)) { ++ disable_irq_nosync(irq); ++ __napi_schedule(&rx_ring->napi); ++ } ++ ++ return IRQ_HANDLED; ++} ++ ++static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring) ++{ ++ struct xgene_enet_desc *desc; ++ struct sk_buff *skb; ++ struct device *dev; ++ u16 skb_index; ++ int ret = 0; ++ ++ desc = &cp_ring->desc; ++ skb_index = desc->userinfo; ++ skb = cp_ring->cp_skb[skb_index]; ++ ++ dev = ndev_to_dev(cp_ring->ndev); ++ dma_unmap_single(dev, desc->dataaddr, desc->bufdatalen, DMA_TO_DEVICE); ++ ++ /* Checking for error */ ++ if (unlikely(desc->status > 2)) { ++ xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev), ++ desc->status); ++ ret = -1; ++ } ++ ++ if (likely(skb)) { ++ dev_kfree_skb_any(skb); ++ } else { ++ netdev_err(cp_ring->ndev, "completion skb is NULL\n"); ++ ret = -1; ++ } ++ ++ return ret; ++} ++ ++static u64 xgene_enet_work_msg(struct sk_buff *skb) ++{ ++ struct iphdr *iph; ++ u8 l3hlen, l4hlen = 0; ++ u8 csum_enable = 0; ++ u8 proto = 0; ++ u8 ethhdr; ++ u64 hopinfo; ++ ++ if (unlikely(skb->protocol != htons(ETH_P_IP)) && ++ unlikely(skb->protocol != htons(ETH_P_8021Q))) ++ goto out; ++ ++ if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM))) ++ goto out; ++ ++ iph = ip_hdr(skb); ++ if (unlikely(iph->frag_off & htons(IP_MF | IP_OFFSET))) ++ goto out; ++ ++ if (likely(iph->protocol == IPPROTO_TCP)) { ++ l4hlen = tcp_hdrlen(skb) / 4; ++ csum_enable = 1; ++ proto = TSO_IPPROTO_TCP; ++ } else if (iph->protocol == IPPROTO_UDP) { ++ l4hlen = UDP_HDR_SIZE; ++ csum_enable = 1; ++ proto = TSO_IPPROTO_UDP; ++ } ++out: ++ l3hlen = ip_hdrlen(skb) >> 2; ++ ethhdr = xgene_enet_hdr_len(skb->data); ++ hopinfo = xgene_prepare_eth_work_msg(l4hlen, l3hlen, ethhdr, ++ csum_enable, proto); ++ ++ return hopinfo; ++} ++ ++static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring, ++ struct sk_buff *skb) ++{ ++ struct device *dev = ndev_to_dev(tx_ring->ndev); ++ struct xgene_enet_raw_desc *raw_desc; ++ dma_addr_t dma_addr; ++ u16 tail = tx_ring->tail; ++ u64 hopinfo; ++ ++ raw_desc = &tx_ring->raw_desc[tail]; ++ memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc)); ++ ++ dma_addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, dma_addr)) { ++ netdev_err(tx_ring->ndev, "DMA mapping error\n"); ++ return -EINVAL; ++ } ++ ++ tx_ring->desc.dataaddr = dma_addr; ++ 
tx_ring->desc.bufdatalen = skb->len; ++ tx_ring->desc.henqnum = tx_ring->dst_ring_num; ++ tx_ring->desc.userinfo = tail; ++ ++ hopinfo = xgene_enet_work_msg(skb); ++ tx_ring->desc.hopinfo_lsb = hopinfo; ++ ++ xgene_set_tx_desc(tx_ring, raw_desc); ++ ++ /* Hardware expects descriptor in little endian format */ ++ xgene_enet_cpu_to_le64(raw_desc, 4); ++ tx_ring->cp_ring->cp_skb[tail] = skb; ++ ++ return 0; ++} ++ ++static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb, ++ struct net_device *ndev) ++{ ++ struct xgene_enet_pdata *pdata = netdev_priv(ndev); ++ struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring; ++ struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring; ++ u32 tx_level, cq_level; ++ ++ tx_level = xgene_enet_ring_len(tx_ring); ++ cq_level = xgene_enet_ring_len(cp_ring); ++ if (unlikely(tx_level > pdata->tx_qcnt_hi || ++ cq_level > pdata->cp_qcnt_hi)) { ++ netif_stop_queue(ndev); ++ return NETDEV_TX_BUSY; ++ } ++ ++ if (xgene_enet_setup_tx_desc(tx_ring, skb)) { ++ dev_kfree_skb_any(skb); ++ return NETDEV_TX_OK; ++ } ++ ++ iowrite32(1, tx_ring->cmd); ++ skb_tx_timestamp(skb); ++ tx_ring->tail = (tx_ring->tail + 1) & (tx_ring->slots - 1); ++ ++ pdata->stats.tx_packets++; ++ pdata->stats.tx_bytes += skb->len; ++ ++ return NETDEV_TX_OK; ++} ++ ++void xgene_enet_skip_csum(struct sk_buff *skb) ++{ ++ struct iphdr *iph = ip_hdr(skb); ++ ++ if (!(iph->frag_off & htons(IP_MF | IP_OFFSET)) || ++ (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) { ++ skb->ip_summed = CHECKSUM_UNNECESSARY; ++ } ++} ++ ++static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring) ++{ ++ struct net_device *ndev; ++ struct xgene_enet_pdata *pdata; ++ struct device *dev; ++ struct xgene_enet_desc_ring *buf_pool; ++ u32 datalen, skb_index; ++ struct sk_buff *skb; ++ struct xgene_enet_desc *desc; ++ int ret = 0; ++ ++ ndev = rx_ring->ndev; ++ pdata = netdev_priv(ndev); ++ dev = ndev_to_dev(rx_ring->ndev); ++ buf_pool = rx_ring->buf_pool; ++ ++ desc = &rx_ring->desc; ++ dma_unmap_single(dev, desc->dataaddr, XGENE_ENET_MAX_MTU, ++ DMA_FROM_DEVICE); ++ ++ skb_index = desc->userinfo; ++ skb = buf_pool->rx_skb[skb_index]; ++ ++ /* checking for error */ ++ if (unlikely(desc->status > 2)) { ++ dev_kfree_skb_any(skb); ++ xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev), ++ desc->status); ++ pdata->stats.rx_dropped++; ++ ret = -1; ++ goto out; ++ } ++ ++ /* strip off CRC as HW isn't doing this */ ++ datalen = desc->bufdatalen; ++ datalen -= 4; ++ prefetch(skb->data - NET_IP_ALIGN); ++ skb_put(skb, datalen); ++ ++ skb_checksum_none_assert(skb); ++ skb->protocol = eth_type_trans(skb, ndev); ++ if (likely((ndev->features & NETIF_F_IP_CSUM) && ++ skb->protocol == htons(ETH_P_IP))) { ++ xgene_enet_skip_csum(skb); ++ } ++ ++ pdata->stats.rx_packets++; ++ pdata->stats.rx_bytes += datalen; ++ napi_gro_receive(&rx_ring->napi, skb); ++out: ++ if (--rx_ring->nbufpool == 0) { ++ ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL); ++ rx_ring->nbufpool = NUM_BUFPOOL; ++ } ++ ++ return ret; ++} ++ ++static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring, ++ int budget) ++{ ++ struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); ++ struct xgene_enet_raw_desc *raw_desc; ++ u16 head = ring->head; ++ u16 slots = ring->slots - 1; ++ int ret, count = 0; ++ ++ do { ++ raw_desc = &ring->raw_desc[head]; ++ if (unlikely(((u64 *)raw_desc)[EMPTY_SLOT_INDEX] == EMPTY_SLOT)) ++ break; ++ ++ /* Hardware stores descriptor in little endian format */ ++ xgene_enet_le64_to_cpu(raw_desc, 4); 
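++		/*
++		 * After conversion the descriptor fields can be decoded;
++		 * a non-zero free-pool queue number appears to mark a
++		 * received frame (it consumed a bufpool buffer), while
++		 * zero marks a tx completion on this shared ring.
++		 */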
++ xgene_get_desc(ring, raw_desc); ++ if (ring->desc.fpqnum) ++ ret = xgene_enet_rx_frame(ring); ++ else ++ ret = xgene_enet_tx_completion(ring); ++ ((u64 *)raw_desc)[EMPTY_SLOT_INDEX] = EMPTY_SLOT; ++ ++ head = (head + 1) & slots; ++ count++; ++ ++ if (ret) ++ break; ++ } while (--budget); ++ ++ if (likely(count)) { ++ iowrite32(-count, ring->cmd); ++ ring->head = head; ++ ++ if (netif_queue_stopped(ring->ndev)) { ++ if (xgene_enet_ring_len(ring) < pdata->cp_qcnt_low) ++ netif_wake_queue(ring->ndev); ++ } ++ } ++ ++ return budget; ++} ++ ++static int xgene_enet_napi(struct napi_struct *napi, const int budget) ++{ ++ struct xgene_enet_desc_ring *ring; ++ int processed; ++ ++ ring = container_of(napi, struct xgene_enet_desc_ring, napi); ++ processed = xgene_enet_process_ring(ring, budget); ++ ++ if (processed != budget) { ++ napi_complete(napi); ++ enable_irq(ring->irq); ++ } ++ ++ return processed; ++} ++ ++static void xgene_enet_timeout(struct net_device *ndev) ++{ ++ struct xgene_enet_pdata *pdata = netdev_priv(ndev); ++ ++ xgene_gmac_reset(pdata); ++} ++ ++static int xgene_enet_register_irq(struct net_device *ndev) ++{ ++ struct xgene_enet_pdata *pdata = netdev_priv(ndev); ++ struct device *dev = &pdata->pdev->dev; ++ int ret; ++ ++ ret = devm_request_irq(dev, pdata->rx_ring->irq, xgene_enet_rx_irq, ++ IRQF_SHARED, ndev->name, pdata->rx_ring); ++ if (ret) { ++ netdev_err(ndev, "rx%d interrupt request failed\n", ++ pdata->rx_ring->irq); ++ } ++ ++ return ret; ++} ++ ++static void xgene_enet_free_irq(struct net_device *ndev) ++{ ++ struct xgene_enet_pdata *pdata; ++ struct device *dev; ++ ++ pdata = netdev_priv(ndev); ++ dev = &pdata->pdev->dev; ++ devm_free_irq(dev, pdata->rx_ring->irq, pdata->rx_ring); ++} ++ ++static int xgene_enet_open(struct net_device *ndev) ++{ ++ struct xgene_enet_pdata *pdata = netdev_priv(ndev); ++ int ret; ++ ++ xgene_gmac_tx_enable(pdata); ++ xgene_gmac_rx_enable(pdata); ++ ++ ret = xgene_enet_register_irq(ndev); ++ if (ret) ++ return ret; ++ napi_enable(&pdata->rx_ring->napi); ++ ++ if (pdata->phy_dev) ++ phy_start(pdata->phy_dev); ++ ++ netif_start_queue(ndev); ++ ++ return ret; ++} ++ ++static int xgene_enet_close(struct net_device *ndev) ++{ ++ struct xgene_enet_pdata *pdata = netdev_priv(ndev); ++ ++ netif_stop_queue(ndev); ++ ++ if (pdata->phy_dev) ++ phy_stop(pdata->phy_dev); ++ ++ napi_disable(&pdata->rx_ring->napi); ++ xgene_enet_free_irq(ndev); ++ xgene_enet_process_ring(pdata->rx_ring, -1); ++ ++ xgene_gmac_tx_disable(pdata); ++ xgene_gmac_rx_disable(pdata); ++ ++ return 0; ++} ++ ++static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring) ++{ ++ struct xgene_enet_pdata *pdata; ++ struct device *dev; ++ ++ pdata = netdev_priv(ring->ndev); ++ dev = &pdata->pdev->dev; ++ ++ xgene_enet_clear_ring(ring); ++ dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma); ++ devm_kfree(dev, ring); ++} ++ ++static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata) ++{ ++ struct device *dev = &pdata->pdev->dev; ++ struct xgene_enet_desc_ring *buf_pool; ++ ++ if (pdata->tx_ring) { ++ xgene_enet_delete_ring(pdata->tx_ring); ++ pdata->tx_ring = NULL; ++ } ++ ++ if (pdata->rx_ring) { ++ buf_pool = pdata->rx_ring->buf_pool; ++ xgene_enet_delete_bufpool(buf_pool); ++ xgene_enet_delete_ring(buf_pool); ++ devm_kfree(dev, buf_pool->rx_skb); ++ ++ xgene_enet_delete_ring(pdata->rx_ring); ++ pdata->rx_ring = NULL; ++ } ++} ++ ++static int xgene_enet_get_ring_size(struct device *dev, ++ enum xgene_enet_ring_cfgsize cfgsize) ++{ ++ int size = 
-EINVAL; ++ ++ switch (cfgsize) { ++ case RING_CFGSIZE_512B: ++ size = 0x200; ++ break; ++ case RING_CFGSIZE_2KB: ++ size = 0x800; ++ break; ++ case RING_CFGSIZE_16KB: ++ size = 0x4000; ++ break; ++ case RING_CFGSIZE_64KB: ++ size = 0x10000; ++ break; ++ case RING_CFGSIZE_512KB: ++ size = 0x80000; ++ break; ++ default: ++ dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize); ++ break; ++ } ++ ++ return size; ++} ++ ++static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring( ++ struct net_device *ndev, u32 ring_num, ++ enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id) ++{ ++ struct xgene_enet_desc_ring *ring; ++ struct xgene_enet_pdata *pdata = netdev_priv(ndev); ++ struct device *dev = &pdata->pdev->dev; ++ u32 size; ++ ++ ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring), ++ GFP_KERNEL); ++ if (!ring) ++ return NULL; ++ ++ ring->ndev = ndev; ++ ring->num = ring_num; ++ ring->cfgsize = cfgsize; ++ ring->id = ring_id; ++ ++ size = xgene_enet_get_ring_size(dev, cfgsize); ++ ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma, ++ GFP_KERNEL); ++ if (!ring->desc_addr) ++ goto err; ++ ring->size = size; ++ ++ ring->cmd_base = pdata->ring_cmd_addr + (ring->num << 6); ++ ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR; ++ pdata->rm = RM3; ++ ring = xgene_enet_setup_ring(ring); ++ netdev_dbg(ndev, "ring info: num=%d size=%d id=%d slots=%d\n", ++ ring->num, ring->size, ring->id, ring->slots); ++ ++ return ring; ++err: ++ dma_free_coherent(dev, size, ring->desc_addr, ring->dma); ++ devm_kfree(dev, ring); ++ ++ return NULL; ++} ++ ++static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum) ++{ ++ return (owner << 6) | (bufnum & GENMASK(5, 0)); ++} ++ ++static int xgene_enet_create_desc_rings(struct net_device *ndev) ++{ ++ struct xgene_enet_pdata *pdata = netdev_priv(ndev); ++ struct device *dev = &pdata->pdev->dev; ++ struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring; ++ struct xgene_enet_desc_ring *buf_pool = NULL; ++ u8 cpu_bufnum = 0, eth_bufnum = 0; ++ u8 bp_bufnum = 0x20; ++ u16 ring_id, ring_num = 0; ++ int ret; ++ ++ /* allocate rx descriptor ring */ ++ ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++); ++ rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++, ++ RING_CFGSIZE_16KB, ring_id); ++ if (IS_ERR_OR_NULL(rx_ring)) { ++ ret = PTR_ERR(rx_ring); ++ goto err; ++ } ++ ++ /* allocate buffer pool for receiving packets */ ++ ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, bp_bufnum++); ++ buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++, ++ RING_CFGSIZE_2KB, ring_id); ++ if (IS_ERR_OR_NULL(buf_pool)) { ++ ret = PTR_ERR(buf_pool); ++ goto err; ++ } ++ ++ rx_ring->nbufpool = NUM_BUFPOOL; ++ rx_ring->buf_pool = buf_pool; ++ rx_ring->irq = pdata->rx_irq; ++ buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots, ++ sizeof(struct sk_buff *), GFP_KERNEL); ++ if (!buf_pool->rx_skb) { ++ ret = -ENOMEM; ++ goto err; ++ } ++ ++ buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool); ++ rx_ring->buf_pool = buf_pool; ++ pdata->rx_ring = rx_ring; ++ ++ /* allocate tx descriptor ring */ ++ ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, eth_bufnum++); ++ tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++, ++ RING_CFGSIZE_16KB, ring_id); ++ if (IS_ERR_OR_NULL(tx_ring)) { ++ ret = PTR_ERR(tx_ring); ++ goto err; ++ } ++ pdata->tx_ring = tx_ring; ++ ++ cp_ring = pdata->rx_ring; ++ cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots, ++ sizeof(struct sk_buff *), GFP_KERNEL); ++ if (!cp_ring->cp_skb) { ++ ret = -ENOMEM; ++ goto 
err; ++ } ++ pdata->tx_ring->cp_ring = cp_ring; ++ pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring); ++ ++ pdata->tx_qcnt_hi = pdata->tx_ring->slots / 2; ++ pdata->cp_qcnt_hi = pdata->rx_ring->slots / 2; ++ pdata->cp_qcnt_low = pdata->cp_qcnt_hi / 2; ++ ++ return 0; ++ ++err: ++ xgene_enet_delete_desc_rings(pdata); ++ return ret; ++} ++ ++static struct rtnl_link_stats64 *xgene_enet_get_stats64( ++ struct net_device *ndev, ++ struct rtnl_link_stats64 *storage) ++{ ++ struct xgene_enet_pdata *pdata = netdev_priv(ndev); ++ struct rtnl_link_stats64 *stats = &pdata->stats; ++ ++ spin_lock(&pdata->stats_lock); ++ stats->rx_errors += stats->rx_length_errors + ++ stats->rx_crc_errors + ++ stats->rx_frame_errors + ++ stats->rx_fifo_errors; ++ memcpy(storage, &pdata->stats, sizeof(struct rtnl_link_stats64)); ++ spin_unlock(&pdata->stats_lock); ++ ++ return storage; ++} ++ ++static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr) ++{ ++ struct xgene_enet_pdata *pdata = netdev_priv(ndev); ++ int ret; ++ ++ ret = eth_mac_addr(ndev, addr); ++ if (ret) ++ return ret; ++ xgene_gmac_set_mac_addr(pdata); ++ ++ return ret; ++} ++ ++static const struct net_device_ops xgene_ndev_ops = { ++ .ndo_open = xgene_enet_open, ++ .ndo_stop = xgene_enet_close, ++ .ndo_start_xmit = xgene_enet_start_xmit, ++ .ndo_tx_timeout = xgene_enet_timeout, ++ .ndo_get_stats64 = xgene_enet_get_stats64, ++ .ndo_change_mtu = eth_change_mtu, ++ .ndo_set_mac_address = xgene_enet_set_mac_address, ++}; ++ ++static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) ++{ ++ struct platform_device *pdev; ++ struct net_device *ndev; ++ struct device *dev; ++ struct resource *res; ++ void *base_addr; ++ const char *mac; ++ int ret; ++ ++ pdev = pdata->pdev; ++ dev = &pdev->dev; ++ ndev = pdata->ndev; ++ ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "enet_csr"); ++ if (!res) { ++ dev_err(dev, "Resource enet_csr not defined\n"); ++ return -ENODEV; ++ } ++ pdata->base_addr = devm_ioremap_resource(dev, res); ++ if (IS_ERR(pdata->base_addr)) { ++ dev_err(dev, "Unable to retrieve ENET Port CSR region\n"); ++ return PTR_ERR(pdata->base_addr); ++ } ++ ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_csr"); ++ if (!res) { ++ dev_err(dev, "Resource ring_csr not defined\n"); ++ return -ENODEV; ++ } ++ pdata->ring_csr_addr = devm_ioremap_resource(dev, res); ++ if (IS_ERR(pdata->ring_csr_addr)) { ++ dev_err(dev, "Unable to retrieve ENET Ring CSR region\n"); ++ return PTR_ERR(pdata->ring_csr_addr); ++ } ++ ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_cmd"); ++ if (!res) { ++ dev_err(dev, "Resource ring_cmd not defined\n"); ++ return -ENODEV; ++ } ++ pdata->ring_cmd_addr = devm_ioremap_resource(dev, res); ++ if (IS_ERR(pdata->ring_cmd_addr)) { ++ dev_err(dev, "Unable to retrieve ENET Ring command region\n"); ++ return PTR_ERR(pdata->ring_cmd_addr); ++ } ++ ++ ret = platform_get_irq(pdev, 0); ++ if (ret <= 0) { ++ dev_err(dev, "Unable to get ENET Rx IRQ\n"); ++ ret = ret ? 
: -ENXIO; ++ return ret; ++ } ++ pdata->rx_irq = ret; ++ ++ mac = of_get_mac_address(dev->of_node); ++ if (mac) ++ memcpy(ndev->dev_addr, mac, ndev->addr_len); ++ else ++ eth_hw_addr_random(ndev); ++ memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); ++ ++ pdata->phy_mode = of_get_phy_mode(pdev->dev.of_node); ++ if (pdata->phy_mode < 0) { ++ dev_err(dev, "Incorrect phy-connection-type in DTS\n"); ++ return -EINVAL; ++ } ++ ++ pdata->clk = devm_clk_get(&pdev->dev, NULL); ++ ret = IS_ERR(pdata->clk); ++ if (IS_ERR(pdata->clk)) { ++ dev_err(&pdev->dev, "can't get clock\n"); ++ ret = PTR_ERR(pdata->clk); ++ return ret; ++ } ++ ++ base_addr = pdata->base_addr; ++ pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET; ++ pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET; ++ pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET; ++ pdata->mcx_mac_addr = base_addr + BLOCK_ETH_MAC_OFFSET; ++ pdata->mcx_stats_addr = base_addr + BLOCK_ETH_STATS_OFFSET; ++ pdata->mcx_mac_csr_addr = base_addr + BLOCK_ETH_MAC_CSR_OFFSET; ++ pdata->rx_buff_cnt = NUM_PKT_BUF; ++ ++ return ret; ++} ++ ++static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata) ++{ ++ struct net_device *ndev = pdata->ndev; ++ struct xgene_enet_desc_ring *buf_pool; ++ u16 dst_ring_num; ++ int ret; ++ ++ xgene_gmac_tx_disable(pdata); ++ xgene_gmac_rx_disable(pdata); ++ ++ ret = xgene_enet_create_desc_rings(ndev); ++ if (ret) { ++ netdev_err(ndev, "Error in ring configuration\n"); ++ return ret; ++ } ++ ++ /* setup buffer pool */ ++ buf_pool = pdata->rx_ring->buf_pool; ++ xgene_enet_init_bufpool(buf_pool); ++ ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt); ++ if (ret) ++ return ret; ++ ++ dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring); ++ xgene_enet_cle_bypass(pdata, dst_ring_num, buf_pool->id); ++ ++ return ret; ++} ++ ++static int xgene_enet_probe(struct platform_device *pdev) ++{ ++ struct net_device *ndev; ++ struct xgene_enet_pdata *pdata; ++ struct device *dev = &pdev->dev; ++ struct napi_struct *napi; ++ int ret; ++ ++ ndev = alloc_etherdev(sizeof(struct xgene_enet_pdata)); ++ if (!ndev) ++ return -ENOMEM; ++ ++ pdata = netdev_priv(ndev); ++ ++ pdata->pdev = pdev; ++ pdata->ndev = ndev; ++ SET_NETDEV_DEV(ndev, dev); ++ platform_set_drvdata(pdev, pdata); ++ ndev->netdev_ops = &xgene_ndev_ops; ++ xgene_enet_set_ethtool_ops(ndev); ++ ndev->features |= NETIF_F_IP_CSUM | ++ NETIF_F_GSO | ++ NETIF_F_GRO; ++ ++ ret = xgene_enet_get_resources(pdata); ++ if (ret) ++ goto err; ++ ++ xgene_enet_reset(pdata); ++ xgene_gmac_init(pdata, SPEED_1000); ++ ++ spin_lock_init(&pdata->stats_lock); ++ ret = register_netdev(ndev); ++ if (ret) { ++ netdev_err(ndev, "Failed to register netdev\n"); ++ goto err; ++ } ++ ++ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); ++ if (ret) { ++ netdev_err(ndev, "No usable DMA configuration\n"); ++ goto err; ++ } ++ ++ ret = xgene_enet_init_hw(pdata); ++ if (ret) ++ goto err; ++ ++ napi = &pdata->rx_ring->napi; ++ netif_napi_add(ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT); ++ ret = xgene_enet_mdio_config(pdata); ++ ++ return ret; ++err: ++ free_netdev(ndev); ++ return ret; ++} ++ ++static int xgene_enet_remove(struct platform_device *pdev) ++{ ++ struct xgene_enet_pdata *pdata; ++ struct net_device *ndev; ++ ++ pdata = platform_get_drvdata(pdev); ++ ndev = pdata->ndev; ++ ++ xgene_gmac_rx_disable(pdata); ++ xgene_gmac_tx_disable(pdata); ++ ++ netif_napi_del(&pdata->rx_ring->napi); ++ xgene_enet_mdio_remove(pdata); ++ xgene_enet_delete_desc_rings(pdata); 
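++	/*
++	 * Rings are torn down before unregister_netdev(); this assumes
++	 * the interface has already been brought down, so no rx/tx can
++	 * touch the freed descriptor memory in the meantime.
++	 */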
++ unregister_netdev(ndev); ++ xgene_gport_shutdown(pdata); ++ free_netdev(ndev); ++ ++ return 0; ++} ++ ++static struct of_device_id xgene_enet_match[] = { ++ {.compatible = "apm,xgene-enet",}, ++ {}, ++}; ++ ++MODULE_DEVICE_TABLE(of, xgene_enet_match); ++ ++static struct platform_driver xgene_enet_driver = { ++ .driver = { ++ .name = "xgene-enet", ++ .owner = THIS_MODULE, ++ .of_match_table = xgene_enet_match, ++ }, ++ .probe = xgene_enet_probe, ++ .remove = xgene_enet_remove, ++}; ++ ++module_platform_driver(xgene_enet_driver); ++ ++MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver"); ++MODULE_VERSION(XGENE_DRV_VERSION); ++MODULE_AUTHOR("Keyur Chudgar "); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h +new file mode 100644 +index 0000000..2d1bd85 +--- /dev/null ++++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h +@@ -0,0 +1,109 @@ ++/* Applied Micro X-Gene SoC Ethernet Driver ++ * ++ * Copyright (c) 2014, Applied Micro Circuits Corporation ++ * Authors: Iyappan Subramanian ++ * Ravi Patel ++ * Keyur Chudgar ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see . ++ */ ++ ++#ifndef __XGENE_ENET_MAIN_H__ ++#define __XGENE_ENET_MAIN_H__ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "xgene_enet_hw.h" ++ ++#define XGENE_DRV_VERSION "v1.0" ++#define XGENE_ENET_MAX_MTU 1536 ++#define SKB_BUFFER_SIZE (XGENE_ENET_MAX_MTU - NET_IP_ALIGN) ++#define NUM_PKT_BUF 64 ++#define NUM_BUFPOOL 32 ++ ++/* software context of a descriptor ring */ ++struct xgene_enet_desc_ring { ++ struct net_device *ndev; ++ u16 id; ++ u16 num; ++ u16 head; ++ u16 tail; ++ u16 slots; ++ u16 irq; ++ u32 size; ++ u32 state[NUM_RING_CONFIG]; ++ void __iomem *cmd_base; ++ void __iomem *cmd; ++ dma_addr_t dma; ++ u16 dst_ring_num; ++ u8 nbufpool; ++ struct sk_buff *(*rx_skb); ++ struct sk_buff *(*cp_skb); ++ enum xgene_enet_ring_cfgsize cfgsize; ++ struct xgene_enet_desc_ring *cp_ring; ++ struct xgene_enet_desc_ring *buf_pool; ++ struct napi_struct napi; ++ struct xgene_enet_desc desc; ++ union { ++ void *desc_addr; ++ struct xgene_enet_raw_desc *raw_desc; ++ struct xgene_enet_raw_desc16 *raw_desc16; ++ }; ++}; ++ ++/* ethernet private data */ ++struct xgene_enet_pdata { ++ struct net_device *ndev; ++ struct mii_bus *mdio_bus; ++ struct phy_device *phy_dev; ++ int phy_link; ++ int phy_speed; ++ struct clk *clk; ++ struct platform_device *pdev; ++ struct xgene_enet_desc_ring *tx_ring; ++ struct xgene_enet_desc_ring *rx_ring; ++ char *dev_name; ++ u32 rx_buff_cnt; ++ u32 tx_qcnt_hi; ++ u32 cp_qcnt_hi; ++ u32 cp_qcnt_low; ++ u32 rx_irq; ++ void __iomem *eth_csr_addr; ++ void __iomem *eth_ring_if_addr; ++ void __iomem *eth_diag_csr_addr; ++ void __iomem *mcx_mac_addr; ++ void __iomem *mcx_stats_addr; ++ void __iomem *mcx_mac_csr_addr; ++ void __iomem *base_addr; ++ void __iomem *ring_csr_addr; ++ 
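++	/* per-ring command (doorbell) window; ring N sits at offset N << 6 */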
void __iomem *ring_cmd_addr; ++ u32 phy_addr; ++ int phy_mode; ++ u32 speed; ++ u16 rm; ++ struct rtnl_link_stats64 stats; ++ /* statistics lock */ ++ spinlock_t stats_lock; ++}; ++ ++void xgene_enet_set_ethtool_ops(struct net_device *netdev); ++ ++#endif /* __XGENE_ENET_MAIN_H__ */ +diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig +index 0754f5c..4478a59 100644 +--- a/drivers/rtc/Kconfig ++++ b/drivers/rtc/Kconfig +@@ -789,7 +789,7 @@ config RTC_DRV_DA9063 + + config RTC_DRV_EFI + tristate "EFI RTC" +- depends on IA64 ++ depends on EFI + help + If you say yes here you will get support for the EFI + Real Time Clock. +diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile +index 70347d0..f1dfc36 100644 +--- a/drivers/rtc/Makefile ++++ b/drivers/rtc/Makefile +@@ -10,6 +10,10 @@ obj-$(CONFIG_RTC_SYSTOHC) += systohc.o + obj-$(CONFIG_RTC_CLASS) += rtc-core.o + rtc-core-y := class.o interface.o + ++ifdef CONFIG_RTC_DRV_EFI ++rtc-core-y += rtc-efi-platform.o ++endif ++ + rtc-core-$(CONFIG_RTC_INTF_DEV) += rtc-dev.o + rtc-core-$(CONFIG_RTC_INTF_PROC) += rtc-proc.o + rtc-core-$(CONFIG_RTC_INTF_SYSFS) += rtc-sysfs.o +diff --git a/drivers/rtc/rtc-efi-platform.c b/drivers/rtc/rtc-efi-platform.c +new file mode 100644 +index 0000000..1a7f890 +--- /dev/null ++++ b/drivers/rtc/rtc-efi-platform.c +@@ -0,0 +1,30 @@ ++/* ++ * Moved from arch/ia64/kernel/time.c ++ * ++ * Copyright (C) 1998-2003 Hewlett-Packard Co ++ * Stephane Eranian ++ * David Mosberger ++ * Copyright (C) 1999 Don Dugger ++ * Copyright (C) 1999-2000 VA Linux Systems ++ * Copyright (C) 1999-2000 Walt Drummond ++ */ ++#include ++#include ++#include ++#include ++#include ++ ++static struct platform_device rtc_efi_dev = { ++ .name = "rtc-efi", ++ .id = -1, ++}; ++ ++static int __init rtc_init(void) ++{ ++ if (platform_device_register(&rtc_efi_dev) < 0) ++ pr_err("unable to register rtc device...\n"); ++ ++ /* not necessarily an error */ ++ return 0; ++} ++module_init(rtc_init); +diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h +index f27000f..35b0c12 100644 +--- a/include/kvm/arm_vgic.h ++++ b/include/kvm/arm_vgic.h +@@ -24,7 +24,6 @@ + #include + #include + #include +-#include + + #define VGIC_NR_IRQS 256 + #define VGIC_NR_SGIS 16 +@@ -32,7 +31,9 @@ + #define VGIC_NR_PRIVATE_IRQS (VGIC_NR_SGIS + VGIC_NR_PPIS) + #define VGIC_NR_SHARED_IRQS (VGIC_NR_IRQS - VGIC_NR_PRIVATE_IRQS) + #define VGIC_MAX_CPUS KVM_MAX_VCPUS +-#define VGIC_MAX_LRS (1 << 6) ++ ++#define VGIC_V2_MAX_LRS (1 << 6) ++#define VGIC_V3_MAX_LRS 16 + + /* Sanity checks... 
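A note on the LR sizing above: GICv2 exposes at most 64 list registers, whose status is tracked in pairs of u32 words, while GICv3 is architecturally capped at 16. The same invariants can be written as compile-time assertions; a minimal standalone sketch, using C11 static_assert instead of the preprocessor #error convention this header uses (the two constants are copied from the hunk above, everything else is illustrative):

#include <assert.h>

#define VGIC_V2_MAX_LRS (1 << 6)
#define VGIC_V3_MAX_LRS 16

/* Two u32 status words (vgic_eisr[2], vgic_elrsr[2]) cover 64 LRs. */
static_assert(VGIC_V2_MAX_LRS <= 2 * 32, "GICv2 status words too small");
/* ICH_VTR_EL2.ListRegs is wider, but the architecture stops at 16. */
static_assert(VGIC_V3_MAX_LRS <= 16, "GICv3 LR count over spec maximum");

int main(void) { return 0; }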
*/ + #if (VGIC_MAX_CPUS > 8) +@@ -68,9 +69,62 @@ struct vgic_bytemap { + u32 shared[VGIC_NR_SHARED_IRQS / 4]; + }; + ++struct kvm_vcpu; ++ ++enum vgic_type { ++ VGIC_V2, /* Good ol' GICv2 */ ++ VGIC_V3, /* New fancy GICv3 */ ++}; ++ ++#define LR_STATE_PENDING (1 << 0) ++#define LR_STATE_ACTIVE (1 << 1) ++#define LR_STATE_MASK (3 << 0) ++#define LR_EOI_INT (1 << 2) ++ ++struct vgic_lr { ++ u16 irq; ++ u8 source; ++ u8 state; ++}; ++ ++struct vgic_vmcr { ++ u32 ctlr; ++ u32 abpr; ++ u32 bpr; ++ u32 pmr; ++}; ++ ++struct vgic_ops { ++ struct vgic_lr (*get_lr)(const struct kvm_vcpu *, int); ++ void (*set_lr)(struct kvm_vcpu *, int, struct vgic_lr); ++ void (*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr); ++ u64 (*get_elrsr)(const struct kvm_vcpu *vcpu); ++ u64 (*get_eisr)(const struct kvm_vcpu *vcpu); ++ u32 (*get_interrupt_status)(const struct kvm_vcpu *vcpu); ++ void (*enable_underflow)(struct kvm_vcpu *vcpu); ++ void (*disable_underflow)(struct kvm_vcpu *vcpu); ++ void (*get_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); ++ void (*set_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); ++ void (*enable)(struct kvm_vcpu *vcpu); ++}; ++ ++struct vgic_params { ++ /* vgic type */ ++ enum vgic_type type; ++ /* Physical address of vgic virtual cpu interface */ ++ phys_addr_t vcpu_base; ++ /* Number of list registers */ ++ u32 nr_lr; ++ /* Interrupt number */ ++ unsigned int maint_irq; ++ /* Virtual control interface base address */ ++ void __iomem *vctrl_base; ++}; ++ + struct vgic_dist { + #ifdef CONFIG_KVM_ARM_VGIC + spinlock_t lock; ++ bool in_kernel; + bool ready; + + /* Virtual control interface mapping */ +@@ -110,6 +164,29 @@ struct vgic_dist { + #endif + }; + ++struct vgic_v2_cpu_if { ++ u32 vgic_hcr; ++ u32 vgic_vmcr; ++ u32 vgic_misr; /* Saved only */ ++ u32 vgic_eisr[2]; /* Saved only */ ++ u32 vgic_elrsr[2]; /* Saved only */ ++ u32 vgic_apr; ++ u32 vgic_lr[VGIC_V2_MAX_LRS]; ++}; ++ ++struct vgic_v3_cpu_if { ++#ifdef CONFIG_ARM_GIC_V3 ++ u32 vgic_hcr; ++ u32 vgic_vmcr; ++ u32 vgic_misr; /* Saved only */ ++ u32 vgic_eisr; /* Saved only */ ++ u32 vgic_elrsr; /* Saved only */ ++ u32 vgic_ap0r[4]; ++ u32 vgic_ap1r[4]; ++ u64 vgic_lr[VGIC_V3_MAX_LRS]; ++#endif ++}; ++ + struct vgic_cpu { + #ifdef CONFIG_KVM_ARM_VGIC + /* per IRQ to LR mapping */ +@@ -120,24 +197,24 @@ struct vgic_cpu { + DECLARE_BITMAP( pending_shared, VGIC_NR_SHARED_IRQS); + + /* Bitmap of used/free list registers */ +- DECLARE_BITMAP( lr_used, VGIC_MAX_LRS); ++ DECLARE_BITMAP( lr_used, VGIC_V2_MAX_LRS); + + /* Number of list registers on this CPU */ + int nr_lr; + + /* CPU vif control registers for world switch */ +- u32 vgic_hcr; +- u32 vgic_vmcr; +- u32 vgic_misr; /* Saved only */ +- u32 vgic_eisr[2]; /* Saved only */ +- u32 vgic_elrsr[2]; /* Saved only */ +- u32 vgic_apr; +- u32 vgic_lr[VGIC_MAX_LRS]; ++ union { ++ struct vgic_v2_cpu_if vgic_v2; ++ struct vgic_v3_cpu_if vgic_v3; ++ }; + #endif + }; + + #define LR_EMPTY 0xff + ++#define INT_STATUS_EOI (1 << 0) ++#define INT_STATUS_UNDERFLOW (1 << 1) ++ + struct kvm; + struct kvm_vcpu; + struct kvm_run; +@@ -157,9 +234,25 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu); + bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run, + struct kvm_exit_mmio *mmio); + +-#define irqchip_in_kernel(k) (!!((k)->arch.vgic.vctrl_base)) ++#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel)) + #define vgic_initialized(k) ((k)->arch.vgic.ready) + ++int vgic_v2_probe(struct device_node *vgic_node, ++ const struct vgic_ops **ops, ++ const struct 
vgic_params **params); ++#ifdef CONFIG_ARM_GIC_V3 ++int vgic_v3_probe(struct device_node *vgic_node, ++ const struct vgic_ops **ops, ++ const struct vgic_params **params); ++#else ++static inline int vgic_v3_probe(struct device_node *vgic_node, ++ const struct vgic_ops **ops, ++ const struct vgic_params **params) ++{ ++ return -ENODEV; ++} ++#endif ++ + #else + static inline int kvm_vgic_hyp_init(void) + { +diff --git a/include/linux/efi.h b/include/linux/efi.h +index 41bbf8b..b3fac7c 100644 +--- a/include/linux/efi.h ++++ b/include/linux/efi.h +@@ -20,6 +20,7 @@ + #include + #include + #include ++#include + + #include + +@@ -875,6 +876,9 @@ extern void efi_reserve_boot_services(void); + extern int efi_get_fdt_params(struct efi_fdt_params *params, int verbose); + extern struct efi_memory_map memmap; + ++extern int efi_reboot_quirk_mode; ++extern bool efi_poweroff_required(void); ++ + /* Iterate through an efi_memory_map */ + #define for_each_efi_memory_desc(m, md) \ + for ((md) = (m)->map; \ +@@ -926,11 +930,14 @@ static inline bool efi_enabled(int feature) + { + return test_bit(feature, &efi.flags) != 0; + } ++extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused); + #else + static inline bool efi_enabled(int feature) + { + return false; + } ++static inline void ++efi_reboot(enum reboot_mode reboot_mode, const char *__unused) {} + #endif + + /* +diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h +new file mode 100644 +index 0000000..9eac712 +--- /dev/null ++++ b/include/linux/irqchip/arm-gic-v3.h +@@ -0,0 +1,193 @@ ++/* ++ * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved. ++ * Author: Marc Zyngier ++ * ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see . ++ */ ++#ifndef __LINUX_IRQCHIP_ARM_GIC_V3_H ++#define __LINUX_IRQCHIP_ARM_GIC_V3_H ++ ++/* ++ * Distributor registers. We assume we're running non-secure, with ARE ++ * being set. Secure-only and non-ARE registers are not described. 
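For orientation before the register map that follows: with ARE set, non-secure distributor bring-up reduces to setting the enable/ARE bits in GICD_CTLR and waiting for GICD_CTLR_RWP to clear. A hypothetical kernel-style helper, not part of this series, assuming the GICD_* constants defined below and an already-mapped distributor base:

#include <linux/io.h>
#include <asm/processor.h>

/* Enable affinity-routed Group1 interrupts on the NS distributor and
 * wait for the register write to propagate (RWP reads back as zero). */
static void gicd_enable_group1(void __iomem *dist_base)
{
	writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A,
		       dist_base + GICD_CTLR);
	while (readl_relaxed(dist_base + GICD_CTLR) & GICD_CTLR_RWP)
		cpu_relax();
}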
++ */ ++#define GICD_CTLR 0x0000 ++#define GICD_TYPER 0x0004 ++#define GICD_IIDR 0x0008 ++#define GICD_STATUSR 0x0010 ++#define GICD_SETSPI_NSR 0x0040 ++#define GICD_CLRSPI_NSR 0x0048 ++#define GICD_SETSPI_SR 0x0050 ++#define GICD_CLRSPI_SR 0x0058 ++#define GICD_SEIR 0x0068 ++#define GICD_ISENABLER 0x0100 ++#define GICD_ICENABLER 0x0180 ++#define GICD_ISPENDR 0x0200 ++#define GICD_ICPENDR 0x0280 ++#define GICD_ISACTIVER 0x0300 ++#define GICD_ICACTIVER 0x0380 ++#define GICD_IPRIORITYR 0x0400 ++#define GICD_ICFGR 0x0C00 ++#define GICD_IROUTER 0x6000 ++#define GICD_PIDR2 0xFFE8 ++ ++#define GICD_CTLR_RWP (1U << 31) ++#define GICD_CTLR_ARE_NS (1U << 4) ++#define GICD_CTLR_ENABLE_G1A (1U << 1) ++#define GICD_CTLR_ENABLE_G1 (1U << 0) ++ ++#define GICD_IROUTER_SPI_MODE_ONE (0U << 31) ++#define GICD_IROUTER_SPI_MODE_ANY (1U << 31) ++ ++#define GIC_PIDR2_ARCH_MASK 0xf0 ++ ++/* ++ * Re-Distributor registers, offsets from RD_base ++ */ ++#define GICR_CTLR GICD_CTLR ++#define GICR_IIDR 0x0004 ++#define GICR_TYPER 0x0008 ++#define GICR_STATUSR GICD_STATUSR ++#define GICR_WAKER 0x0014 ++#define GICR_SETLPIR 0x0040 ++#define GICR_CLRLPIR 0x0048 ++#define GICR_SEIR GICD_SEIR ++#define GICR_PROPBASER 0x0070 ++#define GICR_PENDBASER 0x0078 ++#define GICR_INVLPIR 0x00A0 ++#define GICR_INVALLR 0x00B0 ++#define GICR_SYNCR 0x00C0 ++#define GICR_MOVLPIR 0x0100 ++#define GICR_MOVALLR 0x0110 ++#define GICR_PIDR2 GICD_PIDR2 ++ ++#define GICR_WAKER_ProcessorSleep (1U << 1) ++#define GICR_WAKER_ChildrenAsleep (1U << 2) ++ ++/* ++ * Re-Distributor registers, offsets from SGI_base ++ */ ++#define GICR_ISENABLER0 GICD_ISENABLER ++#define GICR_ICENABLER0 GICD_ICENABLER ++#define GICR_ISPENDR0 GICD_ISPENDR ++#define GICR_ICPENDR0 GICD_ICPENDR ++#define GICR_ISACTIVER0 GICD_ISACTIVER ++#define GICR_ICACTIVER0 GICD_ICACTIVER ++#define GICR_IPRIORITYR0 GICD_IPRIORITYR ++#define GICR_ICFGR0 GICD_ICFGR ++ ++#define GICR_TYPER_VLPIS (1U << 1) ++#define GICR_TYPER_LAST (1U << 4) ++ ++/* ++ * CPU interface registers ++ */ ++#define ICC_CTLR_EL1_EOImode_drop_dir (0U << 1) ++#define ICC_CTLR_EL1_EOImode_drop (1U << 1) ++#define ICC_SRE_EL1_SRE (1U << 0) ++ ++/* ++ * Hypervisor interface registers (SRE only) ++ */ ++#define ICH_LR_VIRTUAL_ID_MASK ((1UL << 32) - 1) ++ ++#define ICH_LR_EOI (1UL << 41) ++#define ICH_LR_GROUP (1UL << 60) ++#define ICH_LR_STATE (3UL << 62) ++#define ICH_LR_PENDING_BIT (1UL << 62) ++#define ICH_LR_ACTIVE_BIT (1UL << 63) ++ ++#define ICH_MISR_EOI (1 << 0) ++#define ICH_MISR_U (1 << 1) ++ ++#define ICH_HCR_EN (1 << 0) ++#define ICH_HCR_UIE (1 << 1) ++ ++#define ICH_VMCR_CTLR_SHIFT 0 ++#define ICH_VMCR_CTLR_MASK (0x21f << ICH_VMCR_CTLR_SHIFT) ++#define ICH_VMCR_BPR1_SHIFT 18 ++#define ICH_VMCR_BPR1_MASK (7 << ICH_VMCR_BPR1_SHIFT) ++#define ICH_VMCR_BPR0_SHIFT 21 ++#define ICH_VMCR_BPR0_MASK (7 << ICH_VMCR_BPR0_SHIFT) ++#define ICH_VMCR_PMR_SHIFT 24 ++#define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT) ++ ++#define ICC_EOIR1_EL1 S3_0_C12_C12_1 ++#define ICC_IAR1_EL1 S3_0_C12_C12_0 ++#define ICC_SGI1R_EL1 S3_0_C12_C11_5 ++#define ICC_PMR_EL1 S3_0_C4_C6_0 ++#define ICC_CTLR_EL1 S3_0_C12_C12_4 ++#define ICC_SRE_EL1 S3_0_C12_C12_5 ++#define ICC_GRPEN1_EL1 S3_0_C12_C12_7 ++ ++#define ICC_SRE_EL2 S3_4_C12_C9_5 ++ ++#define ICC_SRE_EL2_ENABLE (1 << 3) ++ ++/* ++ * System register definitions ++ */ ++#define ICH_VSEIR_EL2 S3_4_C12_C9_4 ++#define ICH_HCR_EL2 S3_4_C12_C11_0 ++#define ICH_VTR_EL2 S3_4_C12_C11_1 ++#define ICH_MISR_EL2 S3_4_C12_C11_2 ++#define ICH_EISR_EL2 S3_4_C12_C11_3 ++#define ICH_ELSR_EL2 
S3_4_C12_C11_5 ++#define ICH_VMCR_EL2 S3_4_C12_C11_7 ++ ++#define __LR0_EL2(x) S3_4_C12_C12_ ## x ++#define __LR8_EL2(x) S3_4_C12_C13_ ## x ++ ++#define ICH_LR0_EL2 __LR0_EL2(0) ++#define ICH_LR1_EL2 __LR0_EL2(1) ++#define ICH_LR2_EL2 __LR0_EL2(2) ++#define ICH_LR3_EL2 __LR0_EL2(3) ++#define ICH_LR4_EL2 __LR0_EL2(4) ++#define ICH_LR5_EL2 __LR0_EL2(5) ++#define ICH_LR6_EL2 __LR0_EL2(6) ++#define ICH_LR7_EL2 __LR0_EL2(7) ++#define ICH_LR8_EL2 __LR8_EL2(0) ++#define ICH_LR9_EL2 __LR8_EL2(1) ++#define ICH_LR10_EL2 __LR8_EL2(2) ++#define ICH_LR11_EL2 __LR8_EL2(3) ++#define ICH_LR12_EL2 __LR8_EL2(4) ++#define ICH_LR13_EL2 __LR8_EL2(5) ++#define ICH_LR14_EL2 __LR8_EL2(6) ++#define ICH_LR15_EL2 __LR8_EL2(7) ++ ++#define __AP0Rx_EL2(x) S3_4_C12_C8_ ## x ++#define ICH_AP0R0_EL2 __AP0Rx_EL2(0) ++#define ICH_AP0R1_EL2 __AP0Rx_EL2(1) ++#define ICH_AP0R2_EL2 __AP0Rx_EL2(2) ++#define ICH_AP0R3_EL2 __AP0Rx_EL2(3) ++ ++#define __AP1Rx_EL2(x) S3_4_C12_C9_ ## x ++#define ICH_AP1R0_EL2 __AP1Rx_EL2(0) ++#define ICH_AP1R1_EL2 __AP1Rx_EL2(1) ++#define ICH_AP1R2_EL2 __AP1Rx_EL2(2) ++#define ICH_AP1R3_EL2 __AP1Rx_EL2(3) ++ ++#ifndef __ASSEMBLY__ ++ ++#include ++ ++static inline void gic_write_eoir(u64 irq) ++{ ++ asm volatile("msr " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq)); ++ isb(); ++} ++ ++#endif ++ ++#endif +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index 20d17f8..0ea758b 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -816,9 +816,21 @@ void __init init_cma_reserved_pageblock(struct page *page) + set_page_count(p, 0); + } while (++p, --i); + +- set_page_refcounted(page); + set_pageblock_migratetype(page, MIGRATE_CMA); +- __free_pages(page, pageblock_order); ++ ++ if (pageblock_order >= MAX_ORDER) { ++ i = pageblock_nr_pages; ++ p = page; ++ do { ++ set_page_refcounted(p); ++ __free_pages(p, MAX_ORDER - 1); ++ p += MAX_ORDER_NR_PAGES; ++ } while (i -= MAX_ORDER_NR_PAGES); ++ } else { ++ set_page_refcounted(page); ++ __free_pages(page, pageblock_order); ++ } ++ + adjust_managed_page_count(page, pageblock_nr_pages); + } + #endif +diff --git a/tools/perf/arch/arm64/include/perf_regs.h b/tools/perf/arch/arm64/include/perf_regs.h +index e9441b9..1d3f39c 100644 +--- a/tools/perf/arch/arm64/include/perf_regs.h ++++ b/tools/perf/arch/arm64/include/perf_regs.h +@@ -6,6 +6,8 @@ + #include + + #define PERF_REGS_MASK ((1ULL << PERF_REG_ARM64_MAX) - 1) ++#define PERF_REGS_MAX PERF_REG_ARM64_MAX ++ + #define PERF_REG_IP PERF_REG_ARM64_PC + #define PERF_REG_SP PERF_REG_ARM64_SP + +diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c +new file mode 100644 +index 0000000..a55a9a4 +--- /dev/null ++++ b/virt/kvm/arm/vgic-v2.c +@@ -0,0 +1,236 @@ ++/* ++ * Copyright (C) 2012,2013 ARM Limited, All Rights Reserved. ++ * Author: Marc Zyngier ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see . 
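Stepping back to the mm/page_alloc.c hunk above: the new branch matters on configurations where a pageblock outgrows the buddy allocator's largest order (arm64 with 64K pages is the motivating case), and it frees the CMA region in MAX_ORDER - 1 sized chunks instead of one oversized call. A standalone model of that loop; the two orders are assumptions picked to show the split, not values read from any particular config:

#include <stdio.h>

#define MAX_ORDER 11
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

int main(void)
{
	int pageblock_order = 13;	/* assumed 64K-page configuration */
	long pageblock_nr_pages = 1L << pageblock_order;
	long offset = 0, i = pageblock_nr_pages;

	if (pageblock_order >= MAX_ORDER) {
		do {	/* mirrors the patched kernel loop */
			printf("free order-%d block at page %ld\n",
			       MAX_ORDER - 1, offset);
			offset += MAX_ORDER_NR_PAGES;
		} while (i -= MAX_ORDER_NR_PAGES);
	} else {
		printf("free order-%d block at page 0\n", pageblock_order);
	}
	return 0;
}

With these numbers one order-13 pageblock comes back as eight order-10 frees, which is why the refcount is now set per chunk rather than once per pageblock.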
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include ++#include ++#include ++ ++static struct vgic_lr vgic_v2_get_lr(const struct kvm_vcpu *vcpu, int lr) ++{ ++ struct vgic_lr lr_desc; ++ u32 val = vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr]; ++ ++ lr_desc.irq = val & GICH_LR_VIRTUALID; ++ if (lr_desc.irq <= 15) ++ lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0x7; ++ else ++ lr_desc.source = 0; ++ lr_desc.state = 0; ++ ++ if (val & GICH_LR_PENDING_BIT) ++ lr_desc.state |= LR_STATE_PENDING; ++ if (val & GICH_LR_ACTIVE_BIT) ++ lr_desc.state |= LR_STATE_ACTIVE; ++ if (val & GICH_LR_EOI) ++ lr_desc.state |= LR_EOI_INT; ++ ++ return lr_desc; ++} ++ ++/* ++ * This also does some maintenance of ELRSR. ++ */ ++static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr, ++ struct vgic_lr lr_desc) ++{ ++ u32 lr_val = (lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT) | lr_desc.irq; ++ ++ if (lr_desc.state & LR_STATE_PENDING) ++ lr_val |= GICH_LR_PENDING_BIT; ++ if (lr_desc.state & LR_STATE_ACTIVE) ++ lr_val |= GICH_LR_ACTIVE_BIT; ++ if (lr_desc.state & LR_EOI_INT) ++ lr_val |= GICH_LR_EOI; ++ ++ vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = lr_val; ++} ++ ++static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr, ++ struct vgic_lr lr_desc) ++{ ++ if (!(lr_desc.state & LR_STATE_MASK)) ++ set_bit(lr, (unsigned long *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr); ++} ++ ++static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu) ++{ ++ const u32 *elrsr = vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr; ++ return *(u64 *)elrsr; ++} ++ ++static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu) ++{ ++ const u32 *eisr = vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr; ++ return *(u64 *)eisr; ++} ++ ++static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu) ++{ ++ u32 misr = vcpu->arch.vgic_cpu.vgic_v2.vgic_misr; ++ u32 ret = 0; ++ ++ if (misr & GICH_MISR_EOI) ++ ret |= INT_STATUS_EOI; ++ if (misr & GICH_MISR_U) ++ ret |= INT_STATUS_UNDERFLOW; ++ ++ return ret; ++} ++ ++static void vgic_v2_enable_underflow(struct kvm_vcpu *vcpu) ++{ ++ vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr |= GICH_HCR_UIE; ++} ++ ++static void vgic_v2_disable_underflow(struct kvm_vcpu *vcpu) ++{ ++ vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr &= ~GICH_HCR_UIE; ++} ++ ++static void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) ++{ ++ u32 vmcr = vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr; ++ ++ vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >> GICH_VMCR_CTRL_SHIFT; ++ vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >> GICH_VMCR_ALIAS_BINPOINT_SHIFT; ++ vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >> GICH_VMCR_BINPOINT_SHIFT; ++ vmcrp->pmr = (vmcr & GICH_VMCR_PRIMASK_MASK) >> GICH_VMCR_PRIMASK_SHIFT; ++} ++ ++static void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) ++{ ++ u32 vmcr; ++ ++ vmcr = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK; ++ vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) & GICH_VMCR_ALIAS_BINPOINT_MASK; ++ vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) & GICH_VMCR_BINPOINT_MASK; ++ vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK; ++ ++ vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr; ++} ++ ++static void vgic_v2_enable(struct kvm_vcpu *vcpu) ++{ ++ /* ++ * By forcing VMCR to zero, the GIC will restore the binary ++ * points to their reset values. Anything else resets to zero ++ * anyway. ++ */ ++ vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0; ++ ++ /* Get the show on the road... 
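The get_lr/set_lr pair above is pure bit marshalling over the GICv2 GICH_LR word. The same encoding round-tripped as a self-contained user-space sketch (bit positions per the GICv2 architecture; the macro names mirror, but are independent of, the kernel headers):

#include <assert.h>
#include <stdint.h>

#define GICH_LR_VIRTUALID	0x3ffU		/* virtual IRQ in [9:0] */
#define GICH_LR_PHYSID_CPUID_SHIFT 10		/* SGI source in [12:10] */
#define GICH_LR_PENDING_BIT	(1U << 28)
#define GICH_LR_ACTIVE_BIT	(1U << 29)
#define LR_STATE_PENDING	(1 << 0)
#define LR_STATE_ACTIVE		(1 << 1)

struct lr { uint16_t irq; uint8_t source, state; };

static uint32_t pack(struct lr d)
{
	uint32_t v = ((uint32_t)d.source << GICH_LR_PHYSID_CPUID_SHIFT) | d.irq;

	if (d.state & LR_STATE_PENDING)
		v |= GICH_LR_PENDING_BIT;
	if (d.state & LR_STATE_ACTIVE)
		v |= GICH_LR_ACTIVE_BIT;
	return v;
}

static struct lr unpack(uint32_t v)
{
	struct lr d = { .irq = v & GICH_LR_VIRTUALID };

	d.source = d.irq <= 15 ? (v >> GICH_LR_PHYSID_CPUID_SHIFT) & 0x7 : 0;
	if (v & GICH_LR_PENDING_BIT)
		d.state |= LR_STATE_PENDING;
	if (v & GICH_LR_ACTIVE_BIT)
		d.state |= LR_STATE_ACTIVE;
	return d;
}

int main(void)
{
	struct lr sgi = { .irq = 3, .source = 2, .state = LR_STATE_PENDING };
	struct lr back = unpack(pack(sgi));

	assert(back.irq == 3 && back.source == 2 && back.state == sgi.state);
	return 0;
}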
*/ ++ vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN; ++} ++ ++static const struct vgic_ops vgic_v2_ops = { ++ .get_lr = vgic_v2_get_lr, ++ .set_lr = vgic_v2_set_lr, ++ .sync_lr_elrsr = vgic_v2_sync_lr_elrsr, ++ .get_elrsr = vgic_v2_get_elrsr, ++ .get_eisr = vgic_v2_get_eisr, ++ .get_interrupt_status = vgic_v2_get_interrupt_status, ++ .enable_underflow = vgic_v2_enable_underflow, ++ .disable_underflow = vgic_v2_disable_underflow, ++ .get_vmcr = vgic_v2_get_vmcr, ++ .set_vmcr = vgic_v2_set_vmcr, ++ .enable = vgic_v2_enable, ++}; ++ ++static struct vgic_params vgic_v2_params; ++ ++/** ++ * vgic_v2_probe - probe for a GICv2 compatible interrupt controller in DT ++ * @node: pointer to the DT node ++ * @ops: address of a pointer to the GICv2 operations ++ * @params: address of a pointer to HW-specific parameters ++ * ++ * Returns 0 if a GICv2 has been found, with the low level operations ++ * in *ops and the HW parameters in *params. Returns an error code ++ * otherwise. ++ */ ++int vgic_v2_probe(struct device_node *vgic_node, ++ const struct vgic_ops **ops, ++ const struct vgic_params **params) ++{ ++ int ret; ++ struct resource vctrl_res; ++ struct resource vcpu_res; ++ struct vgic_params *vgic = &vgic_v2_params; ++ ++ vgic->maint_irq = irq_of_parse_and_map(vgic_node, 0); ++ if (!vgic->maint_irq) { ++ kvm_err("error getting vgic maintenance irq from DT\n"); ++ ret = -ENXIO; ++ goto out; ++ } ++ ++ ret = of_address_to_resource(vgic_node, 2, &vctrl_res); ++ if (ret) { ++ kvm_err("Cannot obtain GICH resource\n"); ++ goto out; ++ } ++ ++ vgic->vctrl_base = of_iomap(vgic_node, 2); ++ if (!vgic->vctrl_base) { ++ kvm_err("Cannot ioremap GICH\n"); ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ vgic->nr_lr = readl_relaxed(vgic->vctrl_base + GICH_VTR); ++ vgic->nr_lr = (vgic->nr_lr & 0x3f) + 1; ++ ++ ret = create_hyp_io_mappings(vgic->vctrl_base, ++ vgic->vctrl_base + resource_size(&vctrl_res), ++ vctrl_res.start); ++ if (ret) { ++ kvm_err("Cannot map VCTRL into hyp\n"); ++ goto out_unmap; ++ } ++ ++ if (of_address_to_resource(vgic_node, 3, &vcpu_res)) { ++ kvm_err("Cannot obtain GICV resource\n"); ++ ret = -ENXIO; ++ goto out_unmap; ++ } ++ vgic->vcpu_base = vcpu_res.start; ++ ++ kvm_info("%s@%llx IRQ%d\n", vgic_node->name, ++ vctrl_res.start, vgic->maint_irq); ++ ++ vgic->type = VGIC_V2; ++ *ops = &vgic_v2_ops; ++ *params = vgic; ++ goto out; ++ ++out_unmap: ++ iounmap(vgic->vctrl_base); ++out: ++ of_node_put(vgic_node); ++ return ret; ++} +diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c +new file mode 100644 +index 0000000..f01d446 +--- /dev/null ++++ b/virt/kvm/arm/vgic-v3.c +@@ -0,0 +1,231 @@ ++/* ++ * Copyright (C) 2013 ARM Limited, All Rights Reserved. ++ * Author: Marc Zyngier ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see . 
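A small aside on vgic_v2_probe() above: GICH_VTR stores the number of implemented list registers minus one in its low six bits, hence the "(vtr & 0x3f) + 1" decode. Checked standalone (the GIC-400 figure is cited from memory as an example, not from this series):

#include <assert.h>
#include <stdint.h>

static unsigned int gich_vtr_to_nr_lr(uint32_t vtr)
{
	return (vtr & 0x3f) + 1;	/* ListRegs field holds LRs - 1 */
}

int main(void)
{
	assert(gich_vtr_to_nr_lr(0x00) == 1);	/* minimum: a single LR */
	assert(gich_vtr_to_nr_lr(0x03) == 4);	/* e.g. GIC-400 */
	assert(gich_vtr_to_nr_lr(0x3f) == 64);	/* field maximum */
	return 0;
}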
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include ++#include ++#include ++ ++/* These are for GICv2 emulation only */ ++#define GICH_LR_VIRTUALID (0x3ffUL << 0) ++#define GICH_LR_PHYSID_CPUID_SHIFT (10) ++#define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT) ++ ++/* ++ * LRs are stored in reverse order in memory. make sure we index them ++ * correctly. ++ */ ++#define LR_INDEX(lr) (VGIC_V3_MAX_LRS - 1 - lr) ++ ++static u32 ich_vtr_el2; ++ ++static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr) ++{ ++ struct vgic_lr lr_desc; ++ u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)]; ++ ++ lr_desc.irq = val & GICH_LR_VIRTUALID; ++ if (lr_desc.irq <= 15) ++ lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0x7; ++ else ++ lr_desc.source = 0; ++ lr_desc.state = 0; ++ ++ if (val & ICH_LR_PENDING_BIT) ++ lr_desc.state |= LR_STATE_PENDING; ++ if (val & ICH_LR_ACTIVE_BIT) ++ lr_desc.state |= LR_STATE_ACTIVE; ++ if (val & ICH_LR_EOI) ++ lr_desc.state |= LR_EOI_INT; ++ ++ return lr_desc; ++} ++ ++static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr, ++ struct vgic_lr lr_desc) ++{ ++ u64 lr_val = (((u32)lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT) | ++ lr_desc.irq); ++ ++ if (lr_desc.state & LR_STATE_PENDING) ++ lr_val |= ICH_LR_PENDING_BIT; ++ if (lr_desc.state & LR_STATE_ACTIVE) ++ lr_val |= ICH_LR_ACTIVE_BIT; ++ if (lr_desc.state & LR_EOI_INT) ++ lr_val |= ICH_LR_EOI; ++ ++ vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)] = lr_val; ++} ++ ++static void vgic_v3_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr, ++ struct vgic_lr lr_desc) ++{ ++ if (!(lr_desc.state & LR_STATE_MASK)) ++ vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr); ++} ++ ++static u64 vgic_v3_get_elrsr(const struct kvm_vcpu *vcpu) ++{ ++ return vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr; ++} ++ ++static u64 vgic_v3_get_eisr(const struct kvm_vcpu *vcpu) ++{ ++ return vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr; ++} ++ ++static u32 vgic_v3_get_interrupt_status(const struct kvm_vcpu *vcpu) ++{ ++ u32 misr = vcpu->arch.vgic_cpu.vgic_v3.vgic_misr; ++ u32 ret = 0; ++ ++ if (misr & ICH_MISR_EOI) ++ ret |= INT_STATUS_EOI; ++ if (misr & ICH_MISR_U) ++ ret |= INT_STATUS_UNDERFLOW; ++ ++ return ret; ++} ++ ++static void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) ++{ ++ u32 vmcr = vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr; ++ ++ vmcrp->ctlr = (vmcr & ICH_VMCR_CTLR_MASK) >> ICH_VMCR_CTLR_SHIFT; ++ vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT; ++ vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT; ++ vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT; ++} ++ ++static void vgic_v3_enable_underflow(struct kvm_vcpu *vcpu) ++{ ++ vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr |= ICH_HCR_UIE; ++} ++ ++static void vgic_v3_disable_underflow(struct kvm_vcpu *vcpu) ++{ ++ vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr &= ~ICH_HCR_UIE; ++} ++ ++static void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) ++{ ++ u32 vmcr; ++ ++ vmcr = (vmcrp->ctlr << ICH_VMCR_CTLR_SHIFT) & ICH_VMCR_CTLR_MASK; ++ vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK; ++ vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK; ++ vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK; ++ ++ vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr = vmcr; ++} ++ ++static void vgic_v3_enable(struct kvm_vcpu *vcpu) ++{ ++ /* ++ * By forcing VMCR to zero, the GIC will restore the binary ++ * points to 
their reset values. Anything else resets to zero ++ * anyway. ++ */ ++ vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr = 0; ++ ++ /* Get the show on the road... */ ++ vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr = ICH_HCR_EN; ++} ++ ++static const struct vgic_ops vgic_v3_ops = { ++ .get_lr = vgic_v3_get_lr, ++ .set_lr = vgic_v3_set_lr, ++ .sync_lr_elrsr = vgic_v3_sync_lr_elrsr, ++ .get_elrsr = vgic_v3_get_elrsr, ++ .get_eisr = vgic_v3_get_eisr, ++ .get_interrupt_status = vgic_v3_get_interrupt_status, ++ .enable_underflow = vgic_v3_enable_underflow, ++ .disable_underflow = vgic_v3_disable_underflow, ++ .get_vmcr = vgic_v3_get_vmcr, ++ .set_vmcr = vgic_v3_set_vmcr, ++ .enable = vgic_v3_enable, ++}; ++ ++static struct vgic_params vgic_v3_params; ++ ++/** ++ * vgic_v3_probe - probe for a GICv3 compatible interrupt controller in DT ++ * @node: pointer to the DT node ++ * @ops: address of a pointer to the GICv3 operations ++ * @params: address of a pointer to HW-specific parameters ++ * ++ * Returns 0 if a GICv3 has been found, with the low level operations ++ * in *ops and the HW parameters in *params. Returns an error code ++ * otherwise. ++ */ ++int vgic_v3_probe(struct device_node *vgic_node, ++ const struct vgic_ops **ops, ++ const struct vgic_params **params) ++{ ++ int ret = 0; ++ u32 gicv_idx; ++ struct resource vcpu_res; ++ struct vgic_params *vgic = &vgic_v3_params; ++ ++ vgic->maint_irq = irq_of_parse_and_map(vgic_node, 0); ++ if (!vgic->maint_irq) { ++ kvm_err("error getting vgic maintenance irq from DT\n"); ++ ret = -ENXIO; ++ goto out; ++ } ++ ++ ich_vtr_el2 = kvm_call_hyp(__vgic_v3_get_ich_vtr_el2); ++ ++ /* ++ * The ListRegs field is 5 bits, but there is an architectural ++ * maximum of 16 list registers. Just ignore bit 4... ++ */ ++ vgic->nr_lr = (ich_vtr_el2 & 0xf) + 1; ++ ++ if (of_property_read_u32(vgic_node, "#redistributor-regions", &gicv_idx)) ++ gicv_idx = 1; ++ ++ gicv_idx += 3; /* Also skip GICD, GICC, GICH */ ++ if (of_address_to_resource(vgic_node, gicv_idx, &vcpu_res)) { ++ kvm_err("Cannot obtain GICV region\n"); ++ ret = -ENXIO; ++ goto out; ++ } ++ vgic->vcpu_base = vcpu_res.start; ++ vgic->vctrl_base = NULL; ++ vgic->type = VGIC_V3; ++ ++ kvm_info("%s@%llx IRQ%d\n", vgic_node->name, ++ vcpu_res.start, vgic->maint_irq); ++ ++ *ops = &vgic_v3_ops; ++ *params = vgic; ++ ++out: ++ of_node_put(vgic_node); ++ return ret; ++} +diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c +index 56ff9be..795ab48 100644 +--- a/virt/kvm/arm/vgic.c ++++ b/virt/kvm/arm/vgic.c +@@ -76,14 +76,6 @@ + #define IMPLEMENTER_ARM 0x43b + #define GICC_ARCH_VERSION_V2 0x2 + +-/* Physical address of vgic virtual cpu interface */ +-static phys_addr_t vgic_vcpu_base; +- +-/* Virtual control interface base address */ +-static void __iomem *vgic_vctrl_base; +- +-static struct device_node *vgic_node; +- + #define ACCESS_READ_VALUE (1 << 0) + #define ACCESS_READ_RAZ (0 << 0) + #define ACCESS_READ_MASK(x) ((x) & (1 << 0)) +@@ -94,12 +86,17 @@ static struct device_node *vgic_node; + #define ACCESS_WRITE_MASK(x) ((x) & (3 << 1)) + + static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu); ++static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu); + static void vgic_update_state(struct kvm *kvm); + static void vgic_kick_vcpus(struct kvm *kvm); + static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg); +-static u32 vgic_nr_lr; ++static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr); ++static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc); ++static void 
vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); ++static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); + +-static unsigned int vgic_maint_irq; ++static const struct vgic_ops *vgic_ops; ++static const struct vgic_params *vgic; + + static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, + int cpuid, u32 offset) +@@ -593,18 +590,6 @@ static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu, + return false; + } + +-#define LR_CPUID(lr) \ +- (((lr) & GICH_LR_PHYSID_CPUID) >> GICH_LR_PHYSID_CPUID_SHIFT) +-#define LR_IRQID(lr) \ +- ((lr) & GICH_LR_VIRTUALID) +- +-static void vgic_retire_lr(int lr_nr, int irq, struct vgic_cpu *vgic_cpu) +-{ +- clear_bit(lr_nr, vgic_cpu->lr_used); +- vgic_cpu->vgic_lr[lr_nr] &= ~GICH_LR_STATE; +- vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY; +-} +- + /** + * vgic_unqueue_irqs - move pending IRQs from LRs to the distributor + * @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs +@@ -622,13 +607,10 @@ static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu) + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; + int vcpu_id = vcpu->vcpu_id; +- int i, irq, source_cpu; +- u32 *lr; ++ int i; + + for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) { +- lr = &vgic_cpu->vgic_lr[i]; +- irq = LR_IRQID(*lr); +- source_cpu = LR_CPUID(*lr); ++ struct vgic_lr lr = vgic_get_lr(vcpu, i); + + /* + * There are three options for the state bits: +@@ -640,7 +622,7 @@ static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu) + * If the LR holds only an active interrupt (not pending) then + * just leave it alone. + */ +- if ((*lr & GICH_LR_STATE) == GICH_LR_ACTIVE_BIT) ++ if ((lr.state & LR_STATE_MASK) == LR_STATE_ACTIVE) + continue; + + /* +@@ -649,18 +631,19 @@ static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu) + * is fine, then we are only setting a few bits that were + * already set. + */ +- vgic_dist_irq_set(vcpu, irq); +- if (irq < VGIC_NR_SGIS) +- dist->irq_sgi_sources[vcpu_id][irq] |= 1 << source_cpu; +- *lr &= ~GICH_LR_PENDING_BIT; ++ vgic_dist_irq_set(vcpu, lr.irq); ++ if (lr.irq < VGIC_NR_SGIS) ++ dist->irq_sgi_sources[vcpu_id][lr.irq] |= 1 << lr.source; ++ lr.state &= ~LR_STATE_PENDING; ++ vgic_set_lr(vcpu, i, lr); + + /* + * If there's no state left on the LR (it could still be + * active), then the LR does not hold any useful info and can + * be marked as free for other use. + */ +- if (!(*lr & GICH_LR_STATE)) +- vgic_retire_lr(i, irq, vgic_cpu); ++ if (!(lr.state & LR_STATE_MASK)) ++ vgic_retire_lr(i, lr.irq, vcpu); + + /* Finally update the VGIC state. 
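The rework above boils vgic_unqueue_irqs() down to three cases on the LR state bits: active-only LRs are left alone, pending state is handed back to the emulated distributor, and an LR with no state left is recycled. A compressed standalone model of that decision (hypothetical flags, not the kernel types):

#include <stdio.h>

enum { PENDING = 1, ACTIVE = 2 };

int main(void)
{
	int lr_state = PENDING | ACTIVE;
	int dist_pending = 0, lr_free = 0;

	if (lr_state != ACTIVE) {	/* pending, possibly also active */
		dist_pending = 1;	/* push pending back to the distributor */
		lr_state &= ~PENDING;
	}
	if (lr_state == 0)		/* nothing left: the LR can be reused */
		lr_free = 1;

	printf("dist_pending=%d lr_state=%d lr_free=%d\n",
	       dist_pending, lr_state, lr_free);
	return 0;
}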
*/ + vgic_update_state(vcpu->kvm); +@@ -989,8 +972,73 @@ static void vgic_update_state(struct kvm *kvm) + } + } + +-#define MK_LR_PEND(src, irq) \ +- (GICH_LR_PENDING_BIT | ((src) << GICH_LR_PHYSID_CPUID_SHIFT) | (irq)) ++static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr) ++{ ++ return vgic_ops->get_lr(vcpu, lr); ++} ++ ++static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, ++ struct vgic_lr vlr) ++{ ++ vgic_ops->set_lr(vcpu, lr, vlr); ++} ++ ++static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr, ++ struct vgic_lr vlr) ++{ ++ vgic_ops->sync_lr_elrsr(vcpu, lr, vlr); ++} ++ ++static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu) ++{ ++ return vgic_ops->get_elrsr(vcpu); ++} ++ ++static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu) ++{ ++ return vgic_ops->get_eisr(vcpu); ++} ++ ++static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu) ++{ ++ return vgic_ops->get_interrupt_status(vcpu); ++} ++ ++static inline void vgic_enable_underflow(struct kvm_vcpu *vcpu) ++{ ++ vgic_ops->enable_underflow(vcpu); ++} ++ ++static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu) ++{ ++ vgic_ops->disable_underflow(vcpu); ++} ++ ++static inline void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr) ++{ ++ vgic_ops->get_vmcr(vcpu, vmcr); ++} ++ ++static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr) ++{ ++ vgic_ops->set_vmcr(vcpu, vmcr); ++} ++ ++static inline void vgic_enable(struct kvm_vcpu *vcpu) ++{ ++ vgic_ops->enable(vcpu); ++} ++ ++static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu) ++{ ++ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; ++ struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr); ++ ++ vlr.state = 0; ++ vgic_set_lr(vcpu, lr_nr, vlr); ++ clear_bit(lr_nr, vgic_cpu->lr_used); ++ vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY; ++} + + /* + * An interrupt may have been disabled after being made pending on the +@@ -1006,13 +1054,13 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu) + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; + int lr; + +- for_each_set_bit(lr, vgic_cpu->lr_used, vgic_cpu->nr_lr) { +- int irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID; ++ for_each_set_bit(lr, vgic_cpu->lr_used, vgic->nr_lr) { ++ struct vgic_lr vlr = vgic_get_lr(vcpu, lr); + +- if (!vgic_irq_is_enabled(vcpu, irq)) { +- vgic_retire_lr(lr, irq, vgic_cpu); +- if (vgic_irq_is_active(vcpu, irq)) +- vgic_irq_clear_active(vcpu, irq); ++ if (!vgic_irq_is_enabled(vcpu, vlr.irq)) { ++ vgic_retire_lr(lr, vlr.irq, vcpu); ++ if (vgic_irq_is_active(vcpu, vlr.irq)) ++ vgic_irq_clear_active(vcpu, vlr.irq); + } + } + } +@@ -1024,6 +1072,7 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu) + static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) + { + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; ++ struct vgic_lr vlr; + int lr; + + /* Sanitize the input... */ +@@ -1036,28 +1085,34 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) + lr = vgic_cpu->vgic_irq_lr_map[irq]; + + /* Do we have an active interrupt for the same CPUID? 
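The inline wrappers above are the whole runtime cost of the GICv2/GICv3 split on these paths: one load of a const ops pointer set at probe time plus an indirect call. The shape of that pattern, reduced to a runnable example with hypothetical names:

#include <stdio.h>

struct backend_ops {
	const char *name;
	unsigned int (*get_lr)(int lr);
};

static unsigned int v2_get_lr(int lr)
{
	return 0x1000u + lr;	/* stand-in for a real register read */
}

static const struct backend_ops v2_ops = {
	.name = "v2",
	.get_lr = v2_get_lr,
};

/* Assigned once during init, cf. vgic_probe() filling vgic_ops. */
static const struct backend_ops *ops;

int main(void)
{
	ops = &v2_ops;		/* "probe" picked the v2 backend */
	printf("%s LR0 = %#x\n", ops->name, ops->get_lr(0));
	return 0;
}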
*/ +- if (lr != LR_EMPTY && +- (LR_CPUID(vgic_cpu->vgic_lr[lr]) == sgi_source_id)) { +- kvm_debug("LR%d piggyback for IRQ%d %x\n", +- lr, irq, vgic_cpu->vgic_lr[lr]); +- BUG_ON(!test_bit(lr, vgic_cpu->lr_used)); +- vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT; +- return true; ++ if (lr != LR_EMPTY) { ++ vlr = vgic_get_lr(vcpu, lr); ++ if (vlr.source == sgi_source_id) { ++ kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq); ++ BUG_ON(!test_bit(lr, vgic_cpu->lr_used)); ++ vlr.state |= LR_STATE_PENDING; ++ vgic_set_lr(vcpu, lr, vlr); ++ return true; ++ } + } + + /* Try to use another LR for this interrupt */ + lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used, +- vgic_cpu->nr_lr); +- if (lr >= vgic_cpu->nr_lr) ++ vgic->nr_lr); ++ if (lr >= vgic->nr_lr) + return false; + + kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id); +- vgic_cpu->vgic_lr[lr] = MK_LR_PEND(sgi_source_id, irq); + vgic_cpu->vgic_irq_lr_map[irq] = lr; + set_bit(lr, vgic_cpu->lr_used); + ++ vlr.irq = irq; ++ vlr.source = sgi_source_id; ++ vlr.state = LR_STATE_PENDING; + if (!vgic_irq_is_edge(vcpu, irq)) +- vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI; ++ vlr.state |= LR_EOI_INT; ++ ++ vgic_set_lr(vcpu, lr, vlr); + + return true; + } +@@ -1155,9 +1210,9 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) + + epilog: + if (overflow) { +- vgic_cpu->vgic_hcr |= GICH_HCR_UIE; ++ vgic_enable_underflow(vcpu); + } else { +- vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE; ++ vgic_disable_underflow(vcpu); + /* + * We're about to run this VCPU, and we've consumed + * everything the distributor had in store for +@@ -1170,44 +1225,46 @@ epilog: + + static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) + { +- struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; ++ u32 status = vgic_get_interrupt_status(vcpu); + bool level_pending = false; + +- kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr); ++ kvm_debug("STATUS = %08x\n", status); + +- if (vgic_cpu->vgic_misr & GICH_MISR_EOI) { ++ if (status & INT_STATUS_EOI) { + /* + * Some level interrupts have been EOIed. Clear their + * active bit. + */ +- int lr, irq; ++ u64 eisr = vgic_get_eisr(vcpu); ++ unsigned long *eisr_ptr = (unsigned long *)&eisr; ++ int lr; + +- for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_eisr, +- vgic_cpu->nr_lr) { +- irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID; ++ for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) { ++ struct vgic_lr vlr = vgic_get_lr(vcpu, lr); + +- vgic_irq_clear_active(vcpu, irq); +- vgic_cpu->vgic_lr[lr] &= ~GICH_LR_EOI; ++ vgic_irq_clear_active(vcpu, vlr.irq); ++ WARN_ON(vlr.state & LR_STATE_MASK); ++ vlr.state = 0; ++ vgic_set_lr(vcpu, lr, vlr); + + /* Any additional pending interrupt? */ +- if (vgic_dist_irq_is_pending(vcpu, irq)) { +- vgic_cpu_irq_set(vcpu, irq); ++ if (vgic_dist_irq_is_pending(vcpu, vlr.irq)) { ++ vgic_cpu_irq_set(vcpu, vlr.irq); + level_pending = true; + } else { +- vgic_cpu_irq_clear(vcpu, irq); ++ vgic_cpu_irq_clear(vcpu, vlr.irq); + } + + /* + * Despite being EOIed, the LR may not have + * been marked as empty. 
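One detail worth flagging in vgic_process_maintenance() above: the u64 EISR snapshot is walked via a cast to unsigned long *, which is sound on the little-endian configurations this code targets (on 32-bit LE the low word sits first, as for_each_set_bit expects). A standalone model of the same walk:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t eisr = (1ULL << 3) | (1ULL << 40); /* LR3, LR40 EOIed */
	unsigned int nr_lr = 64;

	for (unsigned int lr = 0; lr < nr_lr; lr++)
		if (eisr >> lr & 1)
			printf("LR%u: clear active, recheck pending\n", lr);
	return 0;
}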
+ */ +- set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr); +- vgic_cpu->vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT; ++ vgic_sync_lr_elrsr(vcpu, lr, vlr); + } + } + +- if (vgic_cpu->vgic_misr & GICH_MISR_U) +- vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE; ++ if (status & INT_STATUS_UNDERFLOW) ++ vgic_disable_underflow(vcpu); + + return level_pending; + } +@@ -1220,29 +1277,31 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) + { + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; ++ u64 elrsr; ++ unsigned long *elrsr_ptr; + int lr, pending; + bool level_pending; + + level_pending = vgic_process_maintenance(vcpu); ++ elrsr = vgic_get_elrsr(vcpu); ++ elrsr_ptr = (unsigned long *)&elrsr; + + /* Clear mappings for empty LRs */ +- for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr, +- vgic_cpu->nr_lr) { +- int irq; ++ for_each_set_bit(lr, elrsr_ptr, vgic->nr_lr) { ++ struct vgic_lr vlr; + + if (!test_and_clear_bit(lr, vgic_cpu->lr_used)) + continue; + +- irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID; ++ vlr = vgic_get_lr(vcpu, lr); + +- BUG_ON(irq >= VGIC_NR_IRQS); +- vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY; ++ BUG_ON(vlr.irq >= VGIC_NR_IRQS); ++ vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY; + } + + /* Check if we still have something up our sleeve... */ +- pending = find_first_zero_bit((unsigned long *)vgic_cpu->vgic_elrsr, +- vgic_cpu->nr_lr); +- if (level_pending || pending < vgic_cpu->nr_lr) ++ pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr); ++ if (level_pending || pending < vgic->nr_lr) + set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu); + } + +@@ -1432,21 +1491,20 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) + } + + /* +- * By forcing VMCR to zero, the GIC will restore the binary +- * points to their reset values. Anything else resets to zero +- * anyway. ++ * Store the number of LRs per vcpu, so we don't have to go ++ * all the way to the distributor structure to find out. Only ++ * assembly code should use this one. + */ +- vgic_cpu->vgic_vmcr = 0; ++ vgic_cpu->nr_lr = vgic->nr_lr; + +- vgic_cpu->nr_lr = vgic_nr_lr; +- vgic_cpu->vgic_hcr = GICH_HCR_EN; /* Get the show on the road... 
*/ ++ vgic_enable(vcpu); + + return 0; + } + + static void vgic_init_maintenance_interrupt(void *info) + { +- enable_percpu_irq(vgic_maint_irq, 0); ++ enable_percpu_irq(vgic->maint_irq, 0); + } + + static int vgic_cpu_notify(struct notifier_block *self, +@@ -1459,7 +1517,7 @@ static int vgic_cpu_notify(struct notifier_block *self, + break; + case CPU_DYING: + case CPU_DYING_FROZEN: +- disable_percpu_irq(vgic_maint_irq); ++ disable_percpu_irq(vgic->maint_irq); + break; + } + +@@ -1470,30 +1528,37 @@ static struct notifier_block vgic_cpu_nb = { + .notifier_call = vgic_cpu_notify, + }; + ++static const struct of_device_id vgic_ids[] = { ++ { .compatible = "arm,cortex-a15-gic", .data = vgic_v2_probe, }, ++ { .compatible = "arm,gic-v3", .data = vgic_v3_probe, }, ++ {}, ++}; ++ + int kvm_vgic_hyp_init(void) + { ++ const struct of_device_id *matched_id; ++ int (*vgic_probe)(struct device_node *, const struct vgic_ops **, ++ const struct vgic_params **); ++ struct device_node *vgic_node; + int ret; +- struct resource vctrl_res; +- struct resource vcpu_res; + +- vgic_node = of_find_compatible_node(NULL, NULL, "arm,cortex-a15-gic"); ++ vgic_node = of_find_matching_node_and_match(NULL, ++ vgic_ids, &matched_id); + if (!vgic_node) { +- kvm_err("error: no compatible vgic node in DT\n"); ++ kvm_err("error: no compatible GIC node found\n"); + return -ENODEV; + } + +- vgic_maint_irq = irq_of_parse_and_map(vgic_node, 0); +- if (!vgic_maint_irq) { +- kvm_err("error getting vgic maintenance irq from DT\n"); +- ret = -ENXIO; +- goto out; +- } ++ vgic_probe = matched_id->data; ++ ret = vgic_probe(vgic_node, &vgic_ops, &vgic); ++ if (ret) ++ return ret; + +- ret = request_percpu_irq(vgic_maint_irq, vgic_maintenance_handler, ++ ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler, + "vgic", kvm_get_running_vcpus()); + if (ret) { +- kvm_err("Cannot register interrupt %d\n", vgic_maint_irq); +- goto out; ++ kvm_err("Cannot register interrupt %d\n", vgic->maint_irq); ++ return ret; + } + + ret = __register_cpu_notifier(&vgic_cpu_nb); +@@ -1502,49 +1567,15 @@ int kvm_vgic_hyp_init(void) + goto out_free_irq; + } + +- ret = of_address_to_resource(vgic_node, 2, &vctrl_res); +- if (ret) { +- kvm_err("Cannot obtain VCTRL resource\n"); +- goto out_free_irq; +- } +- +- vgic_vctrl_base = of_iomap(vgic_node, 2); +- if (!vgic_vctrl_base) { +- kvm_err("Cannot ioremap VCTRL\n"); +- ret = -ENOMEM; +- goto out_free_irq; +- } +- +- vgic_nr_lr = readl_relaxed(vgic_vctrl_base + GICH_VTR); +- vgic_nr_lr = (vgic_nr_lr & 0x3f) + 1; +- +- ret = create_hyp_io_mappings(vgic_vctrl_base, +- vgic_vctrl_base + resource_size(&vctrl_res), +- vctrl_res.start); +- if (ret) { +- kvm_err("Cannot map VCTRL into hyp\n"); +- goto out_unmap; +- } +- +- kvm_info("%s@%llx IRQ%d\n", vgic_node->name, +- vctrl_res.start, vgic_maint_irq); + on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1); + +- if (of_address_to_resource(vgic_node, 3, &vcpu_res)) { +- kvm_err("Cannot obtain VCPU resource\n"); +- ret = -ENXIO; +- goto out_unmap; +- } +- vgic_vcpu_base = vcpu_res.start; ++ /* Callback into arch code for setup */ ++ vgic_arch_setup(vgic); + +- goto out; ++ return 0; + +-out_unmap: +- iounmap(vgic_vctrl_base); + out_free_irq: +- free_percpu_irq(vgic_maint_irq, kvm_get_running_vcpus()); +-out: +- of_node_put(vgic_node); ++ free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus()); + return ret; + } + +@@ -1577,7 +1608,7 @@ int kvm_vgic_init(struct kvm *kvm) + } + + ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base, +- 
vgic_vcpu_base, KVM_VGIC_V2_CPU_SIZE); ++ vgic->vcpu_base, KVM_VGIC_V2_CPU_SIZE); + if (ret) { + kvm_err("Unable to remap VGIC CPU to VCPU\n"); + goto out; +@@ -1623,7 +1654,8 @@ int kvm_vgic_create(struct kvm *kvm) + } + + spin_lock_init(&kvm->arch.vgic.lock); +- kvm->arch.vgic.vctrl_base = vgic_vctrl_base; ++ kvm->arch.vgic.in_kernel = true; ++ kvm->arch.vgic.vctrl_base = vgic->vctrl_base; + kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF; + kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF; + +@@ -1722,39 +1754,40 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write) + static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, phys_addr_t offset) + { +- struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; +- u32 reg, mask = 0, shift = 0; + bool updated = false; ++ struct vgic_vmcr vmcr; ++ u32 *vmcr_field; ++ u32 reg; ++ ++ vgic_get_vmcr(vcpu, &vmcr); + + switch (offset & ~0x3) { + case GIC_CPU_CTRL: +- mask = GICH_VMCR_CTRL_MASK; +- shift = GICH_VMCR_CTRL_SHIFT; ++ vmcr_field = &vmcr.ctlr; + break; + case GIC_CPU_PRIMASK: +- mask = GICH_VMCR_PRIMASK_MASK; +- shift = GICH_VMCR_PRIMASK_SHIFT; ++ vmcr_field = &vmcr.pmr; + break; + case GIC_CPU_BINPOINT: +- mask = GICH_VMCR_BINPOINT_MASK; +- shift = GICH_VMCR_BINPOINT_SHIFT; ++ vmcr_field = &vmcr.bpr; + break; + case GIC_CPU_ALIAS_BINPOINT: +- mask = GICH_VMCR_ALIAS_BINPOINT_MASK; +- shift = GICH_VMCR_ALIAS_BINPOINT_SHIFT; ++ vmcr_field = &vmcr.abpr; + break; ++ default: ++ BUG(); + } + + if (!mmio->is_write) { +- reg = (vgic_cpu->vgic_vmcr & mask) >> shift; ++ reg = *vmcr_field; + mmio_data_write(mmio, ~0, reg); + } else { + reg = mmio_data_read(mmio, ~0); +- reg = (reg << shift) & mask; +- if (reg != (vgic_cpu->vgic_vmcr & mask)) ++ if (reg != *vmcr_field) { ++ *vmcr_field = reg; ++ vgic_set_vmcr(vcpu, &vmcr); + updated = true; +- vgic_cpu->vgic_vmcr &= ~mask; +- vgic_cpu->vgic_vmcr |= reg; ++ } + } + return updated; + } diff --git a/kernel.spec b/kernel.spec index 61367f9..5c77c72 100644 --- a/kernel.spec +++ b/kernel.spec @@ -8,6 +8,8 @@ Summary: The Linux kernel # be 0. %global released_kernel 0 +%global aarch64patches 1 + # Sign modules on x86. Make sure the config files match this setting if more # architectures are added. %ifarch %{ix86} x86_64 @@ -40,7 +42,7 @@ Summary: The Linux kernel # For non-released -rc kernels, this will be appended after the rcX and # gitX tags, so a 3 here would become part of release "0.rcX.gitX.3" # -%global baserelease 1 +%global baserelease 2 %global fedora_build %{baserelease} # base_sublevel is the kernel version we're starting with and patching @@ -364,7 +366,11 @@ Summary: The Linux kernel # Which is a BadThing(tm). # We only build kernel-headers on the following... +%if 0%{?aarch64patches} %define nobuildarches i386 s390 +%else +%define nobuildarches i386 s390 aarch64 +%endif %ifarch %nobuildarches %define with_up 0 @@ -645,6 +651,9 @@ Patch25106: x86_32-entry-Do-syscall-exit-work-on-badsys.patch Patch25109: revert-input-wacom-testing-result-shows-get_report-is-unnecessary.patch +# git clone ssh://git.fedorahosted.org/git/kernel-arm64.git, git diff master...devel +Patch30000: kernel-arm64.patch + # END OF PATCH DEFINITIONS %endif @@ -1369,6 +1378,13 @@ ApplyPatch x86_32-entry-Do-syscall-exit-work-on-badsys.patch ApplyPatch revert-input-wacom-testing-result-shows-get_report-is-unnecessary.patch +%if 0%{?aarch64patches} +ApplyPatch kernel-arm64.patch +%ifnarch aarch64 # this is stupid, but i want to notice before secondary koji does. 
+ApplyPatch kernel-arm64.patch -R +%endif +%endif + # END OF PATCH APPLICATIONS %endif @@ -2241,6 +2257,15 @@ fi # ||----w | # || || %changelog +* Thu Jun 26 2014 Kyle McMartin - 3.16.0-0.rc2.git3.2 +- Add kernel-arm64.patch, which contains AArch64 support destined for upstream. + ssh://git.fedorahosted.org/git/kernel-arm64.git is Mark Salter's source tree + integrating these patches on the devel branch. I've added a twiddle to the + top of the spec file to disable the aarch64 patchset, and also set aarch64 + to nobuildarches, so we still get kernel-headers, but no one accidentally + installs a non-booting kernel if the patchset causes rejects during a + rebase. + * Thu Jun 26 2014 Josh Boyer - Trimmed changelog, see fedpkg git for earlier history.