Mirror of https://github.com/openwrt/openwrt.git (synced 2024-12-27 01:11:14 +00:00)
9aa196e0f2
Refresh patches, following required reworking:

  ar71xx/patches-4.9/930-chipidea-pullup.patch
  layerscape/patches-4.9/302-dts-support-layercape.patch
  sunxi/patches-4.9/0052-stmmac-form-4-12.patch

Fixes for CVEs:

  CVE-2018-1108
  CVE-2018-1092

Tested on: ar71xx Archer C7 v2

Signed-off-by: Kevin Darbyshire-Bryant <ldir@darbyshire-bryant.me.uk>
Tested-by: Koen Vandeputte <koen.vandeputte@ncentric.com>
Tested-by: Arjen de Korte <build+openwrt@de-korte.org>
1977 lines, 55 KiB
From 2ab544f7e943c63c300933d34815e78451cc0c26 Mon Sep 17 00:00:00 2001
From: Yangbo Lu <yangbo.lu@nxp.com>
Date: Wed, 17 Jan 2018 15:37:56 +0800
Subject: [PATCH 25/30] qe: support layerscape

This is an integrated patch for layerscape qe support.

Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
---
 drivers/{soc/fsl/qe/qe_ic.c => irqchip/irq-qeic.c} | 389 +++++++++++++--------
 drivers/net/wan/fsl_ucc_hdlc.c                     |   4 +-
 drivers/soc/fsl/qe/Kconfig                         |   2 +-
 drivers/soc/fsl/qe/Makefile                        |   2 +-
 drivers/soc/fsl/qe/qe.c                            |  80 +++--
 drivers/soc/fsl/qe/qe_ic.h                         | 103 ------
 drivers/soc/fsl/qe/qe_io.c                         |  42 +--
 drivers/soc/fsl/qe/qe_tdm.c                        |   8 +-
 drivers/soc/fsl/qe/ucc.c                           |  10 +-
 drivers/soc/fsl/qe/ucc_fast.c                      |  74 ++--
 drivers/tty/serial/ucc_uart.c                      |   1 +
 include/soc/fsl/qe/qe.h                            |   1 -
 include/soc/fsl/qe/qe_ic.h                         | 139 --------
 13 files changed, 359 insertions(+), 496 deletions(-)
 rename drivers/{soc/fsl/qe/qe_ic.c => irqchip/irq-qeic.c} (54%)
 delete mode 100644 drivers/soc/fsl/qe/qe_ic.h
 delete mode 100644 include/soc/fsl/qe/qe_ic.h

--- a/drivers/soc/fsl/qe/qe_ic.c
|
|
+++ /dev/null
|
|
@@ -1,512 +0,0 @@
|
|
-/*
|
|
- * arch/powerpc/sysdev/qe_lib/qe_ic.c
|
|
- *
|
|
- * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
|
|
- *
|
|
- * Author: Li Yang <leoli@freescale.com>
|
|
- * Based on code from Shlomi Gridish <gridish@freescale.com>
|
|
- *
|
|
- * QUICC ENGINE Interrupt Controller
|
|
- *
|
|
- * This program is free software; you can redistribute it and/or modify it
|
|
- * under the terms of the GNU General Public License as published by the
|
|
- * Free Software Foundation; either version 2 of the License, or (at your
|
|
- * option) any later version.
|
|
- */
|
|
-
|
|
-#include <linux/of_irq.h>
|
|
-#include <linux/of_address.h>
|
|
-#include <linux/kernel.h>
|
|
-#include <linux/init.h>
|
|
-#include <linux/errno.h>
|
|
-#include <linux/reboot.h>
|
|
-#include <linux/slab.h>
|
|
-#include <linux/stddef.h>
|
|
-#include <linux/sched.h>
|
|
-#include <linux/signal.h>
|
|
-#include <linux/device.h>
|
|
-#include <linux/spinlock.h>
|
|
-#include <asm/irq.h>
|
|
-#include <asm/io.h>
|
|
-#include <soc/fsl/qe/qe_ic.h>
|
|
-
|
|
-#include "qe_ic.h"
|
|
-
|
|
-static DEFINE_RAW_SPINLOCK(qe_ic_lock);
|
|
-
|
|
-static struct qe_ic_info qe_ic_info[] = {
|
|
- [1] = {
|
|
- .mask = 0x00008000,
|
|
- .mask_reg = QEIC_CIMR,
|
|
- .pri_code = 0,
|
|
- .pri_reg = QEIC_CIPWCC,
|
|
- },
|
|
- [2] = {
|
|
- .mask = 0x00004000,
|
|
- .mask_reg = QEIC_CIMR,
|
|
- .pri_code = 1,
|
|
- .pri_reg = QEIC_CIPWCC,
|
|
- },
|
|
- [3] = {
|
|
- .mask = 0x00002000,
|
|
- .mask_reg = QEIC_CIMR,
|
|
- .pri_code = 2,
|
|
- .pri_reg = QEIC_CIPWCC,
|
|
- },
|
|
- [10] = {
|
|
- .mask = 0x00000040,
|
|
- .mask_reg = QEIC_CIMR,
|
|
- .pri_code = 1,
|
|
- .pri_reg = QEIC_CIPZCC,
|
|
- },
|
|
- [11] = {
|
|
- .mask = 0x00000020,
|
|
- .mask_reg = QEIC_CIMR,
|
|
- .pri_code = 2,
|
|
- .pri_reg = QEIC_CIPZCC,
|
|
- },
|
|
- [12] = {
|
|
- .mask = 0x00000010,
|
|
- .mask_reg = QEIC_CIMR,
|
|
- .pri_code = 3,
|
|
- .pri_reg = QEIC_CIPZCC,
|
|
- },
|
|
- [13] = {
|
|
- .mask = 0x00000008,
|
|
- .mask_reg = QEIC_CIMR,
|
|
- .pri_code = 4,
|
|
- .pri_reg = QEIC_CIPZCC,
|
|
- },
|
|
- [14] = {
|
|
- .mask = 0x00000004,
|
|
- .mask_reg = QEIC_CIMR,
|
|
- .pri_code = 5,
|
|
- .pri_reg = QEIC_CIPZCC,
|
|
- },
|
|
- [15] = {
|
|
- .mask = 0x00000002,
|
|
- .mask_reg = QEIC_CIMR,
|
|
- .pri_code = 6,
|
|
- .pri_reg = QEIC_CIPZCC,
|
|
- },
|
|
- [20] = {
|
|
- .mask = 0x10000000,
|
|
- .mask_reg = QEIC_CRIMR,
|
|
- .pri_code = 3,
|
|
- .pri_reg = QEIC_CIPRTA,
|
|
- },
|
|
- [25] = {
|
|
- .mask = 0x00800000,
|
|
- .mask_reg = QEIC_CRIMR,
|
|
- .pri_code = 0,
|
|
- .pri_reg = QEIC_CIPRTB,
|
|
- },
|
|
- [26] = {
|
|
- .mask = 0x00400000,
|
|
- .mask_reg = QEIC_CRIMR,
|
|
- .pri_code = 1,
|
|
- .pri_reg = QEIC_CIPRTB,
|
|
- },
|
|
- [27] = {
|
|
- .mask = 0x00200000,
|
|
- .mask_reg = QEIC_CRIMR,
|
|
- .pri_code = 2,
|
|
- .pri_reg = QEIC_CIPRTB,
|
|
- },
|
|
- [28] = {
|
|
- .mask = 0x00100000,
|
|
- .mask_reg = QEIC_CRIMR,
|
|
- .pri_code = 3,
|
|
- .pri_reg = QEIC_CIPRTB,
|
|
- },
|
|
- [32] = {
|
|
- .mask = 0x80000000,
|
|
- .mask_reg = QEIC_CIMR,
|
|
- .pri_code = 0,
|
|
- .pri_reg = QEIC_CIPXCC,
|
|
- },
|
|
- [33] = {
|
|
- .mask = 0x40000000,
|
|
- .mask_reg = QEIC_CIMR,
|
|
- .pri_code = 1,
|
|
- .pri_reg = QEIC_CIPXCC,
|
|
- },
|
|
- [34] = {
|
|
- .mask = 0x20000000,
|
|
- .mask_reg = QEIC_CIMR,
|
|
- .pri_code = 2,
|
|
- .pri_reg = QEIC_CIPXCC,
|
|
- },
|
|
- [35] = {
|
|
- .mask = 0x10000000,
|
|
- .mask_reg = QEIC_CIMR,
|
|
- .pri_code = 3,
|
|
- .pri_reg = QEIC_CIPXCC,
|
|
- },
|
|
- [36] = {
|
|
- .mask = 0x08000000,
|
|
- .mask_reg = QEIC_CIMR,
|
|
- .pri_code = 4,
|
|
- .pri_reg = QEIC_CIPXCC,
|
|
- },
|
|
- [40] = {
|
|
- .mask = 0x00800000,
|
|
- .mask_reg = QEIC_CIMR,
|
|
- .pri_code = 0,
|
|
- .pri_reg = QEIC_CIPYCC,
|
|
- },
|
|
- [41] = {
|
|
- .mask = 0x00400000,
|
|
- .mask_reg = QEIC_CIMR,
|
|
- .pri_code = 1,
|
|
- .pri_reg = QEIC_CIPYCC,
|
|
- },
|
|
- [42] = {
|
|
- .mask = 0x00200000,
|
|
- .mask_reg = QEIC_CIMR,
|
|
- .pri_code = 2,
|
|
- .pri_reg = QEIC_CIPYCC,
|
|
- },
|
|
- [43] = {
|
|
- .mask = 0x00100000,
|
|
- .mask_reg = QEIC_CIMR,
|
|
- .pri_code = 3,
|
|
- .pri_reg = QEIC_CIPYCC,
|
|
- },
|
|
-};
|
|
-
|
|
-static inline u32 qe_ic_read(volatile __be32 __iomem * base, unsigned int reg)
|
|
-{
|
|
- return in_be32(base + (reg >> 2));
|
|
-}
|
|
-
|
|
-static inline void qe_ic_write(volatile __be32 __iomem * base, unsigned int reg,
|
|
- u32 value)
|
|
-{
|
|
- out_be32(base + (reg >> 2), value);
|
|
-}
|
|
-
|
|
-static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
|
|
-{
|
|
- return irq_get_chip_data(virq);
|
|
-}
|
|
-
|
|
-static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d)
|
|
-{
|
|
- return irq_data_get_irq_chip_data(d);
|
|
-}
|
|
-
|
|
-static void qe_ic_unmask_irq(struct irq_data *d)
|
|
-{
|
|
- struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
|
|
- unsigned int src = irqd_to_hwirq(d);
|
|
- unsigned long flags;
|
|
- u32 temp;
|
|
-
|
|
- raw_spin_lock_irqsave(&qe_ic_lock, flags);
|
|
-
|
|
- temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
|
|
- qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
|
|
- temp | qe_ic_info[src].mask);
|
|
-
|
|
- raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
|
|
-}
|
|
-
|
|
-static void qe_ic_mask_irq(struct irq_data *d)
|
|
-{
|
|
- struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
|
|
- unsigned int src = irqd_to_hwirq(d);
|
|
- unsigned long flags;
|
|
- u32 temp;
|
|
-
|
|
- raw_spin_lock_irqsave(&qe_ic_lock, flags);
|
|
-
|
|
- temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
|
|
- qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
|
|
- temp & ~qe_ic_info[src].mask);
|
|
-
|
|
- /* Flush the above write before enabling interrupts; otherwise,
|
|
- * spurious interrupts will sometimes happen. To be 100% sure
|
|
- * that the write has reached the device before interrupts are
|
|
- * enabled, the mask register would have to be read back; however,
|
|
- * this is not required for correctness, only to avoid wasting
|
|
- * time on a large number of spurious interrupts. In testing,
|
|
- * a sync reduced the observed spurious interrupts to zero.
|
|
- */
|
|
- mb();
|
|
-
|
|
- raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
|
|
-}
|
|
-
|
|
-static struct irq_chip qe_ic_irq_chip = {
|
|
- .name = "QEIC",
|
|
- .irq_unmask = qe_ic_unmask_irq,
|
|
- .irq_mask = qe_ic_mask_irq,
|
|
- .irq_mask_ack = qe_ic_mask_irq,
|
|
-};
|
|
-
|
|
-static int qe_ic_host_match(struct irq_domain *h, struct device_node *node,
|
|
- enum irq_domain_bus_token bus_token)
|
|
-{
|
|
- /* Exact match, unless qe_ic node is NULL */
|
|
- struct device_node *of_node = irq_domain_get_of_node(h);
|
|
- return of_node == NULL || of_node == node;
|
|
-}
|
|
-
|
|
-static int qe_ic_host_map(struct irq_domain *h, unsigned int virq,
|
|
- irq_hw_number_t hw)
|
|
-{
|
|
- struct qe_ic *qe_ic = h->host_data;
|
|
- struct irq_chip *chip;
|
|
-
|
|
- if (hw >= ARRAY_SIZE(qe_ic_info)) {
|
|
- pr_err("%s: Invalid hw irq number for QEIC\n", __func__);
|
|
- return -EINVAL;
|
|
- }
|
|
-
|
|
- if (qe_ic_info[hw].mask == 0) {
|
|
- printk(KERN_ERR "Can't map reserved IRQ\n");
|
|
- return -EINVAL;
|
|
- }
|
|
- /* Default chip */
|
|
- chip = &qe_ic->hc_irq;
|
|
-
|
|
- irq_set_chip_data(virq, qe_ic);
|
|
- irq_set_status_flags(virq, IRQ_LEVEL);
|
|
-
|
|
- irq_set_chip_and_handler(virq, chip, handle_level_irq);
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static const struct irq_domain_ops qe_ic_host_ops = {
|
|
- .match = qe_ic_host_match,
|
|
- .map = qe_ic_host_map,
|
|
- .xlate = irq_domain_xlate_onetwocell,
|
|
-};
|
|
-
|
|
-/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
|
|
-unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
|
|
-{
|
|
- int irq;
|
|
-
|
|
- BUG_ON(qe_ic == NULL);
|
|
-
|
|
- /* get the interrupt source vector. */
|
|
- irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26;
|
|
-
|
|
- if (irq == 0)
|
|
- return NO_IRQ;
|
|
-
|
|
- return irq_linear_revmap(qe_ic->irqhost, irq);
|
|
-}
|
|
-
|
|
-/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
|
|
-unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
|
|
-{
|
|
- int irq;
|
|
-
|
|
- BUG_ON(qe_ic == NULL);
|
|
-
|
|
- /* get the interrupt source vector. */
|
|
- irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26;
|
|
-
|
|
- if (irq == 0)
|
|
- return NO_IRQ;
|
|
-
|
|
- return irq_linear_revmap(qe_ic->irqhost, irq);
|
|
-}
|
|
-
|
|
-void __init qe_ic_init(struct device_node *node, unsigned int flags,
|
|
- void (*low_handler)(struct irq_desc *desc),
|
|
- void (*high_handler)(struct irq_desc *desc))
|
|
-{
|
|
- struct qe_ic *qe_ic;
|
|
- struct resource res;
|
|
- u32 temp = 0, ret, high_active = 0;
|
|
-
|
|
- ret = of_address_to_resource(node, 0, &res);
|
|
- if (ret)
|
|
- return;
|
|
-
|
|
- qe_ic = kzalloc(sizeof(*qe_ic), GFP_KERNEL);
|
|
- if (qe_ic == NULL)
|
|
- return;
|
|
-
|
|
- qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
|
|
- &qe_ic_host_ops, qe_ic);
|
|
- if (qe_ic->irqhost == NULL) {
|
|
- kfree(qe_ic);
|
|
- return;
|
|
- }
|
|
-
|
|
- qe_ic->regs = ioremap(res.start, resource_size(&res));
|
|
-
|
|
- qe_ic->hc_irq = qe_ic_irq_chip;
|
|
-
|
|
- qe_ic->virq_high = irq_of_parse_and_map(node, 0);
|
|
- qe_ic->virq_low = irq_of_parse_and_map(node, 1);
|
|
-
|
|
- if (qe_ic->virq_low == NO_IRQ) {
|
|
- printk(KERN_ERR "Failed to map QE_IC low IRQ\n");
|
|
- kfree(qe_ic);
|
|
- return;
|
|
- }
|
|
-
|
|
- /* default priority scheme is grouped. If spread mode is */
|
|
- /* required, configure cicr accordingly. */
|
|
- if (flags & QE_IC_SPREADMODE_GRP_W)
|
|
- temp |= CICR_GWCC;
|
|
- if (flags & QE_IC_SPREADMODE_GRP_X)
|
|
- temp |= CICR_GXCC;
|
|
- if (flags & QE_IC_SPREADMODE_GRP_Y)
|
|
- temp |= CICR_GYCC;
|
|
- if (flags & QE_IC_SPREADMODE_GRP_Z)
|
|
- temp |= CICR_GZCC;
|
|
- if (flags & QE_IC_SPREADMODE_GRP_RISCA)
|
|
- temp |= CICR_GRTA;
|
|
- if (flags & QE_IC_SPREADMODE_GRP_RISCB)
|
|
- temp |= CICR_GRTB;
|
|
-
|
|
- /* choose destination signal for highest priority interrupt */
|
|
- if (flags & QE_IC_HIGH_SIGNAL) {
|
|
- temp |= (SIGNAL_HIGH << CICR_HPIT_SHIFT);
|
|
- high_active = 1;
|
|
- }
|
|
-
|
|
- qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
|
|
-
|
|
- irq_set_handler_data(qe_ic->virq_low, qe_ic);
|
|
- irq_set_chained_handler(qe_ic->virq_low, low_handler);
|
|
-
|
|
- if (qe_ic->virq_high != NO_IRQ &&
|
|
- qe_ic->virq_high != qe_ic->virq_low) {
|
|
- irq_set_handler_data(qe_ic->virq_high, qe_ic);
|
|
- irq_set_chained_handler(qe_ic->virq_high, high_handler);
|
|
- }
|
|
-}
|
|
-
|
|
-void qe_ic_set_highest_priority(unsigned int virq, int high)
|
|
-{
|
|
- struct qe_ic *qe_ic = qe_ic_from_irq(virq);
|
|
- unsigned int src = virq_to_hw(virq);
|
|
- u32 temp = 0;
|
|
-
|
|
- temp = qe_ic_read(qe_ic->regs, QEIC_CICR);
|
|
-
|
|
- temp &= ~CICR_HP_MASK;
|
|
- temp |= src << CICR_HP_SHIFT;
|
|
-
|
|
- temp &= ~CICR_HPIT_MASK;
|
|
- temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << CICR_HPIT_SHIFT;
|
|
-
|
|
- qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
|
|
-}
|
|
-
|
|
-/* Set Priority level within its group, from 1 to 8 */
|
|
-int qe_ic_set_priority(unsigned int virq, unsigned int priority)
|
|
-{
|
|
- struct qe_ic *qe_ic = qe_ic_from_irq(virq);
|
|
- unsigned int src = virq_to_hw(virq);
|
|
- u32 temp;
|
|
-
|
|
- if (priority > 8 || priority == 0)
|
|
- return -EINVAL;
|
|
- if (WARN_ONCE(src >= ARRAY_SIZE(qe_ic_info),
|
|
- "%s: Invalid hw irq number for QEIC\n", __func__))
|
|
- return -EINVAL;
|
|
- if (qe_ic_info[src].pri_reg == 0)
|
|
- return -EINVAL;
|
|
-
|
|
- temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].pri_reg);
|
|
-
|
|
- if (priority < 4) {
|
|
- temp &= ~(0x7 << (32 - priority * 3));
|
|
- temp |= qe_ic_info[src].pri_code << (32 - priority * 3);
|
|
- } else {
|
|
- temp &= ~(0x7 << (24 - priority * 3));
|
|
- temp |= qe_ic_info[src].pri_code << (24 - priority * 3);
|
|
- }
|
|
-
|
|
- qe_ic_write(qe_ic->regs, qe_ic_info[src].pri_reg, temp);
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-/* Set a QE priority to use high irq, only priority 1~2 can use high irq */
|
|
-int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high)
|
|
-{
|
|
- struct qe_ic *qe_ic = qe_ic_from_irq(virq);
|
|
- unsigned int src = virq_to_hw(virq);
|
|
- u32 temp, control_reg = QEIC_CICNR, shift = 0;
|
|
-
|
|
- if (priority > 2 || priority == 0)
|
|
- return -EINVAL;
|
|
- if (WARN_ONCE(src >= ARRAY_SIZE(qe_ic_info),
|
|
- "%s: Invalid hw irq number for QEIC\n", __func__))
|
|
- return -EINVAL;
|
|
-
|
|
- switch (qe_ic_info[src].pri_reg) {
|
|
- case QEIC_CIPZCC:
|
|
- shift = CICNR_ZCC1T_SHIFT;
|
|
- break;
|
|
- case QEIC_CIPWCC:
|
|
- shift = CICNR_WCC1T_SHIFT;
|
|
- break;
|
|
- case QEIC_CIPYCC:
|
|
- shift = CICNR_YCC1T_SHIFT;
|
|
- break;
|
|
- case QEIC_CIPXCC:
|
|
- shift = CICNR_XCC1T_SHIFT;
|
|
- break;
|
|
- case QEIC_CIPRTA:
|
|
- shift = CRICR_RTA1T_SHIFT;
|
|
- control_reg = QEIC_CRICR;
|
|
- break;
|
|
- case QEIC_CIPRTB:
|
|
- shift = CRICR_RTB1T_SHIFT;
|
|
- control_reg = QEIC_CRICR;
|
|
- break;
|
|
- default:
|
|
- return -EINVAL;
|
|
- }
|
|
-
|
|
- shift += (2 - priority) * 2;
|
|
- temp = qe_ic_read(qe_ic->regs, control_reg);
|
|
- temp &= ~(SIGNAL_MASK << shift);
|
|
- temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << shift;
|
|
- qe_ic_write(qe_ic->regs, control_reg, temp);
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static struct bus_type qe_ic_subsys = {
|
|
- .name = "qe_ic",
|
|
- .dev_name = "qe_ic",
|
|
-};
|
|
-
|
|
-static struct device device_qe_ic = {
|
|
- .id = 0,
|
|
- .bus = &qe_ic_subsys,
|
|
-};
|
|
-
|
|
-static int __init init_qe_ic_sysfs(void)
|
|
-{
|
|
- int rc;
|
|
-
|
|
- printk(KERN_DEBUG "Registering qe_ic with sysfs...\n");
|
|
-
|
|
- rc = subsys_system_register(&qe_ic_subsys, NULL);
|
|
- if (rc) {
|
|
- printk(KERN_ERR "Failed registering qe_ic sys class\n");
|
|
- return -ENODEV;
|
|
- }
|
|
- rc = device_register(&device_qe_ic);
|
|
- if (rc) {
|
|
- printk(KERN_ERR "Failed registering qe_ic sys device\n");
|
|
- return -ENODEV;
|
|
- }
|
|
- return 0;
|
|
-}
|
|
-
|
|
-subsys_initcall(init_qe_ic_sysfs);
|
|
--- /dev/null
|
|
+++ b/drivers/irqchip/irq-qeic.c
|
|
@@ -0,0 +1,605 @@
|
|
+/*
|
|
+ * drivers/irqchip/irq-qeic.c
|
|
+ *
|
|
+ * Copyright (C) 2016 Freescale Semiconductor, Inc. All rights reserved.
|
|
+ *
|
|
+ * Author: Li Yang <leoli@freescale.com>
|
|
+ * Based on code from Shlomi Gridish <gridish@freescale.com>
|
|
+ *
|
|
+ * QUICC ENGINE Interrupt Controller
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms of the GNU General Public License as published by the
|
|
+ * Free Software Foundation; either version 2 of the License, or (at your
|
|
+ * option) any later version.
|
|
+ */
|
|
+
|
|
+#include <linux/of_irq.h>
|
|
+#include <linux/of_address.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/irqdomain.h>
|
|
+#include <linux/irqchip.h>
|
|
+#include <linux/errno.h>
|
|
+#include <linux/of_address.h>
|
|
+#include <linux/of_irq.h>
|
|
+#include <linux/reboot.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/stddef.h>
|
|
+#include <linux/sched.h>
|
|
+#include <linux/signal.h>
|
|
+#include <linux/device.h>
|
|
+#include <linux/spinlock.h>
|
|
+#include <linux/irq.h>
|
|
+#include <asm/io.h>
|
|
+
|
|
+#define NR_QE_IC_INTS 64
|
|
+
|
|
+/* QE IC registers offset */
|
|
+#define QEIC_CICR 0x00
|
|
+#define QEIC_CIVEC 0x04
|
|
+#define QEIC_CRIPNR 0x08
|
|
+#define QEIC_CIPNR 0x0c
|
|
+#define QEIC_CIPXCC 0x10
|
|
+#define QEIC_CIPYCC 0x14
|
|
+#define QEIC_CIPWCC 0x18
|
|
+#define QEIC_CIPZCC 0x1c
|
|
+#define QEIC_CIMR 0x20
|
|
+#define QEIC_CRIMR 0x24
|
|
+#define QEIC_CICNR 0x28
|
|
+#define QEIC_CIPRTA 0x30
|
|
+#define QEIC_CIPRTB 0x34
|
|
+#define QEIC_CRICR 0x3c
|
|
+#define QEIC_CHIVEC 0x60
|
|
+
|
|
+/* Interrupt priority registers */
|
|
+#define CIPCC_SHIFT_PRI0 29
|
|
+#define CIPCC_SHIFT_PRI1 26
|
|
+#define CIPCC_SHIFT_PRI2 23
|
|
+#define CIPCC_SHIFT_PRI3 20
|
|
+#define CIPCC_SHIFT_PRI4 13
|
|
+#define CIPCC_SHIFT_PRI5 10
|
|
+#define CIPCC_SHIFT_PRI6 7
|
|
+#define CIPCC_SHIFT_PRI7 4
|
|
+
|
|
+/* CICR priority modes */
|
|
+#define CICR_GWCC 0x00040000
|
|
+#define CICR_GXCC 0x00020000
|
|
+#define CICR_GYCC 0x00010000
|
|
+#define CICR_GZCC 0x00080000
|
|
+#define CICR_GRTA 0x00200000
|
|
+#define CICR_GRTB 0x00400000
|
|
+#define CICR_HPIT_SHIFT 8
|
|
+#define CICR_HPIT_MASK 0x00000300
|
|
+#define CICR_HP_SHIFT 24
|
|
+#define CICR_HP_MASK 0x3f000000
|
|
+
|
|
+/* CICNR */
|
|
+#define CICNR_WCC1T_SHIFT 20
|
|
+#define CICNR_ZCC1T_SHIFT 28
|
|
+#define CICNR_YCC1T_SHIFT 12
|
|
+#define CICNR_XCC1T_SHIFT 4
|
|
+
|
|
+/* CRICR */
|
|
+#define CRICR_RTA1T_SHIFT 20
|
|
+#define CRICR_RTB1T_SHIFT 28
|
|
+
|
|
+/* Signal indicator */
|
|
+#define SIGNAL_MASK 3
|
|
+#define SIGNAL_HIGH 2
|
|
+#define SIGNAL_LOW 0
|
|
+
|
|
+#define NUM_OF_QE_IC_GROUPS 6
|
|
+
|
|
+/* Flags when we init the QE IC */
|
|
+#define QE_IC_SPREADMODE_GRP_W 0x00000001
|
|
+#define QE_IC_SPREADMODE_GRP_X 0x00000002
|
|
+#define QE_IC_SPREADMODE_GRP_Y 0x00000004
|
|
+#define QE_IC_SPREADMODE_GRP_Z 0x00000008
|
|
+#define QE_IC_SPREADMODE_GRP_RISCA 0x00000010
|
|
+#define QE_IC_SPREADMODE_GRP_RISCB 0x00000020
|
|
+
|
|
+#define QE_IC_LOW_SIGNAL 0x00000100
|
|
+#define QE_IC_HIGH_SIGNAL 0x00000200
|
|
+
|
|
+#define QE_IC_GRP_W_PRI0_DEST_SIGNAL_HIGH 0x00001000
|
|
+#define QE_IC_GRP_W_PRI1_DEST_SIGNAL_HIGH 0x00002000
|
|
+#define QE_IC_GRP_X_PRI0_DEST_SIGNAL_HIGH 0x00004000
|
|
+#define QE_IC_GRP_X_PRI1_DEST_SIGNAL_HIGH 0x00008000
|
|
+#define QE_IC_GRP_Y_PRI0_DEST_SIGNAL_HIGH 0x00010000
|
|
+#define QE_IC_GRP_Y_PRI1_DEST_SIGNAL_HIGH 0x00020000
|
|
+#define QE_IC_GRP_Z_PRI0_DEST_SIGNAL_HIGH 0x00040000
|
|
+#define QE_IC_GRP_Z_PRI1_DEST_SIGNAL_HIGH 0x00080000
|
|
+#define QE_IC_GRP_RISCA_PRI0_DEST_SIGNAL_HIGH 0x00100000
|
|
+#define QE_IC_GRP_RISCA_PRI1_DEST_SIGNAL_HIGH 0x00200000
|
|
+#define QE_IC_GRP_RISCB_PRI0_DEST_SIGNAL_HIGH 0x00400000
|
|
+#define QE_IC_GRP_RISCB_PRI1_DEST_SIGNAL_HIGH 0x00800000
|
|
+#define QE_IC_GRP_W_DEST_SIGNAL_SHIFT (12)
|
|
+
|
|
+/* QE interrupt sources groups */
|
|
+enum qe_ic_grp_id {
|
|
+ QE_IC_GRP_W = 0, /* QE interrupt controller group W */
|
|
+ QE_IC_GRP_X, /* QE interrupt controller group X */
|
|
+ QE_IC_GRP_Y, /* QE interrupt controller group Y */
|
|
+ QE_IC_GRP_Z, /* QE interrupt controller group Z */
|
|
+ QE_IC_GRP_RISCA, /* QE interrupt controller RISC group A */
|
|
+ QE_IC_GRP_RISCB /* QE interrupt controller RISC group B */
|
|
+};
|
|
+
|
|
+struct qe_ic {
|
|
+ /* Control registers offset */
|
|
+ u32 __iomem *regs;
|
|
+
|
|
+ /* The remapper for this QEIC */
|
|
+ struct irq_domain *irqhost;
|
|
+
|
|
+ /* The "linux" controller struct */
|
|
+ struct irq_chip hc_irq;
|
|
+
|
|
+ /* VIRQ numbers of QE high/low irqs */
|
|
+ unsigned int virq_high;
|
|
+ unsigned int virq_low;
|
|
+};
|
|
+
|
|
+/*
|
|
+ * QE interrupt controller internal structure
|
|
+ */
|
|
+struct qe_ic_info {
|
|
+ /* location of this source at the QIMR register. */
|
|
+ u32 mask;
|
|
+
|
|
+ /* Mask register offset */
|
|
+ u32 mask_reg;
|
|
+
|
|
+ /*
|
|
+ * for grouped interrupts sources - the interrupt
|
|
+ * code as appears at the group priority register
|
|
+ */
|
|
+ u8 pri_code;
|
|
+
|
|
+ /* Group priority register offset */
|
|
+ u32 pri_reg;
|
|
+};
|
|
+
|
|
+static DEFINE_RAW_SPINLOCK(qe_ic_lock);
|
|
+
|
|
+static struct qe_ic_info qe_ic_info[] = {
|
|
+ [1] = {
|
|
+ .mask = 0x00008000,
|
|
+ .mask_reg = QEIC_CIMR,
|
|
+ .pri_code = 0,
|
|
+ .pri_reg = QEIC_CIPWCC,
|
|
+ },
|
|
+ [2] = {
|
|
+ .mask = 0x00004000,
|
|
+ .mask_reg = QEIC_CIMR,
|
|
+ .pri_code = 1,
|
|
+ .pri_reg = QEIC_CIPWCC,
|
|
+ },
|
|
+ [3] = {
|
|
+ .mask = 0x00002000,
|
|
+ .mask_reg = QEIC_CIMR,
|
|
+ .pri_code = 2,
|
|
+ .pri_reg = QEIC_CIPWCC,
|
|
+ },
|
|
+ [10] = {
|
|
+ .mask = 0x00000040,
|
|
+ .mask_reg = QEIC_CIMR,
|
|
+ .pri_code = 1,
|
|
+ .pri_reg = QEIC_CIPZCC,
|
|
+ },
|
|
+ [11] = {
|
|
+ .mask = 0x00000020,
|
|
+ .mask_reg = QEIC_CIMR,
|
|
+ .pri_code = 2,
|
|
+ .pri_reg = QEIC_CIPZCC,
|
|
+ },
|
|
+ [12] = {
|
|
+ .mask = 0x00000010,
|
|
+ .mask_reg = QEIC_CIMR,
|
|
+ .pri_code = 3,
|
|
+ .pri_reg = QEIC_CIPZCC,
|
|
+ },
|
|
+ [13] = {
|
|
+ .mask = 0x00000008,
|
|
+ .mask_reg = QEIC_CIMR,
|
|
+ .pri_code = 4,
|
|
+ .pri_reg = QEIC_CIPZCC,
|
|
+ },
|
|
+ [14] = {
|
|
+ .mask = 0x00000004,
|
|
+ .mask_reg = QEIC_CIMR,
|
|
+ .pri_code = 5,
|
|
+ .pri_reg = QEIC_CIPZCC,
|
|
+ },
|
|
+ [15] = {
|
|
+ .mask = 0x00000002,
|
|
+ .mask_reg = QEIC_CIMR,
|
|
+ .pri_code = 6,
|
|
+ .pri_reg = QEIC_CIPZCC,
|
|
+ },
|
|
+ [20] = {
|
|
+ .mask = 0x10000000,
|
|
+ .mask_reg = QEIC_CRIMR,
|
|
+ .pri_code = 3,
|
|
+ .pri_reg = QEIC_CIPRTA,
|
|
+ },
|
|
+ [25] = {
|
|
+ .mask = 0x00800000,
|
|
+ .mask_reg = QEIC_CRIMR,
|
|
+ .pri_code = 0,
|
|
+ .pri_reg = QEIC_CIPRTB,
|
|
+ },
|
|
+ [26] = {
|
|
+ .mask = 0x00400000,
|
|
+ .mask_reg = QEIC_CRIMR,
|
|
+ .pri_code = 1,
|
|
+ .pri_reg = QEIC_CIPRTB,
|
|
+ },
|
|
+ [27] = {
|
|
+ .mask = 0x00200000,
|
|
+ .mask_reg = QEIC_CRIMR,
|
|
+ .pri_code = 2,
|
|
+ .pri_reg = QEIC_CIPRTB,
|
|
+ },
|
|
+ [28] = {
|
|
+ .mask = 0x00100000,
|
|
+ .mask_reg = QEIC_CRIMR,
|
|
+ .pri_code = 3,
|
|
+ .pri_reg = QEIC_CIPRTB,
|
|
+ },
|
|
+ [32] = {
|
|
+ .mask = 0x80000000,
|
|
+ .mask_reg = QEIC_CIMR,
|
|
+ .pri_code = 0,
|
|
+ .pri_reg = QEIC_CIPXCC,
|
|
+ },
|
|
+ [33] = {
|
|
+ .mask = 0x40000000,
|
|
+ .mask_reg = QEIC_CIMR,
|
|
+ .pri_code = 1,
|
|
+ .pri_reg = QEIC_CIPXCC,
|
|
+ },
|
|
+ [34] = {
|
|
+ .mask = 0x20000000,
|
|
+ .mask_reg = QEIC_CIMR,
|
|
+ .pri_code = 2,
|
|
+ .pri_reg = QEIC_CIPXCC,
|
|
+ },
|
|
+ [35] = {
|
|
+ .mask = 0x10000000,
|
|
+ .mask_reg = QEIC_CIMR,
|
|
+ .pri_code = 3,
|
|
+ .pri_reg = QEIC_CIPXCC,
|
|
+ },
|
|
+ [36] = {
|
|
+ .mask = 0x08000000,
|
|
+ .mask_reg = QEIC_CIMR,
|
|
+ .pri_code = 4,
|
|
+ .pri_reg = QEIC_CIPXCC,
|
|
+ },
|
|
+ [40] = {
|
|
+ .mask = 0x00800000,
|
|
+ .mask_reg = QEIC_CIMR,
|
|
+ .pri_code = 0,
|
|
+ .pri_reg = QEIC_CIPYCC,
|
|
+ },
|
|
+ [41] = {
|
|
+ .mask = 0x00400000,
|
|
+ .mask_reg = QEIC_CIMR,
|
|
+ .pri_code = 1,
|
|
+ .pri_reg = QEIC_CIPYCC,
|
|
+ },
|
|
+ [42] = {
|
|
+ .mask = 0x00200000,
|
|
+ .mask_reg = QEIC_CIMR,
|
|
+ .pri_code = 2,
|
|
+ .pri_reg = QEIC_CIPYCC,
|
|
+ },
|
|
+ [43] = {
|
|
+ .mask = 0x00100000,
|
|
+ .mask_reg = QEIC_CIMR,
|
|
+ .pri_code = 3,
|
|
+ .pri_reg = QEIC_CIPYCC,
|
|
+ },
|
|
+};
|
|
+
|
|
+static inline u32 qe_ic_read(__be32 __iomem *base, unsigned int reg)
|
|
+{
|
|
+ return ioread32be(base + (reg >> 2));
|
|
+}
|
|
+
|
|
+static inline void qe_ic_write(__be32 __iomem *base, unsigned int reg,
|
|
+ u32 value)
|
|
+{
|
|
+ iowrite32be(value, base + (reg >> 2));
|
|
+}
|
|
+
|
|
+static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
|
|
+{
|
|
+ return irq_get_chip_data(virq);
|
|
+}
|
|
+
|
|
+static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d)
|
|
+{
|
|
+ return irq_data_get_irq_chip_data(d);
|
|
+}
|
|
+
|
|
+static void qe_ic_unmask_irq(struct irq_data *d)
|
|
+{
|
|
+ struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
|
|
+ unsigned int src = irqd_to_hwirq(d);
|
|
+ unsigned long flags;
|
|
+ u32 temp;
|
|
+
|
|
+ raw_spin_lock_irqsave(&qe_ic_lock, flags);
|
|
+
|
|
+ temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
|
|
+ qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
|
|
+ temp | qe_ic_info[src].mask);
|
|
+
|
|
+ raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
|
|
+}
|
|
+
|
|
+static void qe_ic_mask_irq(struct irq_data *d)
|
|
+{
|
|
+ struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
|
|
+ unsigned int src = irqd_to_hwirq(d);
|
|
+ unsigned long flags;
|
|
+ u32 temp;
|
|
+
|
|
+ raw_spin_lock_irqsave(&qe_ic_lock, flags);
|
|
+
|
|
+ temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
|
|
+ qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
|
|
+ temp & ~qe_ic_info[src].mask);
|
|
+
|
|
+ /* Flush the above write before enabling interrupts; otherwise,
|
|
+ * spurious interrupts will sometimes happen. To be 100% sure
|
|
+ * that the write has reached the device before interrupts are
|
|
+ * enabled, the mask register would have to be read back; however,
|
|
+ * this is not required for correctness, only to avoid wasting
|
|
+ * time on a large number of spurious interrupts. In testing,
|
|
+ * a sync reduced the observed spurious interrupts to zero.
|
|
+ */
|
|
+ mb();
|
|
+
|
|
+ raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
|
|
+}
|
|
+
|
|
+static struct irq_chip qe_ic_irq_chip = {
|
|
+ .name = "QEIC",
|
|
+ .irq_unmask = qe_ic_unmask_irq,
|
|
+ .irq_mask = qe_ic_mask_irq,
|
|
+ .irq_mask_ack = qe_ic_mask_irq,
|
|
+};
|
|
+
|
|
+static int qe_ic_host_match(struct irq_domain *h, struct device_node *node,
|
|
+ enum irq_domain_bus_token bus_token)
|
|
+{
|
|
+ /* Exact match, unless qe_ic node is NULL */
|
|
+ struct device_node *of_node = irq_domain_get_of_node(h);
|
|
+ return of_node == NULL || of_node == node;
|
|
+}
|
|
+
|
|
+static int qe_ic_host_map(struct irq_domain *h, unsigned int virq,
|
|
+ irq_hw_number_t hw)
|
|
+{
|
|
+ struct qe_ic *qe_ic = h->host_data;
|
|
+ struct irq_chip *chip;
|
|
+
|
|
+ if (hw >= ARRAY_SIZE(qe_ic_info)) {
|
|
+ pr_err("%s: Invalid hw irq number for QEIC\n", __func__);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (qe_ic_info[hw].mask == 0) {
|
|
+ printk(KERN_ERR "Can't map reserved IRQ\n");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ /* Default chip */
|
|
+ chip = &qe_ic->hc_irq;
|
|
+
|
|
+ irq_set_chip_data(virq, qe_ic);
|
|
+ irq_set_status_flags(virq, IRQ_LEVEL);
|
|
+
|
|
+ irq_set_chip_and_handler(virq, chip, handle_level_irq);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static const struct irq_domain_ops qe_ic_host_ops = {
|
|
+ .match = qe_ic_host_match,
|
|
+ .map = qe_ic_host_map,
|
|
+ .xlate = irq_domain_xlate_onetwocell,
|
|
+};
|
|
+
|
|
+/* Return an interrupt vector or 0 if no interrupt is pending. */
|
|
+static unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
|
|
+{
|
|
+ int irq;
|
|
+
|
|
+ BUG_ON(qe_ic == NULL);
|
|
+
|
|
+ /* get the interrupt source vector. */
|
|
+ irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26;
|
|
+
|
|
+ if (irq == 0)
|
|
+ return 0;
|
|
+
|
|
+ return irq_linear_revmap(qe_ic->irqhost, irq);
|
|
+}
|
|
+
|
|
+/* Return an interrupt vector or 0 if no interrupt is pending. */
|
|
+static unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
|
|
+{
|
|
+ int irq;
|
|
+
|
|
+ BUG_ON(qe_ic == NULL);
|
|
+
|
|
+ /* get the interrupt source vector. */
|
|
+ irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26;
|
|
+
|
|
+ if (irq == 0)
|
|
+ return 0;
|
|
+
|
|
+ return irq_linear_revmap(qe_ic->irqhost, irq);
|
|
+}
|
|
+
|
|
+static inline void qe_ic_cascade_low_ipic(struct irq_desc *desc)
|
|
+{
|
|
+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
|
|
+ unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
|
|
+
|
|
+ if (cascade_irq != 0)
|
|
+ generic_handle_irq(cascade_irq);
|
|
+}
|
|
+
|
|
+static inline void qe_ic_cascade_high_ipic(struct irq_desc *desc)
|
|
+{
|
|
+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
|
|
+ unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
|
|
+
|
|
+ if (cascade_irq != 0)
|
|
+ generic_handle_irq(cascade_irq);
|
|
+}
|
|
+
|
|
+static inline void qe_ic_cascade_low_mpic(struct irq_desc *desc)
|
|
+{
|
|
+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
|
|
+ unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
|
|
+ struct irq_chip *chip = irq_desc_get_chip(desc);
|
|
+
|
|
+ if (cascade_irq != 0)
|
|
+ generic_handle_irq(cascade_irq);
|
|
+
|
|
+ chip->irq_eoi(&desc->irq_data);
|
|
+}
|
|
+
|
|
+static inline void qe_ic_cascade_high_mpic(struct irq_desc *desc)
|
|
+{
|
|
+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
|
|
+ unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
|
|
+ struct irq_chip *chip = irq_desc_get_chip(desc);
|
|
+
|
|
+ if (cascade_irq != 0)
|
|
+ generic_handle_irq(cascade_irq);
|
|
+
|
|
+ chip->irq_eoi(&desc->irq_data);
|
|
+}
|
|
+
|
|
+static inline void qe_ic_cascade_muxed_mpic(struct irq_desc *desc)
|
|
+{
|
|
+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
|
|
+ unsigned int cascade_irq;
|
|
+ struct irq_chip *chip = irq_desc_get_chip(desc);
|
|
+
|
|
+ cascade_irq = qe_ic_get_high_irq(qe_ic);
|
|
+ if (cascade_irq == 0)
|
|
+ cascade_irq = qe_ic_get_low_irq(qe_ic);
|
|
+
|
|
+ if (cascade_irq != 0)
|
|
+ generic_handle_irq(cascade_irq);
|
|
+
|
|
+ chip->irq_eoi(&desc->irq_data);
|
|
+}
|
|
+
|
|
+static int __init qe_ic_init(struct device_node *node, unsigned int flags)
|
|
+{
|
|
+ struct qe_ic *qe_ic;
|
|
+ struct resource res;
|
|
+ u32 temp = 0, high_active = 0;
|
|
+ int ret = 0;
|
|
+
|
|
+ if (!node)
|
|
+ return -ENODEV;
|
|
+
|
|
+ ret = of_address_to_resource(node, 0, &res);
|
|
+ if (ret) {
|
|
+ ret = -ENODEV;
|
|
+ goto err_put_node;
|
|
+ }
|
|
+
|
|
+ qe_ic = kzalloc(sizeof(*qe_ic), GFP_KERNEL);
|
|
+ if (qe_ic == NULL) {
|
|
+ ret = -ENOMEM;
|
|
+ goto err_put_node;
|
|
+ }
|
|
+
|
|
+ qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
|
|
+ &qe_ic_host_ops, qe_ic);
|
|
+ if (qe_ic->irqhost == NULL) {
|
|
+ ret = -ENOMEM;
|
|
+ goto err_free_qe_ic;
|
|
+ }
|
|
+
|
|
+ qe_ic->regs = ioremap(res.start, resource_size(&res));
|
|
+
|
|
+ qe_ic->hc_irq = qe_ic_irq_chip;
|
|
+
|
|
+ qe_ic->virq_high = irq_of_parse_and_map(node, 0);
|
|
+ qe_ic->virq_low = irq_of_parse_and_map(node, 1);
|
|
+
|
|
+ if (qe_ic->virq_low == 0) {
|
|
+ pr_err("Failed to map QE_IC low IRQ\n");
|
|
+ ret = -ENOMEM;
|
|
+ goto err_domain_remove;
|
|
+ }
|
|
+
|
|
+ /* default priority scheme is grouped. If spread mode is */
|
|
+ /* required, configure cicr accordingly. */
|
|
+ if (flags & QE_IC_SPREADMODE_GRP_W)
|
|
+ temp |= CICR_GWCC;
|
|
+ if (flags & QE_IC_SPREADMODE_GRP_X)
|
|
+ temp |= CICR_GXCC;
|
|
+ if (flags & QE_IC_SPREADMODE_GRP_Y)
|
|
+ temp |= CICR_GYCC;
|
|
+ if (flags & QE_IC_SPREADMODE_GRP_Z)
|
|
+ temp |= CICR_GZCC;
|
|
+ if (flags & QE_IC_SPREADMODE_GRP_RISCA)
|
|
+ temp |= CICR_GRTA;
|
|
+ if (flags & QE_IC_SPREADMODE_GRP_RISCB)
|
|
+ temp |= CICR_GRTB;
|
|
+
|
|
+ /* choose destination signal for highest priority interrupt */
|
|
+ if (flags & QE_IC_HIGH_SIGNAL) {
|
|
+ temp |= (SIGNAL_HIGH << CICR_HPIT_SHIFT);
|
|
+ high_active = 1;
|
|
+ }
|
|
+
|
|
+ qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
|
|
+
|
|
+ irq_set_handler_data(qe_ic->virq_low, qe_ic);
|
|
+ irq_set_chained_handler(qe_ic->virq_low, qe_ic_cascade_low_mpic);
|
|
+
|
|
+ if (qe_ic->virq_high != 0 &&
|
|
+ qe_ic->virq_high != qe_ic->virq_low) {
|
|
+ irq_set_handler_data(qe_ic->virq_high, qe_ic);
|
|
+ irq_set_chained_handler(qe_ic->virq_high,
|
|
+ qe_ic_cascade_high_mpic);
|
|
+ }
|
|
+ of_node_put(node);
|
|
+ return 0;
|
|
+
|
|
+err_domain_remove:
|
|
+ irq_domain_remove(qe_ic->irqhost);
|
|
+err_free_qe_ic:
|
|
+ kfree(qe_ic);
|
|
+err_put_node:
|
|
+ of_node_put(node);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int __init init_qe_ic(struct device_node *node,
|
|
+ struct device_node *parent)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ ret = qe_ic_init(node, 0);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+IRQCHIP_DECLARE(qeic, "fsl,qe-ic", init_qe_ic);
|
|
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -381,8 +381,8 @@ static netdev_tx_t ucc_hdlc_tx(struct sk
 	/* set bd status and length */
 	bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;
 
-	iowrite16be(bd_status, &bd->status);
 	iowrite16be(skb->len, &bd->length);
+	iowrite16be(bd_status, &bd->status);
 
 	/* Move to next BD in the ring */
 	if (!(bd_status & T_W_S))
@@ -457,7 +457,7 @@ static int hdlc_rx_done(struct ucc_hdlc_
 	struct sk_buff *skb = NULL;
 	hdlc_device *hdlc = dev_to_hdlc(dev);
 	struct qe_bd *bd;
-	u32 bd_status;
+	u16 bd_status;
 	u16 length, howmany = 0;
 	u8 *bdbuffer;
 	int i;
--- a/drivers/soc/fsl/qe/Kconfig
+++ b/drivers/soc/fsl/qe/Kconfig
@@ -4,7 +4,7 @@
 
 config QUICC_ENGINE
 	bool "Freescale QUICC Engine (QE) Support"
-	depends on FSL_SOC && PPC32
+	depends on OF && HAS_IOMEM
 	select GENERIC_ALLOCATOR
 	select CRC32
 	help
--- a/drivers/soc/fsl/qe/Makefile
+++ b/drivers/soc/fsl/qe/Makefile
@@ -1,7 +1,7 @@
 #
 # Makefile for the linux ppc-specific parts of QE
 #
-obj-$(CONFIG_QUICC_ENGINE)+= qe.o qe_common.o qe_ic.o qe_io.o
+obj-$(CONFIG_QUICC_ENGINE)+= qe.o qe_common.o qe_io.o
 obj-$(CONFIG_CPM)	+= qe_common.o
 obj-$(CONFIG_UCC)	+= ucc.o
 obj-$(CONFIG_UCC_SLOW)	+= ucc_slow.o
--- a/drivers/soc/fsl/qe/qe.c
|
|
+++ b/drivers/soc/fsl/qe/qe.c
|
|
@@ -33,8 +33,6 @@
|
|
#include <asm/pgtable.h>
|
|
#include <soc/fsl/qe/immap_qe.h>
|
|
#include <soc/fsl/qe/qe.h>
|
|
-#include <asm/prom.h>
|
|
-#include <asm/rheap.h>
|
|
|
|
static void qe_snums_init(void);
|
|
static int qe_sdma_init(void);
|
|
@@ -109,15 +107,27 @@ void qe_reset(void)
|
|
panic("sdma init failed!");
|
|
}
|
|
|
|
+/* issue commands to QE, return 0 on success while -EIO on error
|
|
+ *
|
|
+ * @cmd: the command code, should be QE_INIT_TX_RX, QE_STOP_TX and so on
|
|
+ * @device: which sub-block will run the command, QE_CR_SUBBLOCK_UCCFAST1 - 8
|
|
+ * , QE_CR_SUBBLOCK_UCCSLOW1 - 8, QE_CR_SUBBLOCK_MCC1 - 3,
|
|
+ * QE_CR_SUBBLOCK_IDMA1 - 4 and such on.
|
|
+ * @mcn_protocol: specifies mode for the command for non-MCC, should be
|
|
+ * QE_CR_PROTOCOL_HDLC_TRANSPARENT, QE_CR_PROTOCOL_QMC, QE_CR_PROTOCOL_UART
|
|
+ * and such on.
|
|
+ * @cmd_input: command related data.
|
|
+ */
|
|
int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
|
|
{
|
|
unsigned long flags;
|
|
u8 mcn_shift = 0, dev_shift = 0;
|
|
- u32 ret;
|
|
+ int ret;
|
|
+ int i;
|
|
|
|
spin_lock_irqsave(&qe_lock, flags);
|
|
if (cmd == QE_RESET) {
|
|
- out_be32(&qe_immr->cp.cecr, (u32) (cmd | QE_CR_FLG));
|
|
+ iowrite32be((cmd | QE_CR_FLG), &qe_immr->cp.cecr);
|
|
} else {
|
|
if (cmd == QE_ASSIGN_PAGE) {
|
|
/* Here device is the SNUM, not sub-block */
|
|
@@ -134,20 +144,26 @@ int qe_issue_cmd(u32 cmd, u32 device, u8
|
|
mcn_shift = QE_CR_MCN_NORMAL_SHIFT;
|
|
}
|
|
|
|
- out_be32(&qe_immr->cp.cecdr, cmd_input);
|
|
- out_be32(&qe_immr->cp.cecr,
|
|
- (cmd | QE_CR_FLG | ((u32) device << dev_shift) | (u32)
|
|
- mcn_protocol << mcn_shift));
|
|
+ iowrite32be(cmd_input, &qe_immr->cp.cecdr);
|
|
+ iowrite32be((cmd | QE_CR_FLG | ((u32)device << dev_shift) |
|
|
+ (u32)mcn_protocol << mcn_shift), &qe_immr->cp.cecr);
|
|
}
|
|
|
|
/* wait for the QE_CR_FLG to clear */
|
|
- ret = spin_event_timeout((in_be32(&qe_immr->cp.cecr) & QE_CR_FLG) == 0,
|
|
- 100, 0);
|
|
+ ret = -EIO;
|
|
+ for (i = 0; i < 100; i++) {
|
|
+ if ((ioread32be(&qe_immr->cp.cecr) & QE_CR_FLG) == 0) {
|
|
+ ret = 0;
|
|
+ break;
|
|
+ }
|
|
+ udelay(1);
|
|
+ }
|
|
+
|
|
/* On timeout (e.g. failure), the expression will be false (ret == 0),
|
|
otherwise it will be true (ret == 1). */
|
|
spin_unlock_irqrestore(&qe_lock, flags);
|
|
|
|
- return ret == 1;
|
|
+ return ret;
|
|
}
|
|
EXPORT_SYMBOL(qe_issue_cmd);
|
|
|
|
@@ -169,8 +185,8 @@ static unsigned int brg_clk = 0;
|
|
unsigned int qe_get_brg_clk(void)
|
|
{
|
|
struct device_node *qe;
|
|
- int size;
|
|
- const u32 *prop;
|
|
+ u32 val;
|
|
+ int ret;
|
|
unsigned int mod;
|
|
|
|
if (brg_clk)
|
|
@@ -183,9 +199,9 @@ unsigned int qe_get_brg_clk(void)
|
|
return brg_clk;
|
|
}
|
|
|
|
- prop = of_get_property(qe, "brg-frequency", &size);
|
|
- if (prop && size == sizeof(*prop))
|
|
- brg_clk = *prop;
|
|
+ ret = of_property_read_u32(qe, "brg-frequency", &val);
|
|
+ if (!ret)
|
|
+ brg_clk = val;
|
|
|
|
of_node_put(qe);
|
|
|
|
@@ -234,7 +250,7 @@ int qe_setbrg(enum qe_clock brg, unsigne
|
|
tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) |
|
|
QE_BRGC_ENABLE | div16;
|
|
|
|
- out_be32(&qe_immr->brg.brgc[brg - QE_BRG1], tempval);
|
|
+ iowrite32be(tempval, &qe_immr->brg.brgc[brg - QE_BRG1]);
|
|
|
|
return 0;
|
|
}
|
|
@@ -368,9 +384,9 @@ static int qe_sdma_init(void)
|
|
return -ENOMEM;
|
|
}
|
|
|
|
- out_be32(&sdma->sdebcr, (u32) sdma_buf_offset & QE_SDEBCR_BA_MASK);
|
|
- out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK |
|
|
- (0x1 << QE_SDMR_CEN_SHIFT)));
|
|
+ iowrite32be((u32)sdma_buf_offset & QE_SDEBCR_BA_MASK, &sdma->sdebcr);
|
|
+ iowrite32be((QE_SDMR_GLB_1_MSK | (0x1 << QE_SDMR_CEN_SHIFT)),
|
|
+ &sdma->sdmr);
|
|
|
|
return 0;
|
|
}
|
|
@@ -408,14 +424,14 @@ static void qe_upload_microcode(const vo
|
|
"uploading microcode '%s'\n", ucode->id);
|
|
|
|
/* Use auto-increment */
|
|
- out_be32(&qe_immr->iram.iadd, be32_to_cpu(ucode->iram_offset) |
|
|
- QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR);
|
|
+ iowrite32be(be32_to_cpu(ucode->iram_offset) | QE_IRAM_IADD_AIE |
|
|
+ QE_IRAM_IADD_BADDR, &qe_immr->iram.iadd);
|
|
|
|
for (i = 0; i < be32_to_cpu(ucode->count); i++)
|
|
- out_be32(&qe_immr->iram.idata, be32_to_cpu(code[i]));
|
|
+ iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata);
|
|
|
|
/* Set I-RAM Ready Register */
|
|
- out_be32(&qe_immr->iram.iready, be32_to_cpu(QE_IRAM_READY));
|
|
+ iowrite32be(be32_to_cpu(QE_IRAM_READY), &qe_immr->iram.iready);
|
|
}
|
|
|
|
/*
|
|
@@ -500,7 +516,7 @@ int qe_upload_firmware(const struct qe_f
|
|
* If the microcode calls for it, split the I-RAM.
|
|
*/
|
|
if (!firmware->split)
|
|
- setbits16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR);
|
|
+ qe_setbits16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR);
|
|
|
|
if (firmware->soc.model)
|
|
printk(KERN_INFO
|
|
@@ -534,11 +550,11 @@ int qe_upload_firmware(const struct qe_f
|
|
u32 trap = be32_to_cpu(ucode->traps[j]);
|
|
|
|
if (trap)
|
|
- out_be32(&qe_immr->rsp[i].tibcr[j], trap);
|
|
+ iowrite32be(trap, &qe_immr->rsp[i].tibcr[j]);
|
|
}
|
|
|
|
/* Enable traps */
|
|
- out_be32(&qe_immr->rsp[i].eccr, be32_to_cpu(ucode->eccr));
|
|
+ iowrite32be(be32_to_cpu(ucode->eccr), &qe_immr->rsp[i].eccr);
|
|
}
|
|
|
|
qe_firmware_uploaded = 1;
|
|
@@ -657,9 +673,9 @@ EXPORT_SYMBOL(qe_get_num_of_risc);
|
|
unsigned int qe_get_num_of_snums(void)
|
|
{
|
|
struct device_node *qe;
|
|
- int size;
|
|
unsigned int num_of_snums;
|
|
- const u32 *prop;
|
|
+ u32 val;
|
|
+ int ret;
|
|
|
|
num_of_snums = 28; /* The default number of snum for threads is 28 */
|
|
qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
|
|
@@ -673,9 +689,9 @@ unsigned int qe_get_num_of_snums(void)
|
|
return num_of_snums;
|
|
}
|
|
|
|
- prop = of_get_property(qe, "fsl,qe-num-snums", &size);
|
|
- if (prop && size == sizeof(*prop)) {
|
|
- num_of_snums = *prop;
|
|
+ ret = of_property_read_u32(qe, "fsl,qe-num-snums", &val);
|
|
+ if (!ret) {
|
|
+ num_of_snums = val;
|
|
if ((num_of_snums < 28) || (num_of_snums > QE_NUM_OF_SNUM)) {
|
|
/* No QE ever has fewer than 28 SNUMs */
|
|
pr_err("QE: number of snum is invalid\n");
|
|
--- a/drivers/soc/fsl/qe/qe_ic.h
|
|
+++ /dev/null
|
|
@@ -1,103 +0,0 @@
|
|
-/*
|
|
- * drivers/soc/fsl/qe/qe_ic.h
|
|
- *
|
|
- * QUICC ENGINE Interrupt Controller Header
|
|
- *
|
|
- * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
|
|
- *
|
|
- * Author: Li Yang <leoli@freescale.com>
|
|
- * Based on code from Shlomi Gridish <gridish@freescale.com>
|
|
- *
|
|
- * This program is free software; you can redistribute it and/or modify it
|
|
- * under the terms of the GNU General Public License as published by the
|
|
- * Free Software Foundation; either version 2 of the License, or (at your
|
|
- * option) any later version.
|
|
- */
|
|
-#ifndef _POWERPC_SYSDEV_QE_IC_H
|
|
-#define _POWERPC_SYSDEV_QE_IC_H
|
|
-
|
|
-#include <soc/fsl/qe/qe_ic.h>
|
|
-
|
|
-#define NR_QE_IC_INTS 64
|
|
-
|
|
-/* QE IC registers offset */
|
|
-#define QEIC_CICR 0x00
|
|
-#define QEIC_CIVEC 0x04
|
|
-#define QEIC_CRIPNR 0x08
|
|
-#define QEIC_CIPNR 0x0c
|
|
-#define QEIC_CIPXCC 0x10
|
|
-#define QEIC_CIPYCC 0x14
|
|
-#define QEIC_CIPWCC 0x18
|
|
-#define QEIC_CIPZCC 0x1c
|
|
-#define QEIC_CIMR 0x20
|
|
-#define QEIC_CRIMR 0x24
|
|
-#define QEIC_CICNR 0x28
|
|
-#define QEIC_CIPRTA 0x30
|
|
-#define QEIC_CIPRTB 0x34
|
|
-#define QEIC_CRICR 0x3c
|
|
-#define QEIC_CHIVEC 0x60
|
|
-
|
|
-/* Interrupt priority registers */
|
|
-#define CIPCC_SHIFT_PRI0 29
|
|
-#define CIPCC_SHIFT_PRI1 26
|
|
-#define CIPCC_SHIFT_PRI2 23
|
|
-#define CIPCC_SHIFT_PRI3 20
|
|
-#define CIPCC_SHIFT_PRI4 13
|
|
-#define CIPCC_SHIFT_PRI5 10
|
|
-#define CIPCC_SHIFT_PRI6 7
|
|
-#define CIPCC_SHIFT_PRI7 4
|
|
-
|
|
-/* CICR priority modes */
|
|
-#define CICR_GWCC 0x00040000
|
|
-#define CICR_GXCC 0x00020000
|
|
-#define CICR_GYCC 0x00010000
|
|
-#define CICR_GZCC 0x00080000
|
|
-#define CICR_GRTA 0x00200000
|
|
-#define CICR_GRTB 0x00400000
|
|
-#define CICR_HPIT_SHIFT 8
|
|
-#define CICR_HPIT_MASK 0x00000300
|
|
-#define CICR_HP_SHIFT 24
|
|
-#define CICR_HP_MASK 0x3f000000
|
|
-
|
|
-/* CICNR */
|
|
-#define CICNR_WCC1T_SHIFT 20
|
|
-#define CICNR_ZCC1T_SHIFT 28
|
|
-#define CICNR_YCC1T_SHIFT 12
|
|
-#define CICNR_XCC1T_SHIFT 4
|
|
-
|
|
-/* CRICR */
|
|
-#define CRICR_RTA1T_SHIFT 20
|
|
-#define CRICR_RTB1T_SHIFT 28
|
|
-
|
|
-/* Signal indicator */
|
|
-#define SIGNAL_MASK 3
|
|
-#define SIGNAL_HIGH 2
|
|
-#define SIGNAL_LOW 0
|
|
-
|
|
-struct qe_ic {
|
|
- /* Control registers offset */
|
|
- volatile u32 __iomem *regs;
|
|
-
|
|
- /* The remapper for this QEIC */
|
|
- struct irq_domain *irqhost;
|
|
-
|
|
- /* The "linux" controller struct */
|
|
- struct irq_chip hc_irq;
|
|
-
|
|
- /* VIRQ numbers of QE high/low irqs */
|
|
- unsigned int virq_high;
|
|
- unsigned int virq_low;
|
|
-};
|
|
-
|
|
-/*
|
|
- * QE interrupt controller internal structure
|
|
- */
|
|
-struct qe_ic_info {
|
|
- u32 mask; /* location of this source at the QIMR register. */
|
|
- u32 mask_reg; /* Mask register offset */
|
|
- u8 pri_code; /* for grouped interrupts sources - the interrupt
|
|
- code as appears at the group priority register */
|
|
- u32 pri_reg; /* Group priority register offset */
|
|
-};
|
|
-
|
|
-#endif /* _POWERPC_SYSDEV_QE_IC_H */
|
|
--- a/drivers/soc/fsl/qe/qe_io.c
|
|
+++ b/drivers/soc/fsl/qe/qe_io.c
|
|
@@ -22,8 +22,6 @@
|
|
|
|
#include <asm/io.h>
|
|
#include <soc/fsl/qe/qe.h>
|
|
-#include <asm/prom.h>
|
|
-#include <sysdev/fsl_soc.h>
|
|
|
|
#undef DEBUG
|
|
|
|
@@ -61,16 +59,16 @@ void __par_io_config_pin(struct qe_pio_r
|
|
pin_mask1bit = (u32) (1 << (QE_PIO_PINS - (pin + 1)));
|
|
|
|
/* Set open drain, if required */
|
|
- tmp_val = in_be32(&par_io->cpodr);
|
|
+ tmp_val = ioread32be(&par_io->cpodr);
|
|
if (open_drain)
|
|
- out_be32(&par_io->cpodr, pin_mask1bit | tmp_val);
|
|
+ iowrite32be(pin_mask1bit | tmp_val, &par_io->cpodr);
|
|
else
|
|
- out_be32(&par_io->cpodr, ~pin_mask1bit & tmp_val);
|
|
+ iowrite32be(~pin_mask1bit & tmp_val, &par_io->cpodr);
|
|
|
|
/* define direction */
|
|
tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
|
|
- in_be32(&par_io->cpdir2) :
|
|
- in_be32(&par_io->cpdir1);
|
|
+ ioread32be(&par_io->cpdir2) :
|
|
+ ioread32be(&par_io->cpdir1);
|
|
|
|
/* get all bits mask for 2 bit per port */
|
|
pin_mask2bits = (u32) (0x3 << (QE_PIO_PINS -
|
|
@@ -82,34 +80,30 @@ void __par_io_config_pin(struct qe_pio_r
|
|
|
|
/* clear and set 2 bits mask */
|
|
if (pin > (QE_PIO_PINS / 2) - 1) {
|
|
- out_be32(&par_io->cpdir2,
|
|
- ~pin_mask2bits & tmp_val);
|
|
+ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir2);
|
|
tmp_val &= ~pin_mask2bits;
|
|
- out_be32(&par_io->cpdir2, new_mask2bits | tmp_val);
|
|
+ iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir2);
|
|
} else {
|
|
- out_be32(&par_io->cpdir1,
|
|
- ~pin_mask2bits & tmp_val);
|
|
+ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir1);
|
|
tmp_val &= ~pin_mask2bits;
|
|
- out_be32(&par_io->cpdir1, new_mask2bits | tmp_val);
|
|
+ iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir1);
|
|
}
|
|
/* define pin assignment */
|
|
tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
|
|
- in_be32(&par_io->cppar2) :
|
|
- in_be32(&par_io->cppar1);
|
|
+ ioread32be(&par_io->cppar2) :
|
|
+ ioread32be(&par_io->cppar1);
|
|
|
|
new_mask2bits = (u32) (assignment << (QE_PIO_PINS -
|
|
(pin % (QE_PIO_PINS / 2) + 1) * 2));
|
|
/* clear and set 2 bits mask */
|
|
if (pin > (QE_PIO_PINS / 2) - 1) {
|
|
- out_be32(&par_io->cppar2,
|
|
- ~pin_mask2bits & tmp_val);
|
|
+ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar2);
|
|
tmp_val &= ~pin_mask2bits;
|
|
- out_be32(&par_io->cppar2, new_mask2bits | tmp_val);
|
|
+ iowrite32be(new_mask2bits | tmp_val, &par_io->cppar2);
|
|
} else {
|
|
- out_be32(&par_io->cppar1,
|
|
- ~pin_mask2bits & tmp_val);
|
|
+ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar1);
|
|
tmp_val &= ~pin_mask2bits;
|
|
- out_be32(&par_io->cppar1, new_mask2bits | tmp_val);
|
|
+ iowrite32be(new_mask2bits | tmp_val, &par_io->cppar1);
|
|
}
|
|
}
|
|
EXPORT_SYMBOL(__par_io_config_pin);
|
|
@@ -137,12 +131,12 @@ int par_io_data_set(u8 port, u8 pin, u8
|
|
/* calculate pin location */
|
|
pin_mask = (u32) (1 << (QE_PIO_PINS - 1 - pin));
|
|
|
|
- tmp_val = in_be32(&par_io[port].cpdata);
|
|
+ tmp_val = ioread32be(&par_io[port].cpdata);
|
|
|
|
if (val == 0) /* clear */
|
|
- out_be32(&par_io[port].cpdata, ~pin_mask & tmp_val);
|
|
+ iowrite32be(~pin_mask & tmp_val, &par_io[port].cpdata);
|
|
else /* set */
|
|
- out_be32(&par_io[port].cpdata, pin_mask | tmp_val);
|
|
+ iowrite32be(pin_mask | tmp_val, &par_io[port].cpdata);
|
|
|
|
return 0;
|
|
}
|
|
--- a/drivers/soc/fsl/qe/qe_tdm.c
+++ b/drivers/soc/fsl/qe/qe_tdm.c
@@ -227,10 +227,10 @@ void ucc_tdm_init(struct ucc_tdm *utdm,
 			    &siram[siram_entry_id * 32 + 0x200 + i]);
 	}
 
-	setbits16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)],
-		  SIR_LAST);
-	setbits16(&siram[(siram_entry_id * 32) + 0x200 + (utdm->num_of_ts - 1)],
-		  SIR_LAST);
+	qe_setbits16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)],
+		     SIR_LAST);
+	qe_setbits16(&siram[(siram_entry_id * 32) + 0x200 +
+		     (utdm->num_of_ts - 1)], SIR_LAST);
 
 	/* Set SIxMR register */
 	sixmr = SIMR_SAD(siram_entry_id);
--- a/drivers/soc/fsl/qe/ucc.c
+++ b/drivers/soc/fsl/qe/ucc.c
@@ -39,7 +39,7 @@ int ucc_set_qe_mux_mii_mng(unsigned int
 		return -EINVAL;
 
 	spin_lock_irqsave(&cmxgcr_lock, flags);
-	clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
+	qe_clrsetbits32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
 			ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT);
 	spin_unlock_irqrestore(&cmxgcr_lock, flags);
 
@@ -84,7 +84,7 @@ int ucc_set_type(unsigned int ucc_num, e
 		return -EINVAL;
 	}
 
-	clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK,
+	qe_clrsetbits8(guemr, UCC_GUEMR_MODE_MASK,
 		     UCC_GUEMR_SET_RESERVED3 | speed);
 
 	return 0;
@@ -113,9 +113,9 @@ int ucc_mux_set_grant_tsa_bkpt(unsigned
 	get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
 
 	if (set)
-		setbits32(cmxucr, mask << shift);
+		qe_setbits32(cmxucr, mask << shift);
 	else
-		clrbits32(cmxucr, mask << shift);
+		qe_clrbits32(cmxucr, mask << shift);
 
 	return 0;
 }
@@ -211,7 +211,7 @@ int ucc_set_qe_mux_rxtx(unsigned int ucc
 	if (mode == COMM_DIR_RX)
 		shift += 4;
 
-	clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+	qe_clrsetbits32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
 			clock_bits << shift);
 
 	return 0;
--- a/drivers/soc/fsl/qe/ucc_fast.c
|
|
+++ b/drivers/soc/fsl/qe/ucc_fast.c
|
|
@@ -33,41 +33,41 @@ void ucc_fast_dump_regs(struct ucc_fast_
|
|
printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs);
|
|
|
|
printk(KERN_INFO "gumr : addr=0x%p, val=0x%08x\n",
|
|
- &uccf->uf_regs->gumr, in_be32(&uccf->uf_regs->gumr));
|
|
+ &uccf->uf_regs->gumr, ioread32be(&uccf->uf_regs->gumr));
|
|
printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n",
|
|
- &uccf->uf_regs->upsmr, in_be32(&uccf->uf_regs->upsmr));
|
|
+ &uccf->uf_regs->upsmr, ioread32be(&uccf->uf_regs->upsmr));
|
|
printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n",
|
|
- &uccf->uf_regs->utodr, in_be16(&uccf->uf_regs->utodr));
|
|
+ &uccf->uf_regs->utodr, ioread16be(&uccf->uf_regs->utodr));
|
|
printk(KERN_INFO "udsr : addr=0x%p, val=0x%04x\n",
|
|
- &uccf->uf_regs->udsr, in_be16(&uccf->uf_regs->udsr));
|
|
+ &uccf->uf_regs->udsr, ioread16be(&uccf->uf_regs->udsr));
|
|
printk(KERN_INFO "ucce : addr=0x%p, val=0x%08x\n",
|
|
- &uccf->uf_regs->ucce, in_be32(&uccf->uf_regs->ucce));
|
|
+ &uccf->uf_regs->ucce, ioread32be(&uccf->uf_regs->ucce));
|
|
printk(KERN_INFO "uccm : addr=0x%p, val=0x%08x\n",
|
|
- &uccf->uf_regs->uccm, in_be32(&uccf->uf_regs->uccm));
|
|
+ &uccf->uf_regs->uccm, ioread32be(&uccf->uf_regs->uccm));
|
|
printk(KERN_INFO "uccs : addr=0x%p, val=0x%02x\n",
|
|
- &uccf->uf_regs->uccs, in_8(&uccf->uf_regs->uccs));
|
|
+ &uccf->uf_regs->uccs, ioread8(&uccf->uf_regs->uccs));
|
|
printk(KERN_INFO "urfb : addr=0x%p, val=0x%08x\n",
|
|
- &uccf->uf_regs->urfb, in_be32(&uccf->uf_regs->urfb));
|
|
+ &uccf->uf_regs->urfb, ioread32be(&uccf->uf_regs->urfb));
|
|
printk(KERN_INFO "urfs : addr=0x%p, val=0x%04x\n",
|
|
- &uccf->uf_regs->urfs, in_be16(&uccf->uf_regs->urfs));
|
|
+ &uccf->uf_regs->urfs, ioread16be(&uccf->uf_regs->urfs));
|
|
printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n",
|
|
- &uccf->uf_regs->urfet, in_be16(&uccf->uf_regs->urfet));
|
|
+ &uccf->uf_regs->urfet, ioread16be(&uccf->uf_regs->urfet));
|
|
printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n",
|
|
- &uccf->uf_regs->urfset, in_be16(&uccf->uf_regs->urfset));
|
|
+ &uccf->uf_regs->urfset, ioread16be(&uccf->uf_regs->urfset));
|
|
printk(KERN_INFO "utfb : addr=0x%p, val=0x%08x\n",
|
|
- &uccf->uf_regs->utfb, in_be32(&uccf->uf_regs->utfb));
|
|
+ &uccf->uf_regs->utfb, ioread32be(&uccf->uf_regs->utfb));
|
|
printk(KERN_INFO "utfs : addr=0x%p, val=0x%04x\n",
|
|
- &uccf->uf_regs->utfs, in_be16(&uccf->uf_regs->utfs));
|
|
+ &uccf->uf_regs->utfs, ioread16be(&uccf->uf_regs->utfs));
|
|
printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n",
|
|
- &uccf->uf_regs->utfet, in_be16(&uccf->uf_regs->utfet));
|
|
+ &uccf->uf_regs->utfet, ioread16be(&uccf->uf_regs->utfet));
|
|
printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n",
|
|
- &uccf->uf_regs->utftt, in_be16(&uccf->uf_regs->utftt));
|
|
+ &uccf->uf_regs->utftt, ioread16be(&uccf->uf_regs->utftt));
|
|
printk(KERN_INFO "utpt : addr=0x%p, val=0x%04x\n",
|
|
- &uccf->uf_regs->utpt, in_be16(&uccf->uf_regs->utpt));
|
|
+ &uccf->uf_regs->utpt, ioread16be(&uccf->uf_regs->utpt));
|
|
printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n",
|
|
- &uccf->uf_regs->urtry, in_be32(&uccf->uf_regs->urtry));
|
|
+ &uccf->uf_regs->urtry, ioread32be(&uccf->uf_regs->urtry));
|
|
printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n",
|
|
- &uccf->uf_regs->guemr, in_8(&uccf->uf_regs->guemr));
|
|
+ &uccf->uf_regs->guemr, ioread8(&uccf->uf_regs->guemr));
|
|
}
|
|
EXPORT_SYMBOL(ucc_fast_dump_regs);
|
|
|
|
@@ -89,7 +89,7 @@ EXPORT_SYMBOL(ucc_fast_get_qe_cr_subbloc
|
|
|
|
void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf)
|
|
{
|
|
- out_be16(&uccf->uf_regs->utodr, UCC_FAST_TOD);
|
|
+ iowrite16be(UCC_FAST_TOD, &uccf->uf_regs->utodr);
|
|
}
|
|
EXPORT_SYMBOL(ucc_fast_transmit_on_demand);
|
|
|
|
@@ -101,7 +101,7 @@ void ucc_fast_enable(struct ucc_fast_pri
|
|
uf_regs = uccf->uf_regs;
|
|
|
|
/* Enable reception and/or transmission on this UCC. */
|
|
- gumr = in_be32(&uf_regs->gumr);
|
|
+ gumr = ioread32be(&uf_regs->gumr);
|
|
if (mode & COMM_DIR_TX) {
|
|
gumr |= UCC_FAST_GUMR_ENT;
|
|
uccf->enabled_tx = 1;
|
|
@@ -110,7 +110,7 @@ void ucc_fast_enable(struct ucc_fast_pri
|
|
gumr |= UCC_FAST_GUMR_ENR;
|
|
uccf->enabled_rx = 1;
|
|
}
|
|
- out_be32(&uf_regs->gumr, gumr);
|
|
+ iowrite32be(gumr, &uf_regs->gumr);
|
|
}
|
|
EXPORT_SYMBOL(ucc_fast_enable);
|
|
|
|
@@ -122,7 +122,7 @@ void ucc_fast_disable(struct ucc_fast_pr
|
|
uf_regs = uccf->uf_regs;
|
|
|
|
/* Disable reception and/or transmission on this UCC. */
|
|
- gumr = in_be32(&uf_regs->gumr);
|
|
+ gumr = ioread32be(&uf_regs->gumr);
|
|
if (mode & COMM_DIR_TX) {
|
|
gumr &= ~UCC_FAST_GUMR_ENT;
|
|
uccf->enabled_tx = 0;
|
|
@@ -131,7 +131,7 @@ void ucc_fast_disable(struct ucc_fast_pr
|
|
gumr &= ~UCC_FAST_GUMR_ENR;
|
|
uccf->enabled_rx = 0;
|
|
}
|
|
- out_be32(&uf_regs->gumr, gumr);
|
|
+ iowrite32be(gumr, &uf_regs->gumr);
|
|
}
|
|
EXPORT_SYMBOL(ucc_fast_disable);
|
|
|
|
@@ -263,12 +263,13 @@ int ucc_fast_init(struct ucc_fast_info *
|
|
gumr |= uf_info->tenc;
|
|
gumr |= uf_info->tcrc;
|
|
gumr |= uf_info->mode;
|
|
- out_be32(&uf_regs->gumr, gumr);
|
|
+ iowrite32be(gumr, &uf_regs->gumr);
|
|
|
|
/* Allocate memory for Tx Virtual Fifo */
|
|
uccf->ucc_fast_tx_virtual_fifo_base_offset =
|
|
qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
|
|
- if (IS_ERR_VALUE(uccf->ucc_fast_tx_virtual_fifo_base_offset)) {
|
|
+ if (IS_ERR_VALUE((unsigned long)uccf->
|
|
+ ucc_fast_tx_virtual_fifo_base_offset)) {
|
|
printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO\n",
|
|
__func__);
|
|
uccf->ucc_fast_tx_virtual_fifo_base_offset = 0;
|
|
@@ -281,7 +282,8 @@ int ucc_fast_init(struct ucc_fast_info *
|
|
qe_muram_alloc(uf_info->urfs +
|
|
UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR,
|
|
UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
|
|
- if (IS_ERR_VALUE(uccf->ucc_fast_rx_virtual_fifo_base_offset)) {
|
|
+ if (IS_ERR_VALUE((unsigned long)uccf->
|
|
+ ucc_fast_rx_virtual_fifo_base_offset)) {
|
|
printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO\n",
|
|
__func__);
|
|
uccf->ucc_fast_rx_virtual_fifo_base_offset = 0;
|
|
@@ -290,15 +292,15 @@ int ucc_fast_init(struct ucc_fast_info *
|
|
}
|
|
|
|
/* Set Virtual Fifo registers */
|
|
- out_be16(&uf_regs->urfs, uf_info->urfs);
|
|
- out_be16(&uf_regs->urfet, uf_info->urfet);
|
|
- out_be16(&uf_regs->urfset, uf_info->urfset);
|
|
- out_be16(&uf_regs->utfs, uf_info->utfs);
|
|
- out_be16(&uf_regs->utfet, uf_info->utfet);
|
|
- out_be16(&uf_regs->utftt, uf_info->utftt);
|
|
+ iowrite16be(uf_info->urfs, &uf_regs->urfs);
|
|
+ iowrite16be(uf_info->urfet, &uf_regs->urfet);
|
|
+ iowrite16be(uf_info->urfset, &uf_regs->urfset);
|
|
+ iowrite16be(uf_info->utfs, &uf_regs->utfs);
|
|
+ iowrite16be(uf_info->utfet, &uf_regs->utfet);
|
|
+ iowrite16be(uf_info->utftt, &uf_regs->utftt);
|
|
/* utfb, urfb are offsets from MURAM base */
|
|
- out_be32(&uf_regs->utfb, uccf->ucc_fast_tx_virtual_fifo_base_offset);
|
|
- out_be32(&uf_regs->urfb, uccf->ucc_fast_rx_virtual_fifo_base_offset);
|
|
+ iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
|
|
+ iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);
|
|
|
|
/* Mux clocking */
|
|
/* Grant Support */
|
|
@@ -366,14 +368,14 @@ int ucc_fast_init(struct ucc_fast_info *
|
|
}
|
|
|
|
/* Set interrupt mask register at UCC level. */
|
|
- out_be32(&uf_regs->uccm, uf_info->uccm_mask);
|
|
+ iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
|
|
|
|
/* First, clear anything pending at UCC level,
|
|
* otherwise, old garbage may come through
|
|
* as soon as the dam is opened. */
|
|
|
|
/* Writing '1' clears */
|
|
- out_be32(&uf_regs->ucce, 0xffffffff);
|
|
+ iowrite32be(0xffffffff, &uf_regs->ucce);
|
|
|
|
*uccf_ret = uccf;
|
|
return 0;
|
|
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -34,6 +34,7 @@
 #include <soc/fsl/qe/ucc_slow.h>
 
 #include <linux/firmware.h>
+#include <asm/cpm.h>
 #include <asm/reg.h>
 
 /*
--- a/include/soc/fsl/qe/qe.h
+++ b/include/soc/fsl/qe/qe.h
@@ -21,7 +21,6 @@
 #include <linux/spinlock.h>
 #include <linux/errno.h>
 #include <linux/err.h>
-#include <asm/cpm.h>
 #include <soc/fsl/qe/immap_qe.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
--- a/include/soc/fsl/qe/qe_ic.h
|
|
+++ /dev/null
|
|
@@ -1,139 +0,0 @@
|
|
-/*
|
|
- * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
|
|
- *
|
|
- * Authors: Shlomi Gridish <gridish@freescale.com>
|
|
- * Li Yang <leoli@freescale.com>
|
|
- *
|
|
- * Description:
|
|
- * QE IC external definitions and structure.
|
|
- *
|
|
- * This program is free software; you can redistribute it and/or modify it
|
|
- * under the terms of the GNU General Public License as published by the
|
|
- * Free Software Foundation; either version 2 of the License, or (at your
|
|
- * option) any later version.
|
|
- */
|
|
-#ifndef _ASM_POWERPC_QE_IC_H
|
|
-#define _ASM_POWERPC_QE_IC_H
|
|
-
|
|
-#include <linux/irq.h>
|
|
-
|
|
-struct device_node;
|
|
-struct qe_ic;
|
|
-
|
|
-#define NUM_OF_QE_IC_GROUPS 6
|
|
-
|
|
-/* Flags when we init the QE IC */
|
|
-#define QE_IC_SPREADMODE_GRP_W 0x00000001
|
|
-#define QE_IC_SPREADMODE_GRP_X 0x00000002
|
|
-#define QE_IC_SPREADMODE_GRP_Y 0x00000004
|
|
-#define QE_IC_SPREADMODE_GRP_Z 0x00000008
|
|
-#define QE_IC_SPREADMODE_GRP_RISCA 0x00000010
|
|
-#define QE_IC_SPREADMODE_GRP_RISCB 0x00000020
|
|
-
|
|
-#define QE_IC_LOW_SIGNAL 0x00000100
|
|
-#define QE_IC_HIGH_SIGNAL 0x00000200
|
|
-
|
|
-#define QE_IC_GRP_W_PRI0_DEST_SIGNAL_HIGH 0x00001000
|
|
-#define QE_IC_GRP_W_PRI1_DEST_SIGNAL_HIGH 0x00002000
|
|
-#define QE_IC_GRP_X_PRI0_DEST_SIGNAL_HIGH 0x00004000
|
|
-#define QE_IC_GRP_X_PRI1_DEST_SIGNAL_HIGH 0x00008000
|
|
-#define QE_IC_GRP_Y_PRI0_DEST_SIGNAL_HIGH 0x00010000
|
|
-#define QE_IC_GRP_Y_PRI1_DEST_SIGNAL_HIGH 0x00020000
|
|
-#define QE_IC_GRP_Z_PRI0_DEST_SIGNAL_HIGH 0x00040000
|
|
-#define QE_IC_GRP_Z_PRI1_DEST_SIGNAL_HIGH 0x00080000
|
|
-#define QE_IC_GRP_RISCA_PRI0_DEST_SIGNAL_HIGH 0x00100000
|
|
-#define QE_IC_GRP_RISCA_PRI1_DEST_SIGNAL_HIGH 0x00200000
|
|
-#define QE_IC_GRP_RISCB_PRI0_DEST_SIGNAL_HIGH 0x00400000
|
|
-#define QE_IC_GRP_RISCB_PRI1_DEST_SIGNAL_HIGH 0x00800000
|
|
-#define QE_IC_GRP_W_DEST_SIGNAL_SHIFT (12)
|
|
-
|
|
-/* QE interrupt sources groups */
|
|
-enum qe_ic_grp_id {
|
|
- QE_IC_GRP_W = 0, /* QE interrupt controller group W */
|
|
- QE_IC_GRP_X, /* QE interrupt controller group X */
|
|
- QE_IC_GRP_Y, /* QE interrupt controller group Y */
|
|
- QE_IC_GRP_Z, /* QE interrupt controller group Z */
|
|
- QE_IC_GRP_RISCA, /* QE interrupt controller RISC group A */
|
|
- QE_IC_GRP_RISCB /* QE interrupt controller RISC group B */
|
|
-};
|
|
-
|
|
-#ifdef CONFIG_QUICC_ENGINE
|
|
-void qe_ic_init(struct device_node *node, unsigned int flags,
|
|
- void (*low_handler)(struct irq_desc *desc),
|
|
- void (*high_handler)(struct irq_desc *desc));
|
|
-unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic);
|
|
-unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic);
|
|
-#else
|
|
-static inline void qe_ic_init(struct device_node *node, unsigned int flags,
|
|
- void (*low_handler)(struct irq_desc *desc),
|
|
- void (*high_handler)(struct irq_desc *desc))
|
|
-{}
|
|
-static inline unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
|
|
-{ return 0; }
|
|
-static inline unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
|
|
-{ return 0; }
|
|
-#endif /* CONFIG_QUICC_ENGINE */
|
|
-
|
|
-void qe_ic_set_highest_priority(unsigned int virq, int high);
|
|
-int qe_ic_set_priority(unsigned int virq, unsigned int priority);
|
|
-int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high);
|
|
-
|
|
-static inline void qe_ic_cascade_low_ipic(struct irq_desc *desc)
|
|
-{
|
|
- struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
|
|
- unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
|
|
-
|
|
- if (cascade_irq != NO_IRQ)
|
|
- generic_handle_irq(cascade_irq);
|
|
-}
|
|
-
|
|
-static inline void qe_ic_cascade_high_ipic(struct irq_desc *desc)
|
|
-{
|
|
- struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
|
|
- unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
|
|
-
|
|
- if (cascade_irq != NO_IRQ)
|
|
- generic_handle_irq(cascade_irq);
|
|
-}
|
|
-
|
|
-static inline void qe_ic_cascade_low_mpic(struct irq_desc *desc)
|
|
-{
|
|
- struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
|
|
- unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
|
|
- struct irq_chip *chip = irq_desc_get_chip(desc);
|
|
-
|
|
- if (cascade_irq != NO_IRQ)
|
|
- generic_handle_irq(cascade_irq);
|
|
-
|
|
- chip->irq_eoi(&desc->irq_data);
|
|
-}
|
|
-
|
|
-static inline void qe_ic_cascade_high_mpic(struct irq_desc *desc)
|
|
-{
|
|
- struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
|
|
- unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
|
|
- struct irq_chip *chip = irq_desc_get_chip(desc);
|
|
-
|
|
- if (cascade_irq != NO_IRQ)
|
|
- generic_handle_irq(cascade_irq);
|
|
-
|
|
- chip->irq_eoi(&desc->irq_data);
|
|
-}
|
|
-
|
|
-static inline void qe_ic_cascade_muxed_mpic(struct irq_desc *desc)
|
|
-{
|
|
- struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
|
|
- unsigned int cascade_irq;
|
|
- struct irq_chip *chip = irq_desc_get_chip(desc);
|
|
-
|
|
- cascade_irq = qe_ic_get_high_irq(qe_ic);
|
|
- if (cascade_irq == NO_IRQ)
|
|
- cascade_irq = qe_ic_get_low_irq(qe_ic);
|
|
-
|
|
- if (cascade_irq != NO_IRQ)
|
|
- generic_handle_irq(cascade_irq);
|
|
-
|
|
- chip->irq_eoi(&desc->irq_data);
|
|
-}
|
|
-
|
|
-#endif /* _ASM_POWERPC_QE_IC_H */