Mirror of https://github.com/openwrt/openwrt.git (synced 2024-12-20 14:13:16 +00:00)
Commit 55623a9c83
Fixes the following security vulnerabilities:

CVE-2017-8890
The inet_csk_clone_lock function in net/ipv4/inet_connection_sock.c in the Linux kernel through 4.10.15 allows attackers to cause a denial of service (double free) or possibly have unspecified other impact by leveraging use of the accept system call.

CVE-2017-9074
The IPv6 fragmentation implementation in the Linux kernel through 4.11.1 does not consider that the nexthdr field may be associated with an invalid option, which allows local users to cause a denial of service (out-of-bounds read and BUG) or possibly have unspecified other impact via crafted socket and send system calls.

CVE-2017-9075
The sctp_v6_create_accept_sk function in net/sctp/ipv6.c in the Linux kernel through 4.11.1 mishandles inheritance, which allows local users to cause a denial of service or possibly have unspecified other impact via crafted system calls, a related issue to CVE-2017-8890.

CVE-2017-9076
The dccp_v6_request_recv_sock function in net/dccp/ipv6.c in the Linux kernel through 4.11.1 mishandles inheritance, which allows local users to cause a denial of service or possibly have unspecified other impact via crafted system calls, a related issue to CVE-2017-8890.

CVE-2017-9077
The tcp_v6_syn_recv_sock function in net/ipv6/tcp_ipv6.c in the Linux kernel through 4.11.1 mishandles inheritance, which allows local users to cause a denial of service or possibly have unspecified other impact via crafted system calls, a related issue to CVE-2017-8890.

CVE-2017-9242
The __ip6_append_data function in net/ipv6/ip6_output.c in the Linux kernel through 4.11.3 is too late in checking whether an overwrite of an skb data structure may occur, which allows local users to cause a denial of service (system crash) via crafted system calls.

Ref: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-8890
Ref: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-9074
Ref: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-9075
Ref: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-9076
Ref: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-9077
Ref: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-9242
Ref: https://www.kernel.org/pub/linux/kernel/v4.x/ChangeLog-4.9.31

Signed-off-by: Jo-Philipp Wich <jo@mein.io>
210 lines, 6.1 KiB
From 5a7ccdf845d64b385affdcffaf2defbe9848be15 Mon Sep 17 00:00:00 2001
From: Ram Chandra Jangir <rjangir@codeaurora.org>
Date: Thu, 20 Apr 2017 10:39:00 +0530
Subject: [PATCH] dmaengine: qcom: bam_dma: Add custom data mapping

Add a new function to support preparing a DMA descriptor
for custom data.

Signed-off-by: Abhishek Sahu <absahu@codeaurora.org>
Signed-off-by: Ram Chandra Jangir <rjangir@codeaurora.org>
---
 drivers/dma/qcom/bam_dma.c       | 97 +++++++++++++++++++++++++++++++++++++---
 include/linux/dma/qcom_bam_dma.h | 14 ++++++
 include/linux/dmaengine.h        | 14 ++++++
 3 files changed, 119 insertions(+), 6 deletions(-)

--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -49,6 +49,7 @@
 #include <linux/clk.h>
 #include <linux/dmaengine.h>
 #include <linux/pm_runtime.h>
+#include <linux/dma/qcom_bam_dma.h>

 #include "../dmaengine.h"
 #include "../virt-dma.h"
@@ -61,11 +62,6 @@ struct bam_desc_hw {

 #define BAM_DMA_AUTOSUSPEND_DELAY 100

-#define DESC_FLAG_INT BIT(15)
-#define DESC_FLAG_EOT BIT(14)
-#define DESC_FLAG_EOB BIT(13)
-#define DESC_FLAG_NWD BIT(12)
-
 struct bam_async_desc {
 	struct virt_dma_desc vd;

@@ -670,6 +666,93 @@ err_out:
 }

 /**
+ * bam_prep_dma_custom_mapping - Prep DMA descriptor from custom data
+ *
+ * @chan: dma channel
+ * @data: custom data
+ * @flags: DMA flags
+ */
+static struct dma_async_tx_descriptor *bam_prep_dma_custom_mapping(
+	struct dma_chan *chan,
+	void *data, unsigned long flags)
+{
+	struct bam_chan *bchan = to_bam_chan(chan);
+	struct bam_device *bdev = bchan->bdev;
+	struct bam_async_desc *async_desc;
+	struct qcom_bam_custom_data *desc_data = data;
+	u32 i;
+	struct bam_desc_hw *desc;
+	unsigned int num_alloc = 0;
+
+
+	if (!is_slave_direction(desc_data->dir)) {
+		dev_err(bdev->dev, "invalid dma direction\n");
+		return NULL;
+	}
+
+	/* calculate number of required entries */
+	for (i = 0; i < desc_data->sgl_cnt; i++)
+		num_alloc += DIV_ROUND_UP(
+			sg_dma_len(&desc_data->bam_sgl[i].sgl), BAM_FIFO_SIZE);
+
+	/* allocate enough room to accommodate the number of entries */
+	async_desc = kzalloc(sizeof(*async_desc) +
+			(num_alloc * sizeof(struct bam_desc_hw)), GFP_NOWAIT);
+
+	if (!async_desc)
+		goto err_out;
+
+	if (flags & DMA_PREP_FENCE)
+		async_desc->flags |= DESC_FLAG_NWD;
+
+	if (flags & DMA_PREP_INTERRUPT)
+		async_desc->flags |= DESC_FLAG_EOT;
+	else
+		async_desc->flags |= DESC_FLAG_INT;
+
+	async_desc->num_desc = num_alloc;
+	async_desc->curr_desc = async_desc->desc;
+	async_desc->dir = desc_data->dir;
+
+	/* fill in temporary descriptors */
+	desc = async_desc->desc;
+	for (i = 0; i < desc_data->sgl_cnt; i++) {
+		unsigned int remainder;
+		unsigned int curr_offset = 0;
+
+		remainder = sg_dma_len(&desc_data->bam_sgl[i].sgl);
+
+		do {
+			desc->addr = cpu_to_le32(
+				sg_dma_address(&desc_data->bam_sgl[i].sgl) +
+				curr_offset);
+
+			if (desc_data->bam_sgl[i].dma_flags)
+				desc->flags |= cpu_to_le16(
+					desc_data->bam_sgl[i].dma_flags);
+
+			if (remainder > BAM_FIFO_SIZE) {
+				desc->size = cpu_to_le16(BAM_FIFO_SIZE);
+				remainder -= BAM_FIFO_SIZE;
+				curr_offset += BAM_FIFO_SIZE;
+			} else {
+				desc->size = cpu_to_le16(remainder);
+				remainder = 0;
+			}
+
+			async_desc->length += desc->size;
+			desc++;
+		} while (remainder > 0);
+	}
+
+	return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);
+
+err_out:
+	kfree(async_desc);
+	return NULL;
+}
+
+/**
  * bam_dma_terminate_all - terminate all transactions on a channel
  * @bchan: bam dma channel
  *
@@ -960,7 +1043,7 @@ static void bam_start_dma(struct bam_cha

 	/* set any special flags on the last descriptor */
 	if (async_desc->num_desc == async_desc->xfer_len)
-		desc[async_desc->xfer_len - 1].flags =
+		desc[async_desc->xfer_len - 1].flags |=
 			cpu_to_le16(async_desc->flags);
 	else
 		desc[async_desc->xfer_len - 1].flags |=
@@ -1237,6 +1320,8 @@ static int bam_dma_probe(struct platform
 	bdev->common.device_alloc_chan_resources = bam_alloc_chan;
 	bdev->common.device_free_chan_resources = bam_free_chan;
 	bdev->common.device_prep_slave_sg = bam_prep_slave_sg;
+	bdev->common.device_prep_dma_custom_mapping =
+				bam_prep_dma_custom_mapping;
 	bdev->common.device_config = bam_slave_config;
 	bdev->common.device_pause = bam_pause;
 	bdev->common.device_resume = bam_resume;
--- a/include/linux/dma/qcom_bam_dma.h
+++ b/include/linux/dma/qcom_bam_dma.h
@@ -65,6 +65,19 @@ enum bam_command_type {
 };

 /*
+ * QCOM BAM DMA custom data
+ *
+ * @sgl_cnt: number of sgl in bam_sgl
+ * @dir: DMA data transfer direction
+ * @bam_sgl: BAM SGL pointer
+ */
+struct qcom_bam_custom_data {
+	u32 sgl_cnt;
+	enum dma_transfer_direction dir;
+	struct qcom_bam_sgl *bam_sgl;
+};
+
+/*
  * qcom_bam_sg_init_table - Init QCOM BAM SGL
  * @bam_sgl: bam sgl
  * @nents: number of entries in bam sgl
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -692,6 +692,8 @@ struct dma_filter {
  *	be called after period_len bytes have been transferred.
  * @device_prep_interleaved_dma: Transfer expression in a generic way.
  * @device_prep_dma_imm_data: DMA's 8 byte immediate data to the dst address
+ * @device_prep_dma_custom_mapping: prepares a dma operation from dma driver
+ *	specific custom data
  * @device_config: Pushes a new configuration to a channel, return 0 or an error
  *	code
  * @device_pause: Pauses any transfer happening on a channel. Returns
@@ -783,6 +785,9 @@ struct dma_device {
 	struct dma_async_tx_descriptor *(*device_prep_dma_imm_data)(
 		struct dma_chan *chan, dma_addr_t dst, u64 data,
 		unsigned long flags);
+	struct dma_async_tx_descriptor *(*device_prep_dma_custom_mapping)(
+		struct dma_chan *chan, void *data,
+		unsigned long flags);

 	int (*device_config)(struct dma_chan *chan,
 			     struct dma_slave_config *config);
@@ -899,6 +904,15 @@ static inline struct dma_async_tx_descri
 		src_sg, src_nents, flags);
 }

+static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_custom_mapping(
+		struct dma_chan *chan,
+		void *data,
+		unsigned long flags)
+{
+	return chan->device->device_prep_dma_custom_mapping(chan, data,
+			flags);
+}
+
 /**
  * dmaengine_terminate_all() - Terminate all active DMA transfers
  * @chan: The channel for which to terminate the transfers
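
Below is a minimal, hypothetical client-side sketch (not part of the patch above) showing how a slave driver that already holds a BAM dma_chan might use the dmaengine_prep_dma_custom_mapping() wrapper and the qcom_bam_custom_data structure introduced by this patch. The function name example_bam_custom_xfer, its parameters, and the chosen error codes are assumptions made for illustration; only the structure fields, the flag handling, and the standard dmaengine submit/issue calls come from the patch and the existing dmaengine API.

/*
 * Hypothetical usage sketch -- not part of the patch. Assumes the caller
 * already owns a BAM dma_chan and has DMA-mapped the qcom_bam_sgl entries,
 * since bam_prep_dma_custom_mapping() reads sg_dma_address()/sg_dma_len()
 * directly.
 */
#include <linux/dmaengine.h>
#include <linux/dma/qcom_bam_dma.h>

static int example_bam_custom_xfer(struct dma_chan *chan,
				   struct qcom_bam_sgl *bam_sgl, u32 sgl_cnt,
				   enum dma_transfer_direction dir)
{
	struct qcom_bam_custom_data data;
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	/* describe the transfer with the structure added by this patch */
	data.sgl_cnt = sgl_cnt;
	data.dir = dir;
	data.bam_sgl = bam_sgl;

	/*
	 * DMA_PREP_INTERRUPT makes bam_prep_dma_custom_mapping() mark the
	 * last descriptor with DESC_FLAG_EOT; DMA_PREP_FENCE would add
	 * DESC_FLAG_NWD.
	 */
	desc = dmaengine_prep_dma_custom_mapping(chan, &data,
						 DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	/* start the transfer; completion is reported through the usual
	 * dmaengine callback or polling mechanisms */
	dma_async_issue_pending(chan);
	return 0;
}

The prep hook takes an opaque void * so that the dmaengine core stays generic: only the BAM driver interprets the pointer as struct qcom_bam_custom_data, which is what lets clients pass BAM-specific per-segment flags that the ordinary device_prep_slave_sg() path cannot express.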