/*
 * Cryptographic API.
 *
 * Cipher operations.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 *               2002 Adam J. Richter <adam@yggdrasil.com>
 *               2004 Jean-Luc Cooke <jlcooke@certainkey.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/scatterwalk.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
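
/*
 * Copy @nbytes between a linear buffer and mapped scatterlist data.
 * @out selects the direction: non-zero copies from @buf into the
 * scatterlist page, zero copies from the page into @buf.
 */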
static inline void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out)
{
        void *src = out ? buf : sgdata;
        void *dst = out ? sgdata : buf;

        memcpy(dst, src, nbytes);
}
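
/* Begin a walk at @sg; the entry must have a non-zero length. */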
void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg)
{
        walk->sg = sg;

        BUG_ON(!sg->length);

        walk->offset = sg->offset;
}
EXPORT_SYMBOL_GPL(scatterwalk_start);
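
/*
 * Map the page currently under the walk with kmap_atomic() and return
 * a pointer at the walk's offset within that page.
 */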
void *scatterwalk_map(struct scatter_walk *walk)
{
        return kmap_atomic(scatterwalk_page(walk)) +
               offset_in_page(walk->offset);
}
EXPORT_SYMBOL_GPL(scatterwalk_map);
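
/*
 * Finish with the current page: flush the data cache if the page was
 * written to, and, when more data follows, round the walk up to the
 * next page boundary, moving on to the next scatterlist entry if the
 * current one is exhausted.
 */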
static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
                                 unsigned int more)
{
        if (out) {
                struct page *page;

                page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT);

                /* Test ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE first as
                 * PageSlab cannot be optimised away per se due to
                 * use of volatile pointer.
                 */
                if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE && !PageSlab(page))
                        flush_dcache_page(page);
        }

        if (more) {
                walk->offset += PAGE_SIZE - 1;
                walk->offset &= PAGE_MASK;
                if (walk->offset >= walk->sg->offset + walk->sg->length)
                        scatterwalk_start(walk, sg_next(walk->sg));
        }
}
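
/*
 * Complete a step of the walk: hand off to scatterwalk_pagedone() once
 * the current page is done with or no more data follows.
 */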
void scatterwalk_done(struct scatter_walk *walk, int out, int more)
{
        if (!(scatterwalk_pagelen(walk) & (PAGE_SIZE - 1)) || !more)
                scatterwalk_pagedone(walk, out, more);
}
EXPORT_SYMBOL_GPL(scatterwalk_done);
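
/*
 * Copy @nbytes between @buf and the scatterlist behind @walk, crossing
 * page and scatterlist-entry boundaries as needed.  @out selects the
 * direction as for memcpy_dir(); a value of 2 advances the walk without
 * copying anything.
 */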
void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
                            size_t nbytes, int out)
{
        for (;;) {
                unsigned int len_this_page = scatterwalk_pagelen(walk);
                u8 *vaddr;

                if (len_this_page > nbytes)
                        len_this_page = nbytes;

                if (out != 2) {
                        vaddr = scatterwalk_map(walk);
                        memcpy_dir(buf, vaddr, len_this_page, out);
                        scatterwalk_unmap(vaddr);
                }

                scatterwalk_advance(walk, len_this_page);

                if (nbytes == len_this_page)
                        break;

                buf += len_this_page;
                nbytes -= len_this_page;

                scatterwalk_pagedone(walk, out & 1, 1);
        }
}
EXPORT_SYMBOL_GPL(scatterwalk_copychunks);
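
/*
 * Copy @nbytes between @buf and @sg, starting @start bytes into the
 * scatterlist.  Nothing is copied if @buf already points at the
 * scatterlist data at that position.
 */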
void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
                              unsigned int start, unsigned int nbytes, int out)
{
        struct scatter_walk walk;
        struct scatterlist tmp[2];

        if (!nbytes)
                return;

        sg = scatterwalk_ffwd(tmp, sg, start);

        if (sg_page(sg) == virt_to_page(buf) &&
            sg->offset == offset_in_page(buf))
                return;

        scatterwalk_start(&walk, sg);
        scatterwalk_copychunks(buf, &walk, nbytes, out);
        scatterwalk_done(&walk, out, 0);
}
EXPORT_SYMBOL_GPL(scatterwalk_map_and_copy);
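
/*
 * Skip the first @len bytes of @src.  If the new position falls on an
 * entry boundary the advanced @src is returned directly; otherwise the
 * two-entry scatterlist @dst is set up to describe the remainder and
 * returned.
 */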
struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
                                     struct scatterlist *src,
                                     unsigned int len)
{
        for (;;) {
                if (!len)
                        return src;

                if (src->length > len)
                        break;

                len -= src->length;
                src = sg_next(src);
        }

        sg_init_table(dst, 2);
        sg_set_page(dst, sg_page(src), src->length - len, src->offset + len);
        scatterwalk_crypto_chain(dst, sg_next(src), 0, 2);

        return dst;
}
EXPORT_SYMBOL_GPL(scatterwalk_ffwd);