Search is not available for this dataset
id
stringlengths
1
8
text
stringlengths
72
9.81M
addition_count
int64
0
10k
commit_subject
stringlengths
0
3.7k
deletion_count
int64
0
8.43k
file_extension
stringlengths
0
32
lang
stringlengths
1
94
license
stringclasses
10 values
repo_name
stringlengths
9
59
1700
<NME> RedisCacheManager.java <BEF> package org.crazycake.shiro; import org.apache.shiro.cache.Cache; import org.apache.shiro.cache.CacheException; import org.apache.shiro.cache.CacheManager; import org.crazycake.shiro.serializer.ObjectSerializer; import org.crazycake.shiro.serializer.RedisSerializer; import org.crazycake.shiro.serializer.StringSerializer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; public class RedisCacheManager implements CacheManager { private final Logger logger = LoggerFactory.getLogger(RedisCacheManager.class); // fast lookup by name map private final ConcurrentMap<String, Cache> caches = new ConcurrentHashMap<>(); private RedisSerializer keySerializer = new StringSerializer(); private RedisSerializer valueSerializer = new ObjectSerializer(); private IRedisManager redisManager; // expire time in seconds private static final int DEFAULT_EXPIRE = 1800; private int expire = DEFAULT_EXPIRE; /** * The Redis key prefix for caches */ public static final String DEFAULT_CACHE_KEY_PREFIX = "shiro:cache:"; private String keyPrefix = DEFAULT_CACHE_KEY_PREFIX; public static final String DEFAULT_PRINCIPAL_ID_FIELD_NAME = "id"; private String principalIdFieldName = DEFAULT_PRINCIPAL_ID_FIELD_NAME; @Override public <K, V> Cache<K, V> getCache(String name) throws CacheException { logger.debug("get cache, name=" + name); Cache<K, V> cache = caches.get(name); if (cache == null) { cache = new RedisCache<K, V>(redisManager, keySerializer, valueSerializer, keyPrefix + name + ":", expire, principalIdFieldName); caches.put(name, cache); } return cache; } public IRedisManager getRedisManager() { return redisManager; } public void setRedisManager(IRedisManager redisManager) { this.redisManager = redisManager; } public String getKeyPrefix() { return keyPrefix; } public void setKeyPrefix(String keyPrefix) { this.keyPrefix = keyPrefix; } public RedisSerializer getKeySerializer() { return keySerializer; } public void setKeySerializer(RedisSerializer keySerializer) { this.keySerializer = keySerializer; } public RedisSerializer getValueSerializer() { return valueSerializer; } public void setValueSerializer(RedisSerializer valueSerializer) { this.valueSerializer = valueSerializer; } public int getExpire() { return expire; } public void setExpire(int expire) { this.expire = expire; } public String getPrincipalIdFieldName() { return principalIdFieldName; } public void setPrincipalIdFieldName(String principalIdFieldName) { this.principalIdFieldName = principalIdFieldName; } } <MSG> Fix authorization cannot be saved as TTL=-1 bug. Remove DEFAULT_EXPIRE of RedisManager. <DFF> @@ -24,7 +24,7 @@ public class RedisCacheManager implements CacheManager { private IRedisManager redisManager; // expire time in seconds - private static final int DEFAULT_EXPIRE = 1800; + public static final int DEFAULT_EXPIRE = 1800; private int expire = DEFAULT_EXPIRE; /**
1
Fix authorization cannot be saved as TTL=-1 bug. Remove DEFAULT_EXPIRE of RedisManager.
1
.java
java
mit
alexxiyang/shiro-redis
1701
<NME> README.md <BEF> shiro-redis ============= ## Introduction shiro only provide the support of ehcache and concurrentHashMap. Here is an implement of redis cache can be used by shiro. Hope it will help you! ## Documentation Official documentation [is located here](http://alexxiyang.github.io/shiro-redis/). ## If you haven't created your own `SessionManager` or `SessionsSecurityManager` You are all set. ## If you have created your own `SessionManager` or `SessionsSecurityManager` <MSG> Update README.md <DFF> @@ -364,7 +364,7 @@ You will have to inject them by yourself. for more deail, see below ## If you haven't created your own `SessionManager` or `SessionsSecurityManager` -You are all set. +You are all set. Enjoy it! ## If you have created your own `SessionManager` or `SessionsSecurityManager`
1
Update README.md
1
.md
md
mit
alexxiyang/shiro-redis
1702
<NME> RedisSentinelManager.java <BEF> package org.crazycake.shiro; import redis.clients.jedis.JedisPoolConfig; import redis.clients.jedis.JedisSentinelPool; import redis.clients.jedis.Protocol; import java.util.Collections; import java.util.HashSet; import java.util.Set; public class RedisSentinelManager extends WorkAloneRedisManager implements IRedisManager { private static final String DEFAULT_HOST = "127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381"; private String host = DEFAULT_HOST; private static final String DEFAULT_MASTER_NAME = "mymaster"; private String masterName = DEFAULT_MASTER_NAME; // timeout for jedis try to connect to redis server, not expire time! In milliseconds private int timeout = Protocol.DEFAULT_TIMEOUT; // timeout for jedis try to read data from redis server private int soTimeout = Protocol.DEFAULT_TIMEOUT; private String password; private int database = Protocol.DEFAULT_DATABASE; private JedisPoolConfig jedisPoolConfig = new JedisPoolConfig(); private void init() { synchronized (this) { synchronized (RedisSentinelManager.class) { if (jedisPool == null) { String[] sentinelHosts = host.split(",\\s*"); Set<String> sentinels = new HashSet<String>(); Collections.addAll(sentinels, sentinelHosts); jedisPool = new JedisSentinelPool(masterName, sentinels, getJedisPoolConfig(), timeout, soTimeout, password, database); } } @Override protected void checkAndInit() { if (jedisPool == null) { init(); } } public String getHost() { return host; } return host; } this.host = host; } public int getTimeout() { return timeout; } return timeout; } public void setTimeout(int timeout) { this.timeout = timeout; } public String getPassword() { return password; } public void setPassword(String password) { this.password = password; } public int getDatabase() { return database; } public void setDatabase(int database) { this.database = database; } public String getMasterName() { return masterName; } this.masterName = masterName; } public JedisPoolConfig getJedisPoolConfig() { return jedisPoolConfig; } public void setJedisPoolConfig(JedisPoolConfig jedisPoolConfig) { this.jedisPoolConfig = jedisPoolConfig; } public int getSoTimeout() { return soTimeout; } return soTimeout; public void setSoTimeout(int soTimeout) { this.soTimeout = soTimeout; } } public JedisSentinelPool getJedisPool() { return jedisPool; } public void setJedisPool(JedisSentinelPool jedisPool) { this.jedisPool = jedisPool; } } <MSG> Merge branch 'master' of https://github.com/alexxiyang/shiro-redis <DFF> @@ -1,5 +1,6 @@ package org.crazycake.shiro; +import redis.clients.jedis.Jedis; import redis.clients.jedis.JedisPoolConfig; import redis.clients.jedis.JedisSentinelPool; import redis.clients.jedis.Protocol; @@ -26,7 +27,15 @@ public class RedisSentinelManager extends BaseRedisManager implements IRedisMana private int database = Protocol.DEFAULT_DATABASE; - private JedisPoolConfig jedisPoolConfig = new JedisPoolConfig(); + private JedisSentinelPool jedisPool; + + @Override + protected Jedis getJedis() { + if(jedisPool == null){ + init(); + } + return jedisPool.getResource(); + } private void init() { synchronized (this) { @@ -39,13 +48,6 @@ public class RedisSentinelManager extends BaseRedisManager implements IRedisMana } } - @Override - protected void checkAndInit() { - if (jedisPool == null) { - init(); - } - } - public String getHost() { return host; } @@ -54,8 +56,6 @@ public class RedisSentinelManager extends BaseRedisManager implements IRedisMana this.host = host; } - - public int getTimeout() { return timeout; } @@ -88,14 +88,6 @@ public class RedisSentinelManager extends BaseRedisManager implements IRedisMana this.masterName = masterName; } - public JedisPoolConfig getJedisPoolConfig() { - return jedisPoolConfig; - } - - public void setJedisPoolConfig(JedisPoolConfig jedisPoolConfig) { - this.jedisPoolConfig = jedisPoolConfig; - } - public int getSoTimeout() { return soTimeout; } @@ -103,4 +95,5 @@ public class RedisSentinelManager extends BaseRedisManager implements IRedisMana public void setSoTimeout(int soTimeout) { this.soTimeout = soTimeout; } + }
11
Merge branch 'master' of https://github.com/alexxiyang/shiro-redis
18
.java
java
mit
alexxiyang/shiro-redis
1703
<NME> commandline.py <BEF> """High level command line interface to hitch.""" from subprocess import call, check_output, PIPE, CalledProcessError from click import command, group, argument, option from sys import stderr, exit, modules, argv from os import path, makedirs, listdir import hitchdir import shutil import signal import signal import copy class CalledProcessError(Exception): """Re-implemented CalledProcessError, since it is not available < python 2.7.""" pass def check_output(command, stdout=PIPE, stderr=PIPE): """Re-implemented subprocess.check_output since it is not available < python 2.7.""" return Popen(command, stdout=stdout, stderr=stderr).communicate()[0] def check_call(command, shell=False): """Re-implemented subprocess.check_call since it is not available < python 2.7.""" process = Popen(command, shell=shell) process.communicate() if process.returncode != 0: raise CalledProcessError return def stop_everything(sig, frame): """Exit hitch.""" exit(1) def installpackages(): """Install packages with hitchsystem.""" hitchsystem = path.abspath(path.join(".hitch", "virtualenv", "bin", "hitchsystem")) signal.signal(signal.SIGINT, signal.SIG_IGN) check_call([hitchsystem, "installpackages", ]) signal.signal(signal.SIGINT, stop_everything) def update_requirements(): """Check hitchreqs.txt match what's installed via pip freeze. If not, update.""" stdout.write(languagestrings.UPDATING_REQUIREMENTS) pip = path.join(hitchdir.get_hitch_directory_or_fail(), "virtualenv", "bin", "pip") hitchreqs_filename = path.join(hitchdir.get_hitch_directory_or_fail(), "..", "hitchreqs.txt") pip_freeze = check_output([pip, "freeze"]).decode('utf8').split('\n') hitchreqs_handle = "" with open(hitchreqs_filename, "r") as hitchreqs_handle: hitchreqs = hitchreqs_handle.read().split('\n') if not sorted(pip_freeze) == sorted(hitchreqs): call([pip, "install", "-r", "hitchreqs.txt"]) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) @group() def cli(): pass @command() @option( '-p', '--python', default=None, help=languagestrings.SPECIFY_PYTHON_TO_CREATE_VIRTUALENV_WITH ) @option( '-v', '--virtualenv', default=None, binfile = path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin", "hitch{}".format(argv[1])) command = [binfile, ] + argv[2:] # Stop responding to signals - the calling command should take over. signal.signal(signal.SIGINT, signal.SIG_IGN) signal.signal(signal.SIGTERM, signal.SIG_IGN) signal.signal(signal.SIGHUP, signal.SIG_IGN) signal.signal(signal.SIGQUIT, signal.SIG_IGN) return_code = call(command) exit(return_code) else: stderr.write("{0} not found.\n".format(virtualenv)) if python is None: if call(["which", "python3"], stdout=PIPE, stderr=PIPE) != 0: stderr.write(languagestrings.YOU_MUST_HAVE_PYTHON3_INSTALLED) stderr.flush() exit(1) python3 = check_output(["which", "python3"]).decode('utf8').replace("\n", "") else: if path.exists(python): python3 = python else: stderr.write("{0} not found.\n".format(python)) exit(1) python_version = check_output([python3, "-V"], stderr=STDOUT).decode('utf8') replacements = ('Python ', ''), ('\n', '') str_version = reduce(lambda a, kv: a.replace(*kv), replacements, python_version) tuple_version = tuple([int(x) for x in str_version.split('.')[:2]]) if tuple_version < (3, 3): stderr.write(languagestrings.YOU_MUST_HAVE_VERSION_ABOVE_PYTHON33) exit(1) if hitchdir.hitch_exists(): hitchdir.check_hitch_directory_integrity() update_requirements() exit(0) makedirs(".hitch") # Store absolute directory in .hitch directory to guard against the directory being moved hitch_dir = path.abspath(".hitch") with open(path.join(hitch_dir, "absdir"), "w") as absdir_handle: absdir_handle.write(hitch_dir) pip = path.abspath(path.join(".hitch", "virtualenv", "bin", "pip")) try: check_call([ virtualenv, ".hitch/virtualenv", "--no-site-packages", "--distribute", "-p", python3 ]) check_call([pip, "install", "--upgrade", "pip"]) check_call([pip, "install", "--upgrade", "setuptools"]) check_call([pip, "install", "unixpackage", "hitchsystem"]) installpackages() if path.exists("hitchreqs.txt"): check_call([pip, "install", "-r", "hitchreqs.txt"]) else: check_call([pip, "install", "hitchtest"]) check_call([pip, "install", "hitchquickstart"]) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) signal.signal(signal.SIGINT, signal.SIG_IGN) check_call([path.abspath(path.join(".hitch", "virtualenv", "bin", "hitchquickstart")), ]) signal.signal(signal.SIGINT, stop_everything) installpackages() except CalledProcessError: stderr.write(languagestrings.ERROR_INITIALIZING_HITCH) hitchdir.remove_hitch_directory_if_exists() exit(1) def get_pip(): """Get the file path to the hitch pip.""" return path.join(hitchdir.get_hitch_directory_or_fail(), "virtualenv", "bin", "pip") @command(context_settings={'help_option_names':[],'ignore_unknown_options':True}, help="dd") @argument('arguments', nargs=-1) def runpackage(arguments): # Generic method to run any installed app in the virtualenv whose name starts with hitch* hitchdir.check_hitch_directory_integrity() binfile = path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin", "hitch{0}".format(argv[1])) command = [binfile, ] + argv[2:] # When receiving an exit signal, just forward it to process child. def forward_signal_to_child(pid, signum, frame): kill(pid, signum) process = Popen(command) signal.signal(signal.SIGINT, partial(forward_signal_to_child, process.pid)) signal.signal(signal.SIGTERM, partial(forward_signal_to_child, process.pid)) signal.signal(signal.SIGHUP, partial(forward_signal_to_child, process.pid)) signal.signal(signal.SIGQUIT, partial(forward_signal_to_child, process.pid)) return_code = process.wait() exit(return_code) @command() @argument('package', required=True) def uninstall(package): """Uninstall hitch package.""" hitchdir.check_hitch_directory_integrity() pip = get_pip() call([pip, "uninstall", package] ) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) update_requirements() @command() @argument('package', required=True) def install(package): """Install hitch package.""" hitchdir.check_hitch_directory_integrity() update_requirements() pip = get_pip() call([pip, "install", package, "-U", ]) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) installpackages() @command() def upgrade(): """Upgrade all installed hitch packages.""" hitchdir.check_hitch_directory_integrity() update_requirements() pip = get_pip() package_list = [ p for p in check_output([pip, "freeze"]).decode('utf8').split('\n') if p != "" and "==" in p ] version_fixed_package_list = [p.split("==")[0] for p in package_list] for package in version_fixed_package_list: call([pip, "install", package, "-U", ]) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) installpackages() @command() def freeze(): """List installed hitch packages.""" hitchdir.check_hitch_directory_integrity() pip = path.join(hitchdir.get_hitch_directory_or_fail(), "virtualenv", "bin", "pip") call([pip, "freeze", ]) @command() def clean(): """Remove the hitch directory entirely.""" if hitchdir.hitch_exists(): hitchdir.remove_hitch_directory_if_exists() else: stderr.write("No hitch directory found. Doing nothing.\n") stderr.flush() @command() @option( '-p', '--packages', default=None, help=( "Specify precise packages to remove - " "e.g. postgresql, postgresql-9.3.9, python, python2.6.8" ) ) def cleanpkg(packages): """Remove installed packages from the .hitchpkg directory.""" hitchpkg = path.join(path.expanduser("~"), ".hitchpkg") if path.exists(hitchpkg): if packages is None: shutil.rmtree(hitchpkg) else: for file_or_dir in listdir(hitchpkg): if file_or_dir.startswith(packages): if path.isdir(path.join(hitchpkg, file_or_dir)): shutil.rmtree(path.join(hitchpkg, file_or_dir)) else: remove(path.join(hitchpkg, file_or_dir)) def run(): """Run hitch bootstrap CLI""" signal.signal(signal.SIGINT, stop_everything) signal.signal(signal.SIGTERM, stop_everything) signal.signal(signal.SIGHUP, stop_everything) signal.signal(signal.SIGQUIT, stop_everything) if hitchdir.hitch_exists(): # Get packages from bin folder that are hitch related python_bin = path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin", "python") if path.exists(python_bin): packages = [ package.replace("hitch", "") for package in listdir( path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin") ) if package.startswith("hitch") and package != "hitch" ] # Add commands that start with "hitch" to the list of commands available (e.g. hitchtest, hitchsmtp) for package in packages: cmd = copy.deepcopy(runpackage) cmd.name = package try: description = check_output([ python_bin, '-c', 'import sys;sys.stdout.write(__import__("hitch{0}").commandline.cli.help)'.format( package ) ]).decode('utf8') except CalledProcessError: description = "" cmd.help = description cmd.short_help = description cli.add_command(cmd) cli.add_command(install) cli.add_command(uninstall) cli.add_command(upgrade) cli.add_command(freeze) else: stderr.write(languagestrings.SOMETHING_CORRUPTED) cli.add_command(clean) cli.add_command(cleanpkg) cli.add_command(init) cli.help = "Hitch test runner for:\n\n {0}.".format(hitchdir.get_hitch_directory()) else: cli.add_command(init) cli.add_command(clean) cli.add_command(cleanpkg) cli.help = "Hitch bootstrapper - '.hitch' directory not detected here." cli() if __name__ == '__main__': run() <MSG> BUG : Exit signals were not being distributed properly to the process group. This fixes that. <DFF> @@ -2,7 +2,7 @@ from subprocess import call, check_output, PIPE, CalledProcessError from click import command, group, argument, option from sys import stderr, exit, modules, argv -from os import path, makedirs, listdir +from os import path, makedirs, listdir, getpgrp, killpg import hitchdir import shutil import signal @@ -75,11 +75,14 @@ def runpackage(arguments): binfile = path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin", "hitch{}".format(argv[1])) command = [binfile, ] + argv[2:] - # Stop responding to signals - the calling command should take over. - signal.signal(signal.SIGINT, signal.SIG_IGN) - signal.signal(signal.SIGTERM, signal.SIG_IGN) - signal.signal(signal.SIGHUP, signal.SIG_IGN) - signal.signal(signal.SIGQUIT, signal.SIG_IGN) + # When receiving a signal, distribute it to the process group + def distribute_signal_to_process_group(signum, frame): + killpg(getpgrp(), signum) + + signal.signal(signal.SIGINT, distribute_signal_to_process_group) + signal.signal(signal.SIGTERM, distribute_signal_to_process_group) + signal.signal(signal.SIGHUP, distribute_signal_to_process_group) + signal.signal(signal.SIGQUIT, distribute_signal_to_process_group) return_code = call(command) exit(return_code)
9
BUG : Exit signals were not being distributed properly to the process group. This fixes that.
6
.py
py
agpl-3.0
hitchtest/hitch
1704
<NME> README.md <BEF> shiro-redis ============= [![Build Status](https://travis-ci.org/alexxiyang/shiro-redis.svg?branch=master)](https://travis-ci.org/alexxiyang/shiro-redis) shiro only provide the support of ehcache and concurrentHashMap. Here is an implement of redis cache can be used by shiro. Hope it will help you! view [Documentation](http://alexxiyang.github.io/shiro-redis/) <MSG> Use github Page <DFF> @@ -1,9 +1,11 @@ shiro-redis ============= -[![Build Status](https://travis-ci.org/alexxiyang/shiro-redis.svg?branch=master)](https://travis-ci.org/alexxiyang/shiro-redis) +## Introduction shiro only provide the support of ehcache and concurrentHashMap. Here is an implement of redis cache can be used by shiro. Hope it will help you! +## Documentation + +Official documentation [is located here](http://alexxiyang.github.io/shiro-redis/). -view [Documentation](http://alexxiyang.github.io/shiro-redis/)
4
Use github Page
2
.md
md
mit
alexxiyang/shiro-redis
1705
<NME> README.md <BEF> shiro-redis ============= ## Introduction shiro only provide the support of ehcache and concurrentHashMap. Here is an implement of redis cache can be used by shiro. Hope it will help you! ## Documentation Official documentation [is located here](http://alexxiyang.github.io/shiro-redis/). | expire | `-2` | Redis cache key/value expire time. The expire time is in second.<br>Special values:<br>`-1`: no expire<br>`-2`: the same timeout with session<br>Default value: `-2`<br>**Note**: Make sure expire time is longer than session timeout. | | keyPrefix | `shiro:session:` | Custom your redis key prefix for session management<br>**Note**: Remember to add colon at the end of prefix. | | sessionInMemoryTimeout | `1000` | When we do signin, `doReadSession(sessionId)` will be called by shiro about 10 times. So shiro-redis save Session in ThreadLocal to remit this problem. sessionInMemoryTimeout is expiration of Session in ThreadLocal. <br>Most of time, you don't need to change it. | | keySerializer | `new org.crazycake.shiro.serializer.StringSerializer()` | The key serializer of cache manager<br>You can change the implement of key serializer or the encoding of StringSerializer.<br>Supported encodings refer to [Supported Encodings](https://docs.oracle.com/javase/8/docs/technotes/guides/intl/encoding.doc.html). Such as `UTF-8`, `UTF-16`, `UTF-32`, `ISO-8859-1`, `GBK`, `Big5`, etc<br>For more detail, check [Serializer](#serializer) | | valueSerializer | `new org.crazycake.shiro.serializer.ObjectSerializer()` | The value serializer of cache manager<br>You can change the implement of value serializer<br>For more detail, check [Serializer](#serializer) | ### CacheManager | principalIdFieldName | `id` | Principal id field name. The field which you can get unique id to identify this principal.<br>For example, if you use UserInfo as Principal class, the id field maybe `id`, `userId`, `email`, etc.<br>Remember to add getter to this id field. For example, `getId()`, `getUserId(`), `getEmail()`, etc.<br>Default value is `id`, that means your principal object must has a method called `getId()` | | expire | `1800` | Redis cache key/value expire time. <br>The expire time is in second. | | keyPrefix | `shiro:cache:` | Custom your redis key prefix for cache management<br>**Note**: Remember to add colon at the end of prefix. | | keySerializer | `new org.crazycake.shiro.serializer.StringSerializer()` | The key serializer of cache manager<br>You can change the implement of key serializer or the encoding of StringSerializer.<br>Supported encodings refer to [Supported Encodings](https://docs.oracle.com/javase/8/docs/technotes/guides/intl/encoding.doc.html). Such as `UTF-8`, `UTF-16`, `UTF-32`, `ISO-8859-1`, `GBK`, `Big5`, etc<br>For more detail, check [Serializer](#serializer) | | valueSerializer | `new org.crazycake.shiro.serializer.ObjectSerializer()` | The value serializer of cache manager<br>You can change the implement of value serializer<br>For more detail, check [Serializer](#serializer) | # If you found any bugs <MSG> Update README.md <DFF> @@ -318,8 +318,8 @@ These 4 Serializers are replaceable: | expire | `-2` | Redis cache key/value expire time. The expire time is in second.<br>Special values:<br>`-1`: no expire<br>`-2`: the same timeout with session<br>Default value: `-2`<br>**Note**: Make sure expire time is longer than session timeout. | | keyPrefix | `shiro:session:` | Custom your redis key prefix for session management<br>**Note**: Remember to add colon at the end of prefix. | | sessionInMemoryTimeout | `1000` | When we do signin, `doReadSession(sessionId)` will be called by shiro about 10 times. So shiro-redis save Session in ThreadLocal to remit this problem. sessionInMemoryTimeout is expiration of Session in ThreadLocal. <br>Most of time, you don't need to change it. | -| keySerializer | `new org.crazycake.shiro.serializer.StringSerializer()` | The key serializer of cache manager<br>You can change the implement of key serializer or the encoding of StringSerializer.<br>Supported encodings refer to [Supported Encodings](https://docs.oracle.com/javase/8/docs/technotes/guides/intl/encoding.doc.html). Such as `UTF-8`, `UTF-16`, `UTF-32`, `ISO-8859-1`, `GBK`, `Big5`, etc<br>For more detail, check [Serializer](#serializer) | -| valueSerializer | `new org.crazycake.shiro.serializer.ObjectSerializer()` | The value serializer of cache manager<br>You can change the implement of value serializer<br>For more detail, check [Serializer](#serializer) | +| keySerializer | `org.crazycake.shiro.serializer.StringSerializer` | The key serializer of cache manager<br>You can change the implement of key serializer or the encoding of StringSerializer.<br>Supported encodings refer to [Supported Encodings](https://docs.oracle.com/javase/8/docs/technotes/guides/intl/encoding.doc.html). Such as `UTF-8`, `UTF-16`, `UTF-32`, `ISO-8859-1`, `GBK`, `Big5`, etc<br>For more detail, check [Serializer](#serializer) | +| valueSerializer | `org.crazycake.shiro.serializer.ObjectSerializer` | The value serializer of cache manager<br>You can change the implement of value serializer<br>For more detail, check [Serializer](#serializer) | ### CacheManager @@ -329,8 +329,8 @@ These 4 Serializers are replaceable: | principalIdFieldName | `id` | Principal id field name. The field which you can get unique id to identify this principal.<br>For example, if you use UserInfo as Principal class, the id field maybe `id`, `userId`, `email`, etc.<br>Remember to add getter to this id field. For example, `getId()`, `getUserId(`), `getEmail()`, etc.<br>Default value is `id`, that means your principal object must has a method called `getId()` | | expire | `1800` | Redis cache key/value expire time. <br>The expire time is in second. | | keyPrefix | `shiro:cache:` | Custom your redis key prefix for cache management<br>**Note**: Remember to add colon at the end of prefix. | -| keySerializer | `new org.crazycake.shiro.serializer.StringSerializer()` | The key serializer of cache manager<br>You can change the implement of key serializer or the encoding of StringSerializer.<br>Supported encodings refer to [Supported Encodings](https://docs.oracle.com/javase/8/docs/technotes/guides/intl/encoding.doc.html). Such as `UTF-8`, `UTF-16`, `UTF-32`, `ISO-8859-1`, `GBK`, `Big5`, etc<br>For more detail, check [Serializer](#serializer) | -| valueSerializer | `new org.crazycake.shiro.serializer.ObjectSerializer()` | The value serializer of cache manager<br>You can change the implement of value serializer<br>For more detail, check [Serializer](#serializer) | +| keySerializer | `org.crazycake.shiro.serializer.StringSerializer` | The key serializer of cache manager<br>You can change the implement of key serializer or the encoding of StringSerializer.<br>Supported encodings refer to [Supported Encodings](https://docs.oracle.com/javase/8/docs/technotes/guides/intl/encoding.doc.html). Such as `UTF-8`, `UTF-16`, `UTF-32`, `ISO-8859-1`, `GBK`, `Big5`, etc<br>For more detail, check [Serializer](#serializer) | +| valueSerializer | `org.crazycake.shiro.serializer.ObjectSerializer` | The value serializer of cache manager<br>You can change the implement of value serializer<br>For more detail, check [Serializer](#serializer) | # If you found any bugs
4
Update README.md
4
.md
md
mit
alexxiyang/shiro-redis
1706
<NME> RedisCacheManagerTest.java <BEF> package org.crazycake.shiro; import org.apache.shiro.cache.Cache; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.CoreMatchers.is; import static org.mockito.Mockito.*; public class RedisCacheManagerTest { private IRedisManager redisManager; private RedisCacheManager redisCacheManager; @BeforeEach public void setUp() { redisManager = mock(IRedisManager.class); } @Test public void testInitWithoutSettingRedisManager() { redisCacheManager = new RedisCacheManager(); Assertions.assertThrows(IllegalArgumentException.class, () -> { redisCacheManager.getCache("testCache"); Cache cache1 = redisCacheManager.getCache("testCache1"); assertThat(cache,is(cache1)); redisCacheManager.setKeyPrefix("testRedisManager1"); Cache cache2 = redisCacheManager.getCache("testCache2"); assertThat(cache2.getClass().getName(), is("org.crazycake.shiro.RedisCache")); RedisCache redisCache2 = (RedisCache) cache2; assertThat(redisCache2.getKeyPrefix(), is("testRedisManager1")); } } assertThat(redisTestCache.getKeyPrefix(), is("testRedisManager1:testCache:")); assertThat(redisTestCache.getPrincipalIdFieldName(), is("id")); } } <MSG> Merge pull request #33 from ThePigsy/work-timeout 1、 在org.crazycake.shiro.RedisCache#put(key,value)时, 允许设置超时时间。 <DFF> @@ -26,12 +26,12 @@ public class RedisCacheManagerTest { Cache cache1 = redisCacheManager.getCache("testCache1"); assertThat(cache,is(cache1)); - redisCacheManager.setKeyPrefix("testRedisManager1"); + redisCacheManager.setKeyPrefix("testRedisManager1:"); Cache cache2 = redisCacheManager.getCache("testCache2"); assertThat(cache2.getClass().getName(), is("org.crazycake.shiro.RedisCache")); RedisCache redisCache2 = (RedisCache) cache2; - assertThat(redisCache2.getKeyPrefix(), is("testRedisManager1")); + assertThat(redisCache2.getKeyPrefix(), is("testRedisManager1:testCache2:")); } }
2
Merge pull request #33 from ThePigsy/work-timeout
2
.java
java
mit
alexxiyang/shiro-redis
1707
<NME> commandline.py <BEF> """High level command line interface to hitch.""" from subprocess import call, PIPE, STDOUT, Popen from hitch.click import command, group, argument, option from os import path, makedirs, listdir, kill, remove from sys import stderr, stdout, exit, modules, argv from functools import partial, reduce from hitch import hitchdir, languagestrings import shutil import signal import copy class CalledProcessError(Exception): """Re-implemented CalledProcessError, since it is not available < python 2.7.""" pass def check_output(command, stdout=PIPE, stderr=PIPE): """Re-implemented subprocess.check_output since it is not available < python 2.7.""" return Popen(command, stdout=stdout, stderr=stderr).communicate()[0] def check_call(command, shell=False): """Re-implemented subprocess.check_call since it is not available < python 2.7.""" process = Popen(command, shell=shell) process.communicate() if process.returncode != 0: raise CalledProcessError return def stop_everything(sig, frame): """Exit hitch.""" exit(1) def installpackages(): """Install packages with hitchsystem.""" hitchsystem = path.abspath(path.join(".hitch", "virtualenv", "bin", "hitchsystem")) signal.signal(signal.SIGINT, signal.SIG_IGN) check_call([hitchsystem, "installpackages", ]) signal.signal(signal.SIGINT, stop_everything) def update_requirements(): """Check hitchreqs.txt match what's installed via pip freeze. If not, update.""" stdout.write(languagestrings.UPDATING_REQUIREMENTS) pip = path.join(hitchdir.get_hitch_directory_or_fail(), "virtualenv", "bin", "pip") hitchreqs_filename = path.join(hitchdir.get_hitch_directory_or_fail(), "..", "hitchreqs.txt") pip_freeze = check_output([pip, "freeze"]).decode('utf8').split('\n') hitchreqs_handle = "" with open(hitchreqs_filename, "r") as hitchreqs_handle: hitchreqs = hitchreqs_handle.read().split('\n') if not sorted(pip_freeze) == sorted(hitchreqs): call([pip, "install", "-r", "hitchreqs.txt"]) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) @group() def cli(): pass @command() @option( '-p', '--python', default=None, help=languagestrings.SPECIFY_PYTHON_TO_CREATE_VIRTUALENV_WITH ) @option( '-v', '--virtualenv', default=None, help=languagestrings.SPECIFY_VIRTUALENV_TO_CREATE_HITCH_WITH ) def init(python, virtualenv): """Initialize hitch in this directory.""" if virtualenv is None: if call(["which", "virtualenv"], stdout=PIPE, stderr=PIPE) != 0: stderr.write(languagestrings.YOU_MUST_HAVE_VIRTUALENV_INSTALLED) stderr.flush() stderr.write("{0} not found.\n".format(python)) exit(1) str_version = check_output([python3, "-V"], stderr=STDOUT).decode('utf8').replace('\n', '') tuple_version = tuple([int(v) for v in str_version.replace('Python ', '').split('.')]) if tuple_version < (3, 3): stderr.write(languagestrings.YOU_MUST_HAVE_VERSION_ABOVE_PYTHON33) if call(["which", "python3"], stdout=PIPE, stderr=PIPE) != 0: stderr.write(languagestrings.YOU_MUST_HAVE_PYTHON3_INSTALLED) stderr.flush() exit(1) python3 = check_output(["which", "python3"]).decode('utf8').replace("\n", "") else: if path.exists(python): python3 = python else: stderr.write("{0} not found.\n".format(python)) exit(1) python_version = check_output([python3, "-V"], stderr=STDOUT).decode('utf8') replacements = ('Python ', ''), ('\n', '') str_version = reduce(lambda a, kv: a.replace(*kv), replacements, python_version) tuple_version = tuple([int(x) for x in str_version.split('.')[:2]]) if tuple_version < (3, 3): stderr.write(languagestrings.YOU_MUST_HAVE_VERSION_ABOVE_PYTHON33) exit(1) if hitchdir.hitch_exists(): hitchdir.check_hitch_directory_integrity() update_requirements() exit(0) makedirs(".hitch") # Store absolute directory in .hitch directory to guard against the directory being moved hitch_dir = path.abspath(".hitch") with open(path.join(hitch_dir, "absdir"), "w") as absdir_handle: absdir_handle.write(hitch_dir) pip = path.abspath(path.join(".hitch", "virtualenv", "bin", "pip")) try: check_call([ virtualenv, ".hitch/virtualenv", "--no-site-packages", "--distribute", "-p", python3 ]) check_call([pip, "install", "--upgrade", "pip"]) check_call([pip, "install", "--upgrade", "setuptools"]) check_call([pip, "install", "unixpackage", "hitchsystem"]) installpackages() if path.exists("hitchreqs.txt"): check_call([pip, "install", "-r", "hitchreqs.txt"]) else: check_call([pip, "install", "hitchtest"]) check_call([pip, "install", "hitchquickstart"]) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) signal.signal(signal.SIGINT, signal.SIG_IGN) check_call([path.abspath(path.join(".hitch", "virtualenv", "bin", "hitchquickstart")), ]) signal.signal(signal.SIGINT, stop_everything) installpackages() except CalledProcessError: stderr.write(languagestrings.ERROR_INITIALIZING_HITCH) hitchdir.remove_hitch_directory_if_exists() exit(1) def get_pip(): """Get the file path to the hitch pip.""" return path.join(hitchdir.get_hitch_directory_or_fail(), "virtualenv", "bin", "pip") @command(context_settings={'help_option_names':[],'ignore_unknown_options':True}, help="dd") @argument('arguments', nargs=-1) def runpackage(arguments): # Generic method to run any installed app in the virtualenv whose name starts with hitch* hitchdir.check_hitch_directory_integrity() binfile = path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin", "hitch{0}".format(argv[1])) command = [binfile, ] + argv[2:] # When receiving an exit signal, just forward it to process child. def forward_signal_to_child(pid, signum, frame): kill(pid, signum) process = Popen(command) signal.signal(signal.SIGINT, partial(forward_signal_to_child, process.pid)) signal.signal(signal.SIGTERM, partial(forward_signal_to_child, process.pid)) signal.signal(signal.SIGHUP, partial(forward_signal_to_child, process.pid)) signal.signal(signal.SIGQUIT, partial(forward_signal_to_child, process.pid)) return_code = process.wait() exit(return_code) @command() @argument('package', required=True) def uninstall(package): """Uninstall hitch package.""" hitchdir.check_hitch_directory_integrity() pip = get_pip() call([pip, "uninstall", package] ) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) update_requirements() @command() @argument('package', required=True) def install(package): """Install hitch package.""" hitchdir.check_hitch_directory_integrity() update_requirements() pip = get_pip() call([pip, "install", package, "-U", ]) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) installpackages() @command() def upgrade(): """Upgrade all installed hitch packages.""" hitchdir.check_hitch_directory_integrity() update_requirements() pip = get_pip() package_list = [ p for p in check_output([pip, "freeze"]).decode('utf8').split('\n') if p != "" and "==" in p ] version_fixed_package_list = [p.split("==")[0] for p in package_list] for package in version_fixed_package_list: call([pip, "install", package, "-U", ]) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) installpackages() @command() def freeze(): """List installed hitch packages.""" hitchdir.check_hitch_directory_integrity() pip = path.join(hitchdir.get_hitch_directory_or_fail(), "virtualenv", "bin", "pip") call([pip, "freeze", ]) @command() def clean(): """Remove the hitch directory entirely.""" if hitchdir.hitch_exists(): hitchdir.remove_hitch_directory_if_exists() else: stderr.write("No hitch directory found. Doing nothing.\n") stderr.flush() @command() @option( '-p', '--packages', default=None, help=( "Specify precise packages to remove - " "e.g. postgresql, postgresql-9.3.9, python, python2.6.8" ) ) def cleanpkg(packages): """Remove installed packages from the .hitchpkg directory.""" hitchpkg = path.join(path.expanduser("~"), ".hitchpkg") if path.exists(hitchpkg): if packages is None: shutil.rmtree(hitchpkg) else: for file_or_dir in listdir(hitchpkg): if file_or_dir.startswith(packages): if path.isdir(path.join(hitchpkg, file_or_dir)): shutil.rmtree(path.join(hitchpkg, file_or_dir)) else: remove(path.join(hitchpkg, file_or_dir)) def run(): """Run hitch bootstrap CLI""" signal.signal(signal.SIGINT, stop_everything) signal.signal(signal.SIGTERM, stop_everything) signal.signal(signal.SIGHUP, stop_everything) signal.signal(signal.SIGQUIT, stop_everything) if hitchdir.hitch_exists(): # Get packages from bin folder that are hitch related python_bin = path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin", "python") if path.exists(python_bin): packages = [ package.replace("hitch", "") for package in listdir( path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin") ) if package.startswith("hitch") and package != "hitch" ] # Add commands that start with "hitch" to the list of commands available (e.g. hitchtest, hitchsmtp) for package in packages: cmd = copy.deepcopy(runpackage) cmd.name = package try: description = check_output([ python_bin, '-c', 'import sys;sys.stdout.write(__import__("hitch{0}").commandline.cli.help)'.format( package ) ]).decode('utf8') except CalledProcessError: description = "" cmd.help = description cmd.short_help = description cli.add_command(cmd) cli.add_command(install) cli.add_command(uninstall) cli.add_command(upgrade) cli.add_command(freeze) else: stderr.write(languagestrings.SOMETHING_CORRUPTED) cli.add_command(clean) cli.add_command(cleanpkg) cli.add_command(init) cli.help = "Hitch test runner for:\n\n {0}.".format(hitchdir.get_hitch_directory()) else: cli.add_command(init) cli.add_command(clean) cli.add_command(cleanpkg) cli.help = "Hitch bootstrapper - '.hitch' directory not detected here." cli() if __name__ == '__main__': run() <MSG> Merge branch 'master' of github.com:hitchtest/hitch <DFF> @@ -83,8 +83,10 @@ def init(python, virtualenv): stderr.write("{0} not found.\n".format(python)) exit(1) - str_version = check_output([python3, "-V"], stderr=STDOUT).decode('utf8').replace('\n', '') - tuple_version = tuple([int(v) for v in str_version.replace('Python ', '').split('.')]) + python_version = check_output([python3, "-V"], stderr=STDOUT).decode('utf8') + replacements = ('Python ', ''), ('\n', '') + str_version = reduce(lambda a, kv: a.replace(*kv), replacements, python_version) + tuple_version = tuple([int(x) for x in str_version.split('.')[:2]]) if tuple_version < (3, 3): stderr.write(languagestrings.YOU_MUST_HAVE_VERSION_ABOVE_PYTHON33)
4
Merge branch 'master' of github.com:hitchtest/hitch
2
.py
py
agpl-3.0
hitchtest/hitch
1708
<NME> shiro-standalone.ini <BEF> ADDFILE <MSG> Use pillow test to refactor unit tests. Fix defeats <DFF> @@ -0,0 +1,4 @@ +redisManager.host = 192.168.10.10:6379 +redisSessionDAO.expire = 30 +cacheManager.expire = 30 +cacheManager.principalIdFieldName = userId \ No newline at end of file
4
Use pillow test to refactor unit tests. Fix defeats
0
.ini
ini
mit
alexxiyang/shiro-redis
1709
<NME> README.md <BEF> shiro-redis ============= [![Build Status](https://travis-ci.org/alexxiyang/shiro-redis.svg?branch=master)](https://travis-ci.org/alexxiyang/shiro-redis) [![Maven Central](https://maven-badges.herokuapp.com/maven-central/org.crazycake/shiro-redis/badge.svg)](https://maven-badges.herokuapp.com/maven-central/org.crazycake/shiro-redis) shiro only provide the support of ehcache and concurrentHashMap. Here is an implement of redis cache can be used by shiro. Hope it will help you! # Download You use either of the following 2 ways to include `shiro-redis` into your project * use `git clone https://github.com/alexxiyang/shiro-redis.git` to clone project to your local workspace and build jar file by your self * add maven dependency ```xml <dependency> <groupId>org.crazycake</groupId> <artifactId>shiro-redis</artifactId> <version>3.3.1</version> </dependency> ``` > **Note:**\ > 3.3.0 is compiled in java11 by mistake. > Please use 3.3.1 which is compiled in java8 ## shiro-core/jedis Version Comparison Charts | shiro-redis | shiro | jedis | | :----------------:| :-------: | :-------: | | 3.2.3 | 1.3.2 | 2.9.0 | | 3.3.0 (java11) | 1.6.0 | 3.3.0 | | 3.3.1 (java8) | 1.6.0 | 3.3.0 | # Before use Here is the first thing you need to know. Shiro-redis needs an id field to identify your authorization object in Redis. So please make sure your principal class has a field which you can get unique id of this object. Please setting this id field name by `cacheManager.principalIdFieldName = <your id field name of principal object>` For example: If you create `SimpleAuthenticationInfo` like this: ```java @Override protected AuthenticationInfo doGetAuthenticationInfo(AuthenticationToken token) throws AuthenticationException { UsernamePasswordToken usernamePasswordToken = (UsernamePasswordToken)token; UserInfo userInfo = new UserInfo(); userInfo.setUsername(usernamePasswordToken.getUsername()); return new SimpleAuthenticationInfo(userInfo, "123456", getName()); } ``` Then the `userInfo` object is your principal object. You need to make sure `UserInfo` has an unique field for Redis to identify it. Take `userId` as an example: ```java public class UserInfo implements Serializable{ private Integer userId private String username; public String getUsername() { return username; } public void setUsername(String username) { this.username = username; } public Integer getUserId() { return this.userId; } } ``` Put userId as the value of `cacheManager.principalIdFieldName`, like this: ```properties cacheManager.principalIdFieldName = userId ``` If you're using Spring, the configuration should be ```xml <property name="principalIdFieldName" value="userId" /> ``` Then `shiro-redis` will call `userInfo.getUserId()` to get the id for saving Redis object. # How to configure ? You can configure `shiro-redis` either in `shiro.ini` or in `spring-*.xml` ## shiro.ini Here is the configuration example for shiro.ini. ### Redis Standalone If you are running Redis in Standalone mode ```properties [main] #==================================== # shiro-redis configuration [start] #==================================== #=================================== # Redis Manager [start] #=================================== # Create redisManager redisManager = org.crazycake.shiro.RedisManager # Redis host. If you don't specify host the default value is 127.0.0.1:6379 redisManager.host = 127.0.0.1:6379 #=================================== # Redis Manager [end] #=================================== #========================================= # Redis session DAO [start] #========================================= # Create redisSessionDAO redisSessionDAO = org.crazycake.shiro.RedisSessionDAO # Use redisManager as cache manager redisSessionDAO.redisManager = $redisManager sessionManager = org.apache.shiro.web.session.mgt.DefaultWebSessionManager sessionManager.sessionDAO = $redisSessionDAO securityManager.sessionManager = $sessionManager #========================================= # Redis session DAO [end] #========================================= #========================================== # Redis cache manager [start] #========================================== # Create cacheManager cacheManager = org.crazycake.shiro.RedisCacheManager # Principal id field name. The field which you can get unique id to identify this principal. # For example, if you use UserInfo as Principal class, the id field maybe `id`, `userId`, `email`, etc. # Remember to add getter to this id field. For example, `getId()`, `getUserId()`, `getEmail()`, etc. # Default value is id, that means your principal object must has a method called `getId()` cacheManager.principalIdFieldName = id # Use redisManager as cache manager cacheManager.redisManager = $redisManager securityManager.cacheManager = $cacheManager #========================================== # Redis cache manager [end] #========================================== #================================= # shiro-redis configuration [end] #================================= ``` For complete configurable options list, check [Configurable Options](#configurable-options). Here is a [tutorial project](https://github.com/alexxiyang/shiro-redis-tutorial) for you to understand how to configure `shiro-redis` in `shiro.ini`. ### Redis Sentinel if you're using Redis Sentinel, please replace the `redisManager` configuration of the standalone version into the following: ```properties #=================================== # Redis Manager [start] #=================================== # Create redisManager redisManager = org.crazycake.shiro.RedisSentinelManager # Sentinel host. If you don't specify host the default value is 127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381 redisManager.host = 127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381 # Sentinel master name redisManager.masterName = mymaster #=================================== # Redis Manager [end] #=================================== ``` For complete configurable options list, check [Configurable Options](#configurable-options). ### Redis Cluster If you're using redis cluster, please replace the `redisManager` configuration of the standalone version into the following: ```properties #=================================== # Redis Manager [start] #=================================== # Create redisManager redisManager = org.crazycake.shiro.RedisClusterManager # Redis host and port list redisManager.host = 192.168.21.3:7000,192.168.21.3:7001,192.168.21.3:7002,192.168.21.3:7003,192.168.21.3:7004,192.168.21.3:7005 #=================================== # Redis Manager [end] #=================================== ``` For complete configurable options list, check [Configurable Options](#configurable-options). ## Spring If you are using Spring ### Redis Standalone If you are running Redis in Standalone mode ```xml <!-- shiro-redis configuration [start] --> <!-- Redis Manager [start] --> <bean id="redisManager" class="org.crazycake.shiro.RedisManager"> <property name="host" value="127.0.0.1:6379"/> </bean> <!-- Redis Manager [end] --> <!-- Redis session DAO [start] --> <bean id="redisSessionDAO" class="org.crazycake.shiro.RedisSessionDAO"> <property name="redisManager" ref="redisManager" /> </bean> <bean id="sessionManager" class="org.apache.shiro.web.session.mgt.DefaultWebSessionManager"> <property name="sessionDAO" ref="redisSessionDAO" /> </bean> <!-- Redis session DAO [end] --> <!-- Redis cache manager [start] --> <bean id="cacheManager" class="org.crazycake.shiro.RedisCacheManager"> <property name="redisManager" ref="redisManager" /> </bean> <!-- Redis cache manager [end] --> <bean id="securityManager" class="org.apache.shiro.web.mgt.DefaultWebSecurityManager"> <property name="sessionManager" ref="sessionManager" /> <property name="cacheManager" ref="cacheManager" /> <!-- other configurations --> <property name="realm" ref="exampleRealm"/> <property name="rememberMeManager.cipherKey" value="kPH+bIxk5D2deZiIxcaaaA==" /> </bean> <!-- shiro-redis configuration [end] --> ``` For complete configurable options list, check [Configurable Options](#configurable-options). Here is a [tutorial project](https://github.com/alexxiyang/shiro-redis-spring-tutorial) for you to understand how to configure `shiro-redis` in spring configuration file. ### Redis Sentinel If you use redis sentinel, please replace the `redisManager` configuration of the standalone version into the following: ```xml <!-- shiro-redis configuration [start] --> <!-- shiro redisManager --> <bean id="redisManager" class="org.crazycake.shiro.RedisSentinelManager"> <property name="host" value="127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381"/> <property name="masterName" value="mymaster"/> </bean> ``` For complete configurable options list, check [Configurable Options](#configurable-options). ### Redis Cluster If you use redis cluster, please replace the `redisManager` configuration of the standalone version into the following: ```xml <!-- shiro-redis configuration [start] --> <!-- shiro redisManager --> <bean id="redisManager" class="org.crazycake.shiro.RedisClusterManager"> <property name="host" value="192.168.21.3:7000,192.168.21.3:7001,192.168.21.3:7002,192.168.21.3:7003,192.168.21.3:7004,192.168.21.3:7005"/> </bean> ``` For complete configurable options list, check [Configurable Options](#configurable-options). ## Serializer Since redis only accept `byte[]`, there comes a serializer problem. Shiro-redis is using `StringSerializer` as key serializer and `ObjectSerializer` as value serializer. You can use your own custom serializer, as long as this custom serializer implements `org.crazycake.shiro.serializer.RedisSerializer` For example, we can change the charset of keySerializer like this ```properties # If you want change charset of keySerializer or use your own custom serializer, you need to define serializer first # # cacheManagerKeySerializer = org.crazycake.shiro.serializer.StringSerializer # Supported encodings refer to https://docs.oracle.com/javase/8/docs/technotes/guides/intl/encoding.doc.html # UTF-8, UTF-16, UTF-32, ISO-8859-1, GBK, Big5, etc # # cacheManagerKeySerializer.charset = UTF-8 # cacheManager.keySerializer = $cacheManagerKeySerializer ``` These 4 options that you can replace them with your cutom serializers: - cacheManager.keySerializer - cacheManager.valueSerializer - redisSessionDAO.keySerializer - redisSessionDAO.valueSerializer ## Configurable Options Here are all the available options you can use in `shiro-redis` configuration file. ### RedisManager | Title | Default | Description | | :------------------| :------------------- | :---------------------------| | host | `127.0.0.1:6379` | Redis host. If you don't specify host the default value is `127.0.0.1:6379`. If you run redis in sentinel mode or cluster mode, separate host names with comma, like `127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381` | | masterName | `mymaster` | **Only used for sentinel mode**<br>The master node of Redis sentinel mode | | timeout | `2000` | Redis connect timeout. Timeout for jedis try to connect to redis server(In milliseconds) | | soTimeout | `2000` | **Only used for sentinel mode or cluster mode**<br>The timeout for jedis try to read data from redis server | | maxAttempts | `3` | **Only used for cluster mode**<br>Max attempts to connect to server | | password | | Redis password | | database | `0` | Redis database. Default value is 0 | | jedisPoolConfig | `new redis.clients.jedis.JedisPoolConfig()` | JedisPoolConfig. You can create your own JedisPoolConfig instance and set attributes as you wish<br>Most of time, you don't need to set jedisPoolConfig<br>Here is an example.<br>`jedisPoolConfig = redis.clients.jedis.JedisPoolConfig`<br>`jedisPoolConfig.testWhileIdle = false`<br>`redisManager.jedisPoolConfig = jedisPoolConfig` | | count | `100` | Scan count. Shiro-redis use Scan to get keys, so you can define the number of elements returned at every iteration. | | jedisPool | `null` | **Only used for sentinel mode or single mode**<br>You can create your own JedisPool instance and set attributes as you wish | ### RedisSessionDAO | Title | Default | Description | | :------------------| :------------------- | :---------------------------| | redisManager | | RedisManager which you just configured above (Required) | | expire | `-2` | Redis cache key/value expire time. The expire time is in second.<br>Special values:<br>`-1`: no expire<br>`-2`: the same timeout with session<br>Default value: `-2`<br>**Note**: Make sure expire time is longer than session timeout. | | keyPrefix | `shiro:session:` | Custom your redis key prefix for session management<br>**Note**: Remember to add colon at the end of prefix. | | sessionInMemoryTimeout | `1000` | When we do signin, `doReadSession(sessionId)` will be called by shiro about 10 times. So shiro-redis save Session in ThreadLocal to remit this problem. sessionInMemoryTimeout is expiration of Session in ThreadLocal. <br>Most of time, you don't need to change it. | | sessionInMemoryEnabled | `true` | Whether or not enable temporary save session in ThreadLocal | | keySerializer | `org.crazycake.shiro.serializer.StringSerializer` | The key serializer of cache manager<br>You can change the implement of key serializer or the encoding of StringSerializer.<br>Supported encodings refer to [Supported Encodings](https://docs.oracle.com/javase/8/docs/technotes/guides/intl/encoding.doc.html). Such as `UTF-8`, `UTF-16`, `UTF-32`, `ISO-8859-1`, `GBK`, `Big5`, etc<br>For more detail, check [Serializer](#serializer) | | valueSerializer | `org.crazycake.shiro.serializer.ObjectSerializer` | The value serializer of cache manager<br>You can change the implement of value serializer<br>For more detail, check [Serializer](#serializer) | ### CacheManager | Title | Default | Description | | :--------------------| :------------------- | :---------------------------| | redisManager | | RedisManager which you just configured above (Required) | | principalIdFieldName | `id` | Principal id field name. The field which you can get unique id to identify this principal.<br>For example, if you use UserInfo as Principal class, the id field maybe `id`, `userId`, `email`, etc.<br>Remember to add getter to this id field. For example, `getId()`, `getUserId(`), `getEmail()`, etc.<br>Default value is `id`, that means your principal object must has a method called `getId()` | | expire | `1800` | Redis cache key/value expire time. <br>The expire time is in second. | | keyPrefix | `shiro:cache:` | Custom your redis key prefix for cache management<br>**Note**: Remember to add colon at the end of prefix. | | keySerializer | `org.crazycake.shiro.serializer.StringSerializer` | The key serializer of cache manager<br>You can change the implement of key serializer or the encoding of StringSerializer.<br>Supported encodings refer to [Supported Encodings](https://docs.oracle.com/javase/8/docs/technotes/guides/intl/encoding.doc.html). Such as `UTF-8`, `UTF-16`, `UTF-32`, `ISO-8859-1`, `GBK`, `Big5`, etc<br>For more detail, check [Serializer](#serializer) | | valueSerializer | `org.crazycake.shiro.serializer.ObjectSerializer` | The value serializer of cache manager<br>You can change the implement of value serializer<br>For more detail, check [Serializer](#serializer) | # Spring boot starter Using `Spring-Boot` integration is the easiest way to integrate `shiro-redis` into a Spring-base application. > Note: `shiro-redis-spring-boot-starter` version `3.2.1` is based on `shiro-spring-boot-web-starter` version `1.4.0-RC2` First include the `shiro-redis` Spring boot starter dependency in you application classpath ```xml <dependency> <groupId>org.crazycake</groupId> <artifactId>shiro-redis-spring-boot-starter</artifactId> <version>3.3.1</version> </dependency> ``` The next step depends on whether you've created your own `SessionManager` or `SessionsSecurityManager`. ## If you haven't created your own `SessionManager` or `SessionsSecurityManager` If you don't have your own `SessionManager` or `SessionsSecurityManager` in your configuration, `shiro-redis-spring-boot-starter` will create `RedisSessionDAO` and `RedisCacheManager` for you. Then inject them into `SessionManager` and `SessionsSecurityManager` automatically. So, You are all set. Enjoy it! ## If you have created your own `SessionManager` or `SessionsSecurityManager` If you have created your own `SessionManager` or `SessionsSecurityManager` like this: ```java @Bean public SessionsSecurityManager securityManager(List<Realm> realms) { DefaultWebSecurityManager securityManager = new DefaultWebSecurityManager(realms); // other stuff... return securityManager; } ``` Then inject `redisSessionDAO` and `redisCacheManager` which created by `shiro-redis-spring-boot-starter` already ```java @Autowired RedisSessionDAO redisSessionDAO; @Autowired RedisCacheManager redisCacheManager; ``` Inject them into your own `SessionManager` and `SessionsSecurityManager` ```java @Bean public SessionManager sessionManager() { DefaultWebSessionManager sessionManager = new DefaultWebSessionManager(); // inject redisSessionDAO sessionManager.setSessionDAO(redisSessionDAO); // other stuff... return sessionManager; } @Bean public SessionsSecurityManager securityManager(List<Realm> realms, SessionManager sessionManager) { DefaultWebSecurityManager securityManager = new DefaultWebSecurityManager(realms); //inject sessionManager securityManager.setSessionManager(sessionManager); // inject redisCacheManager securityManager.setCacheManager(redisCacheManager); // other stuff... return securityManager; } ``` For full example, see [shiro-redis-spring-boot-tutorial](https://github.com/alexxiyang/shiro-redis-spring-boot-tutorial) ### Configuration Properties Here are all available options you can use in Spring-boot starter configuration | Title | Default | Description | | :--------------------------------------------------| :------------------- | :---------------------------| | shiro-redis.cache-manager.key-prefix | `shiro:cache:` | Custom your redis key prefix for cache management<br>**Note**: Remember to add colon at the end of prefix. | # If you found any bugs Please create the issue | shiro-redis.session-dao.session-in-memory-timeout | `1000` | When we do signin, `doReadSession(sessionId)` will be called by shiro about 10 times. So shiro-redis save Session in ThreadLocal to remit this problem. sessionInMemoryTimeout is expiration of Session in ThreadLocal. <br>Most of time, you don't need to change it. | | shiro-redis.session-dao.session-in-memory-enabled | `true` | Whether or not enable temporary save session in ThreadLocal | | shiro-redis.cache-manager.principal-id-field-name | `id` | Principal id field name. The field which you can get unique id to identify this principal.<br>For example, if you use UserInfo as Principal class, the id field maybe `id`, `userId`, `email`, etc.<br>Remember to add getter to this id field. For example, `getId()`, `getUserId(`), `getEmail()`, etc.<br>Default value is `id`, that means your principal object must has a method called `getId()` | | shiro-redis.cache-manager.expire | `1800` | Redis cache key/value expire time. <br>The expire time is in second. | | shiro-redis.cache-manager.key-prefix | `shiro:cache:` | Custom your redis key prefix for cache management<br>**Note**: Remember to add colon at the end of prefix. | ## Working with `spring-boot-devtools` If you are using `shiro-redis` with `spring-boot-devtools`. Please add this line to `resources/META-INF/spring-devtools.properties` (Create it if there is no this file): ```ini restart.include.shiro-redis=/shiro-[\\w-\\.]+jar ``` # If you found any bugs Please create the issue 可以用中文 <MSG> Add `working with spring-boot-devtools` <DFF> @@ -428,6 +428,12 @@ For full example, see [shiro-redis-spring-boot-tutorial](https://github.com/alex | shiro-redis.cache-manager.key-prefix | `shiro:cache:` | Custom your redis key prefix for cache management<br>**Note**: Remember to add colon at the end of prefix. | +## Working with `spring-boot-devtools` +If you are using `shiro-redis` with `spring-boot-devtools`. Please add this line to `resources/META-INF/spring-devtools.properties` (Create it if there is no this file): +```ini +restart.include.shiro-redis=/shiro-[\\w-\\.]+jar +``` + # If you found any bugs Please create the issue
6
Add `working with spring-boot-devtools`
0
.md
md
mit
alexxiyang/shiro-redis
1710
<NME> RedisCacheTest.java <BEF> package org.crazycake.shiro; import org.apache.shiro.subject.PrincipalCollection; import org.crazycake.shiro.exception.SerializationException; import org.crazycake.shiro.serializer.ObjectSerializer; import org.crazycake.shiro.serializer.StringSerializer; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import java.util.Collection; import java.util.Iterator; import java.util.List; import java.util.Set; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.CoreMatchers.*; import static org.mockito.Mockito.*; public class RedisCacheTest { private IRedisManager redisManager; private StringSerializer keySerializer = new StringSerializer(); private ObjectSerializer valueSerializer = new ObjectSerializer(); @BeforeEach public void setUp() { redisManager = mock(IRedisManager.class); } private RedisCache mountRedisCache() { return new RedisCache(redisManager, new StringSerializer(), new ObjectSerializer(), "employee:", 1, RedisCacheManager.DEFAULT_PRINCIPAL_ID_FIELD_NAME); } @Test public void testInitialize() { Assertions.assertThrows(IllegalArgumentException.class, () -> new RedisCache<String, String>(null, null, null, "abc:", 1, RedisCacheManager.DEFAULT_PRINCIPAL_ID_FIELD_NAME)); Assertions.assertThrows(IllegalArgumentException.class, () -> new RedisCache<String, String>(new RedisManager(), null, null, "abc:", 1, RedisCacheManager.DEFAULT_PRINCIPAL_ID_FIELD_NAME)); Assertions.assertThrows(IllegalArgumentException.class, () -> new RedisCache<String, String>(new RedisManager(), new StringSerializer(), null, "abc:", 1, RedisCacheManager.DEFAULT_PRINCIPAL_ID_FIELD_NAME)); } @Test public void testPut() throws SerializationException { RedisCache rc = mountRedisCache(); Object value = rc.put("foo", "bar"); assertThat(value, is("bar")); verify(redisManager).set(keySerializer.serialize("employee:foo"), valueSerializer.serialize("bar"), 1); PrincipalCollection principal = new EmployeePrincipal(3); rc.put(principal, "account information"); verify(redisManager).set(keySerializer.serialize("employee:3"), valueSerializer.serialize("account information"), 1); } } class Employee { private int id; public Employee(int id) { this.id = id; } public int getId() { return this.id; } } class EmployeePrincipal implements PrincipalCollection { private Employee primaryPrincipal; public EmployeePrincipal(int id) { this.primaryPrincipal = new Employee(id); } @Override public Employee getPrimaryPrincipal() { return this.primaryPrincipal; } @Override public <T> T oneByType(Class<T> aClass) { return null; } @Override public <T> Collection<T> byType(Class<T> aClass) { return null; } @Override public List asList() { return null; } @Override public Set asSet() { return null; } public void testSize() throws InterruptedException { doPutAuth(redisCache, user1); doPutAuth(redisCache, user2); Thread.sleep(500); assertEquals(redisCache.size(), 2); } public Set<String> getRealmNames() { return null; } @Override public boolean isEmpty() { return false; } @Override public Iterator iterator() { return null; } } <MSG> Modify RedisCacheTest.testSize() <DFF> @@ -99,7 +99,7 @@ public class RedisCacheTest { public void testSize() throws InterruptedException { doPutAuth(redisCache, user1); doPutAuth(redisCache, user2); - Thread.sleep(500); + Thread.sleep(800); assertEquals(redisCache.size(), 2); }
1
Modify RedisCacheTest.testSize()
1
.java
java
mit
alexxiyang/shiro-redis
1711
<NME> commandline.py <BEF> """High level command line interface to hitch.""" from subprocess import call, PIPE, STDOUT, Popen from hitch.click import command, group, argument, option from os import path, makedirs, listdir, kill, remove from sys import stderr, stdout, exit, modules, argv from functools import partial, reduce from hitch import hitchdir, languagestrings import shutil import signal import copy class CalledProcessError(Exception): """Re-implemented CalledProcessError, since it is not available < python 2.7.""" pass def check_output(command, stdout=PIPE, stderr=PIPE): """Re-implemented subprocess.check_output since it is not available < python 2.7.""" return Popen(command, stdout=stdout, stderr=stderr).communicate()[0] def check_call(command, shell=False): """Re-implemented subprocess.check_call since it is not available < python 2.7.""" process = Popen(command, shell=shell) process.communicate() if process.returncode != 0: raise CalledProcessError return def stop_everything(sig, frame): """Exit hitch.""" exit(1) def installpackages(): """Install packages with hitchsystem.""" hitchsystem = path.abspath(path.join(".hitch", "virtualenv", "bin", "hitchsystem")) signal.signal(signal.SIGINT, signal.SIG_IGN) check_call([hitchsystem, "installpackages", ]) signal.signal(signal.SIGINT, stop_everything) def update_requirements(): """Check hitchreqs.txt match what's installed via pip freeze. If not, update.""" stdout.write(languagestrings.UPDATING_REQUIREMENTS) pip = path.join(hitchdir.get_hitch_directory_or_fail(), "virtualenv", "bin", "pip") hitchreqs_filename = path.join(hitchdir.get_hitch_directory_or_fail(), "..", "hitchreqs.txt") pip_freeze = check_output([pip, "freeze"]).decode('utf8').split('\n') hitchreqs_handle = "" with open(hitchreqs_filename, "r") as hitchreqs_handle: hitchreqs = hitchreqs_handle.read().split('\n') if not sorted(pip_freeze) == sorted(hitchreqs): call([pip, "install", "-r", "hitchreqs.txt"]) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) @group() def cli(): stderr.write("{0} not found.\n".format(python)) exit(1) str_version = check_output([python3, "-V"], stderr=STDOUT).decode('utf8').replace('\n', '') tuple_version = tuple([int(v) for v in str_version.replace('Python ', '').split('.')]) if tuple_version < (3, 3): stderr.write(languagestrings.YOU_MUST_HAVE_VERSION_ABOVE_PYTHON33) ) def init(python, virtualenv): """Initialize hitch in this directory.""" if virtualenv is None: if call(["which", "virtualenv"], stdout=PIPE, stderr=PIPE) != 0: stderr.write(languagestrings.YOU_MUST_HAVE_VIRTUALENV_INSTALLED) stderr.flush() exit(1) virtualenv = check_output(["which", "virtualenv"]).decode('utf8').replace("\n", "") else: if path.exists(virtualenv): if python is None: python = path.join(path.dirname(virtualenv), "python") else: stderr.write("{0} not found.\n".format(virtualenv)) if python is None: if call(["which", "python3"], stdout=PIPE, stderr=PIPE) != 0: stderr.write(languagestrings.YOU_MUST_HAVE_PYTHON3_INSTALLED) stderr.flush() exit(1) python3 = check_output(["which", "python3"]).decode('utf8').replace("\n", "") else: if path.exists(python): python3 = python else: stderr.write("{0} not found.\n".format(python)) exit(1) python_version = check_output([python3, "-V"], stderr=STDOUT).decode('utf8') replacements = ('Python ', ''), ('\n', '') str_version = reduce(lambda a, kv: a.replace(*kv), replacements, python_version) tuple_version = tuple([int(x) for x in str_version.split('.')[:2]]) if tuple_version < (3, 3): stderr.write(languagestrings.YOU_MUST_HAVE_VERSION_ABOVE_PYTHON33) exit(1) if hitchdir.hitch_exists(): hitchdir.check_hitch_directory_integrity() update_requirements() exit(0) makedirs(".hitch") # Store absolute directory in .hitch directory to guard against the directory being moved hitch_dir = path.abspath(".hitch") with open(path.join(hitch_dir, "absdir"), "w") as absdir_handle: absdir_handle.write(hitch_dir) pip = path.abspath(path.join(".hitch", "virtualenv", "bin", "pip")) try: check_call([ virtualenv, ".hitch/virtualenv", "--no-site-packages", "--distribute", "-p", python3 ]) check_call([pip, "install", "--upgrade", "pip"]) check_call([pip, "install", "--upgrade", "setuptools"]) check_call([pip, "install", "unixpackage", "hitchsystem"]) installpackages() if path.exists("hitchreqs.txt"): check_call([pip, "install", "-r", "hitchreqs.txt"]) else: check_call([pip, "install", "hitchtest"]) check_call([pip, "install", "hitchquickstart"]) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) signal.signal(signal.SIGINT, signal.SIG_IGN) check_call([path.abspath(path.join(".hitch", "virtualenv", "bin", "hitchquickstart")), ]) signal.signal(signal.SIGINT, stop_everything) installpackages() except CalledProcessError: stderr.write(languagestrings.ERROR_INITIALIZING_HITCH) hitchdir.remove_hitch_directory_if_exists() exit(1) def get_pip(): """Get the file path to the hitch pip.""" return path.join(hitchdir.get_hitch_directory_or_fail(), "virtualenv", "bin", "pip") @command(context_settings={'help_option_names':[],'ignore_unknown_options':True}, help="dd") @argument('arguments', nargs=-1) def runpackage(arguments): # Generic method to run any installed app in the virtualenv whose name starts with hitch* hitchdir.check_hitch_directory_integrity() binfile = path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin", "hitch{0}".format(argv[1])) command = [binfile, ] + argv[2:] # When receiving an exit signal, just forward it to process child. def forward_signal_to_child(pid, signum, frame): kill(pid, signum) process = Popen(command) signal.signal(signal.SIGINT, partial(forward_signal_to_child, process.pid)) signal.signal(signal.SIGTERM, partial(forward_signal_to_child, process.pid)) signal.signal(signal.SIGHUP, partial(forward_signal_to_child, process.pid)) signal.signal(signal.SIGQUIT, partial(forward_signal_to_child, process.pid)) return_code = process.wait() exit(return_code) @command() @argument('package', required=True) def uninstall(package): """Uninstall hitch package.""" hitchdir.check_hitch_directory_integrity() pip = get_pip() call([pip, "uninstall", package] ) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) update_requirements() @command() @argument('package', required=True) def install(package): """Install hitch package.""" hitchdir.check_hitch_directory_integrity() update_requirements() pip = get_pip() call([pip, "install", package, "-U", ]) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) installpackages() @command() def upgrade(): """Upgrade all installed hitch packages.""" hitchdir.check_hitch_directory_integrity() update_requirements() pip = get_pip() package_list = [ p for p in check_output([pip, "freeze"]).decode('utf8').split('\n') if p != "" and "==" in p ] version_fixed_package_list = [p.split("==")[0] for p in package_list] for package in version_fixed_package_list: call([pip, "install", package, "-U", ]) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) installpackages() @command() def freeze(): """List installed hitch packages.""" hitchdir.check_hitch_directory_integrity() pip = path.join(hitchdir.get_hitch_directory_or_fail(), "virtualenv", "bin", "pip") call([pip, "freeze", ]) @command() def clean(): """Remove the hitch directory entirely.""" if hitchdir.hitch_exists(): hitchdir.remove_hitch_directory_if_exists() else: stderr.write("No hitch directory found. Doing nothing.\n") stderr.flush() @command() @option( '-p', '--packages', default=None, help=( "Specify precise packages to remove - " "e.g. postgresql, postgresql-9.3.9, python, python2.6.8" ) ) def cleanpkg(packages): """Remove installed packages from the .hitchpkg directory.""" hitchpkg = path.join(path.expanduser("~"), ".hitchpkg") if path.exists(hitchpkg): if packages is None: shutil.rmtree(hitchpkg) else: for file_or_dir in listdir(hitchpkg): if file_or_dir.startswith(packages): if path.isdir(path.join(hitchpkg, file_or_dir)): shutil.rmtree(path.join(hitchpkg, file_or_dir)) else: remove(path.join(hitchpkg, file_or_dir)) def run(): """Run hitch bootstrap CLI""" signal.signal(signal.SIGINT, stop_everything) signal.signal(signal.SIGTERM, stop_everything) signal.signal(signal.SIGHUP, stop_everything) signal.signal(signal.SIGQUIT, stop_everything) if hitchdir.hitch_exists(): # Get packages from bin folder that are hitch related python_bin = path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin", "python") if path.exists(python_bin): packages = [ package.replace("hitch", "") for package in listdir( path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin") ) if package.startswith("hitch") and package != "hitch" ] # Add commands that start with "hitch" to the list of commands available (e.g. hitchtest, hitchsmtp) for package in packages: cmd = copy.deepcopy(runpackage) cmd.name = package try: description = check_output([ python_bin, '-c', 'import sys;sys.stdout.write(__import__("hitch{0}").commandline.cli.help)'.format( package ) ]).decode('utf8') except CalledProcessError: description = "" cmd.help = description cmd.short_help = description cli.add_command(cmd) cli.add_command(install) cli.add_command(uninstall) cli.add_command(upgrade) cli.add_command(freeze) else: stderr.write(languagestrings.SOMETHING_CORRUPTED) cli.add_command(clean) cli.add_command(cleanpkg) cli.add_command(init) cli.help = "Hitch test runner for:\n\n {0}.".format(hitchdir.get_hitch_directory()) else: cli.add_command(init) cli.add_command(clean) cli.add_command(cleanpkg) cli.help = "Hitch bootstrapper - '.hitch' directory not detected here." cli() if __name__ == '__main__': run() <MSG> Fix issue caused by Python 3 version including a letter. <DFF> @@ -66,8 +66,10 @@ def init(python, virtualenv): stderr.write("{0} not found.\n".format(python)) exit(1) - str_version = check_output([python3, "-V"], stderr=STDOUT).decode('utf8').replace('\n', '') - tuple_version = tuple([int(v) for v in str_version.replace('Python ', '').split('.')]) + python_version = check_output([python3, "-V"], stderr=STDOUT).decode('utf8') + replacements = ('Python ', ''), ('\n', '') + str_version = reduce(lambda a, kv: a.replace(*kv), replacements, python_version) + tuple_version = tuple([int(x) for x in str_version.split('.')[:2]]) if tuple_version < (3, 3): stderr.write(languagestrings.YOU_MUST_HAVE_VERSION_ABOVE_PYTHON33)
4
Fix issue caused by Python 3 version including a letter.
2
.py
py
agpl-3.0
hitchtest/hitch
1712
<NME> README.md <BEF> shiro-redis ============= ## Introduction shiro only provide the support of ehcache and concurrentHashMap. Here is an implement of redis cache can be used by shiro. Hope it will help you! ## Documentation Official documentation [is located here](http://alexxiyang.github.io/shiro-redis/). <dependency> <groupId>org.crazycake</groupId> <artifactId>shiro-redis</artifactId> <version>2.8.24</version> </dependency> ``` <MSG> Prepare for 3.0.0 <DFF> @@ -16,7 +16,7 @@ You can choose these 2 ways to include shiro-redis into your project <dependency> <groupId>org.crazycake</groupId> <artifactId>shiro-redis</artifactId> - <version>2.8.24</version> + <version>3.0.0</version> </dependency> ```
1
Prepare for 3.0.0
1
.md
md
mit
alexxiyang/shiro-redis
1713
<NME> RedisSessionDAOTest.java <BEF> package org.crazycake.shiro; import org.apache.shiro.session.InvalidSessionException; import org.apache.shiro.session.Session; import org.crazycake.shiro.exception.SerializationException; import org.crazycake.shiro.serializer.ObjectSerializer; import org.crazycake.shiro.serializer.StringSerializer; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import java.io.Serializable; import java.util.Collection; import java.util.Date; import java.util.HashSet; import java.util.Set; public class RedisSessionDAOTest { private RedisManager redisManager; private RedisSessionDAO redisSessionDAO; private StringSerializer keySerializer; private String testKey; public class RedisSessionDAOTest { private IRedisManager redisManager; private StringSerializer keySerializer = new StringSerializer(); private ObjectSerializer valueSerializer = new ObjectSerializer(); @BeforeEach public void setUp() { redisManager = mock(IRedisManager.class); } private RedisSessionDAO mountRedisSessionDAO(Integer expire) { RedisSessionDAO redisSessionDAO = new RedisSessionDAO(); if (expire != null) { redisSessionDAO.setExpire(expire); } redisSessionDAO.setKeyPrefix("student:"); redisSessionDAO.setRedisManager(redisManager); return redisSessionDAO; } @Test public void testUpdate() throws SerializationException { RedisSessionDAO sessionDAO = mountRedisSessionDAO(null); StudentSession session = new StudentSession(99, 2000); testValues.add(paulSession); billySession = new FakeSession(3, "billy"); testValues.add(billySession); redisManager = mock(RedisManager.class); when(redisManager.dbSize()).thenReturn(2L); when(redisManager.get(keySerializer.serialize(testPrefix + testKey))).thenReturn(valueSeralizer.serialize(testValue)); when(redisManager.keys(keySerializer.serialize(testPrefix + "*"))).thenReturn(testSet); StudentSession session = new StudentSession(98, 2000); sessionDAO.update(session); verify(redisManager).set(keySerializer.serialize("student:98"), valueSerializer.serialize(session), 3); } @Test public void testUpdateByNoExpire() throws SerializationException { RedisSessionDAO sessionDAO = mountRedisSessionDAO(-1); StudentSession session = new StudentSession(97, 2000); sessionDAO.update(session); verify(redisManager).set(keySerializer.serialize("student:97"), valueSerializer.serialize(session), -1); } @Test public void testDelete() throws SerializationException { RedisSessionDAO sessionDAO = mountRedisSessionDAO(null); StudentSession session = new StudentSession(96, 1000); sessionDAO.delete(session); verify(redisManager).del(keySerializer.serialize("student:96")); } @Test public void testGetActiveSessions() throws SerializationException { Set<byte[]> mockKeys = new HashSet<byte[]>(); mockKeys.add(keySerializer.serialize("student:1")); mockKeys.add(keySerializer.serialize("student:2")); when(redisManager.keys(keySerializer.serialize("student:*"))).thenReturn(mockKeys); StudentSession mockSession1 = new StudentSession(1, 2000); StudentSession mockSession2 = new StudentSession(2, 2000); when(redisManager.get(keySerializer.serialize("student:1"))).thenReturn(valueSerializer.serialize(mockSession1)); when(redisManager.get(keySerializer.serialize("student:2"))).thenReturn(valueSerializer.serialize(mockSession2)); RedisSessionDAO sessionDAO = mountRedisSessionDAO(null); assertThat(sessionDAO.getActiveSessions().size(), is(2)); } } class StudentSession implements Session, Serializable { private Integer id; private long timeout; public StudentSession(Integer id, long timeout) { this.id = id; this.timeout = timeout; } @Override public Serializable getId() { return id; } @Override public Date getStartTimestamp() { return null; } @Override public Date getLastAccessTime() { return null; } @Override public long getTimeout() throws InvalidSessionException { return timeout; } @Override public void setTimeout(long l) throws InvalidSessionException { } @Override public String getHost() { return null; } @Override public void touch() throws InvalidSessionException { } @Override public void stop() throws InvalidSessionException { } @Override public Collection<Object> getAttributeKeys() throws InvalidSessionException { return null; } @Override public Object getAttribute(Object o) throws InvalidSessionException { return null; } @Override public void setAttribute(Object o, Object o1) throws InvalidSessionException { } @Override public Object removeAttribute(Object o) throws InvalidSessionException { return null; } } <MSG> Merge pull request #37 from xchendeveloper/redis-manager-refactor Redis manager refactor <DFF> @@ -16,7 +16,7 @@ import static org.mockito.Mockito.when; public class RedisSessionDAOTest { - private RedisManager redisManager; + private RedisSingletonManager redisManager; private RedisSessionDAO redisSessionDAO; private StringSerializer keySerializer; private String testKey; @@ -47,7 +47,7 @@ public class RedisSessionDAOTest { testValues.add(paulSession); billySession = new FakeSession(3, "billy"); testValues.add(billySession); - redisManager = mock(RedisManager.class); + redisManager = mock(RedisSingletonManager.class); when(redisManager.dbSize()).thenReturn(2L); when(redisManager.get(keySerializer.serialize(testPrefix + testKey))).thenReturn(valueSeralizer.serialize(testValue)); when(redisManager.keys(keySerializer.serialize(testPrefix + "*"))).thenReturn(testSet);
2
Merge pull request #37 from xchendeveloper/redis-manager-refactor
2
.java
java
mit
alexxiyang/shiro-redis
1714
<NME> RedisCache.java <BEF> package org.crazycake.shiro; import org.apache.shiro.cache.Cache; import org.apache.shiro.cache.CacheException; import org.apache.shiro.subject.PrincipalCollection; import org.apache.shiro.util.CollectionUtils; import org.crazycake.shiro.exception.CacheManagerPrincipalIdNotAssignedException; import org.crazycake.shiro.exception.PrincipalIdNullException; import org.crazycake.shiro.exception.PrincipalInstanceException; import org.crazycake.shiro.exception.SerializationException; import org.crazycake.shiro.serializer.RedisSerializer; import org.crazycake.shiro.serializer.StringSerializer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.util.*; /** * Used for setting/getting authorization information from Redis * @param <K> * @param <V> */ public class RedisCache<K, V> implements Cache<K, V> { private static Logger logger = LoggerFactory.getLogger(RedisCache.class); private RedisSerializer keySerializer; private RedisSerializer valueSerializer; private IRedisManager redisManager; private String keyPrefix = RedisCacheManager.DEFAULT_CACHE_KEY_PREFIX; private int expire; private String principalIdFieldName = RedisCacheManager.DEFAULT_PRINCIPAL_ID_FIELD_NAME; /** * * @param redisManager redisManager * @param keySerializer keySerializer * @param valueSerializer valueSerializer * @param prefix authorization prefix * @param expire expire * @param principalIdFieldName id field name of principal object */ public RedisCache(IRedisManager redisManager, RedisSerializer keySerializer, RedisSerializer valueSerializer, String prefix, int expire, String principalIdFieldName) { if (redisManager == null) { throw new IllegalArgumentException("redisManager cannot be null."); } this.redisManager = redisManager; if (keySerializer == null) { throw new IllegalArgumentException("keySerializer cannot be null."); } this.keySerializer = keySerializer; if (valueSerializer == null) { throw new IllegalArgumentException("valueSerializer cannot be null."); } this.valueSerializer = valueSerializer; if (prefix != null && !"".equals(prefix)) { this.keyPrefix = prefix; } this.expire = expire; if (principalIdFieldName != null) { this.principalIdFieldName = principalIdFieldName; } } /** * get shiro authorization redis key-value * @param key key * @return value * @throws CacheException get cache exception */ @Override public V get(K key) throws CacheException { logger.debug("get key [" + key + "]"); if (key == null) { return null; } try { Object redisCacheKey = getRedisCacheKey(key); byte[] rawValue = redisManager.get(keySerializer.serialize(redisCacheKey)); if (rawValue == null) { return null; } V value = (V) valueSerializer.deserialize(rawValue); return value; } catch (SerializationException e) { throw new CacheException(e); } } @Override public V put(K key, V value) throws CacheException { if (key == null) { logger.warn("Saving a null key is meaningless, return value directly without call Redis."); return value; } try { Object redisCacheKey = getRedisCacheKey(key); logger.debug("put key [" + redisCacheKey + "]"); redisManager.set(keySerializer.serialize(redisCacheKey), value != null ? valueSerializer.serialize(value) : null, expire); return value; } catch (SerializationException e) { throw new CacheException(e); } } @Override public V remove(K key) throws CacheException { logger.debug("remove key [" + key + "]"); if (key == null) { return null; } try { Object redisCacheKey = getRedisCacheKey(key); byte[] rawValue = redisManager.get(keySerializer.serialize(redisCacheKey)); V previous = (V) valueSerializer.deserialize(rawValue); redisManager.del(keySerializer.serialize(redisCacheKey)); return previous; } catch (SerializationException e) { throw new CacheException(e); } } /** * get the full Redis key including prefix by Redis key * @param key * @return */ private Object getRedisCacheKey(K key) { if (key == null) { return null; } if (keySerializer instanceof StringSerializer) { return this.keyPrefix + getStringRedisKey(key); } return key; } private String getRedisKeyFromPrincipalIdField(PrincipalCollection key) { Object principalObject = key.getPrimaryPrincipal(); Method pincipalIdGetter = getPrincipalIdGetter(principalObject); return getIdObj(principalObject, pincipalIdGetter); } } else { redisKey = key.toString(); } return redisKey; } /** * get the Redis key (not including prefix) by PrincipalCollection * @param key * @return */ private String getRedisKeyFromPrincipalIdField(PrincipalCollection key) { Object principalObject = key.getPrimaryPrincipal(); if (principalObject instanceof String) { return principalObject.toString(); } Method pincipalIdGetter = getPrincipalIdGetter(principalObject); return getIdObj(principalObject, pincipalIdGetter); } private String getIdObj(Object principalObject, Method pincipalIdGetter) { String redisKey; try { Object idObj = pincipalIdGetter.invoke(principalObject); if (idObj == null) { throw new PrincipalIdNullException(principalObject.getClass(), this.principalIdFieldName); } redisKey = idObj.toString(); } catch (IllegalAccessException e) { throw new PrincipalInstanceException(principalObject.getClass(), this.principalIdFieldName, e); } catch (InvocationTargetException e) { throw new PrincipalInstanceException(principalObject.getClass(), this.principalIdFieldName, e); } return redisKey; } private Method getPrincipalIdGetter(Object principalObject) { Method pincipalIdGetter = null; String principalIdMethodName = this.getPrincipalIdMethodName(); try { pincipalIdGetter = principalObject.getClass().getMethod(principalIdMethodName); } catch (NoSuchMethodException e) { throw new PrincipalInstanceException(principalObject.getClass(), this.principalIdFieldName); } return pincipalIdGetter; } private String getPrincipalIdMethodName() { if (this.principalIdFieldName == null || "".equals(this.principalIdFieldName)) { throw new CacheManagerPrincipalIdNotAssignedException(); } return "get" + this.principalIdFieldName.substring(0, 1).toUpperCase() + this.principalIdFieldName.substring(1); } @Override public void clear() throws CacheException { logger.debug("clear cache"); Set<byte[]> keys = null; try { keys = redisManager.keys(keySerializer.serialize(this.keyPrefix + "*")); } catch (SerializationException e) { logger.error("get keys error", e); } if (keys == null || keys.size() == 0) { return; } for (byte[] key: keys) { redisManager.del(key); } } /** * get all authorization key-value quantity * @return key-value size */ @Override public int size() { Long longSize = 0L; try { longSize = new Long(redisManager.dbSize(keySerializer.serialize(this.keyPrefix + "*"))); } catch (SerializationException e) { logger.error("get keys error", e); } return longSize.intValue(); } @SuppressWarnings("unchecked") @Override public Set<K> keys() { Set<byte[]> keys = null; try { keys = redisManager.keys(keySerializer.serialize(this.keyPrefix + "*")); } catch (SerializationException e) { logger.error("get keys error", e); return Collections.emptySet(); } if (CollectionUtils.isEmpty(keys)) { return Collections.emptySet(); } Set<K> convertedKeys = new HashSet<K>(); for (byte[] key:keys) { try { convertedKeys.add((K) keySerializer.deserialize(key)); } catch (SerializationException e) { logger.error("deserialize keys error", e); } } return convertedKeys; } @Override public Collection<V> values() { Set<byte[]> keys = null; try { keys = redisManager.keys(keySerializer.serialize(this.keyPrefix + "*")); } catch (SerializationException e) { logger.error("get values error", e); return Collections.emptySet(); } if (CollectionUtils.isEmpty(keys)) { return Collections.emptySet(); } List<V> values = new ArrayList<V>(keys.size()); for (byte[] key : keys) { V value = null; try { value = (V) valueSerializer.deserialize(redisManager.get(key)); } catch (SerializationException e) { logger.error("deserialize values= error", e); } if (value != null) { values.add(value); } } return Collections.unmodifiableList(values); } public String getKeyPrefix() { return keyPrefix; } public void setKeyPrefix(String keyPrefix) { this.keyPrefix = keyPrefix; } public String getPrincipalIdFieldName() { return principalIdFieldName; } public void setPrincipalIdFieldName(String principalIdFieldName) { this.principalIdFieldName = principalIdFieldName; } } <MSG> add check for String <DFF> @@ -142,7 +142,9 @@ public class RedisCache<K, V> implements Cache<K, V> { private String getRedisKeyFromPrincipalIdField(PrincipalCollection key) { Object principalObject = key.getPrimaryPrincipal(); - + if (principalObject instanceof String) { + return principalObject.toString(); + } Method pincipalIdGetter = getPrincipalIdGetter(principalObject); return getIdObj(principalObject, pincipalIdGetter); }
3
add check for String
1
.java
java
mit
alexxiyang/shiro-redis
1715
<NME> README.md <BEF> shiro-redis ============= [![Build Status](https://travis-ci.org/alexxiyang/shiro-redis.svg?branch=master)](https://travis-ci.org/alexxiyang/shiro-redis) [![Maven Central](https://maven-badges.herokuapp.com/maven-central/org.crazycake/shiro-redis/badge.svg)](https://maven-badges.herokuapp.com/maven-central/org.crazycake/shiro-redis) shiro only provide the support of ehcache and concurrentHashMap. Here is an implement of redis cache can be used by shiro. Hope it will help you! # Download You use either of the following 2 ways to include `shiro-redis` into your project * use `git clone https://github.com/alexxiyang/shiro-redis.git` to clone project to your local workspace and build jar file by your self * add maven dependency ```xml <dependency> <groupId>org.crazycake</groupId> <artifactId>shiro-redis</artifactId> <version>3.3.0</version> </dependency> ``` > **Note:**\ > Do not use version < 3.1.0\ ## shiro-core/jedis Version Comparison Charts | shiro-redis | shiro | jedis | | :----------------:| :-------: | :-------: | | 3.2.3 | 1.3.2 | 2.9.0 | | 3.3.0 | 1.6.0 | 3.3.0 | # Before use Here is the first thing you need to know. Shiro-redis needs an id field to identify your authorization object in Redis. So please make sure your principal class has a field which you can get unique id of this object. Please setting this id field name by `cacheManager.principalIdFieldName = <your id field name of principal object>` For example: If you create `SimpleAuthenticationInfo` like this: ```java @Override protected AuthenticationInfo doGetAuthenticationInfo(AuthenticationToken token) throws AuthenticationException { UsernamePasswordToken usernamePasswordToken = (UsernamePasswordToken)token; UserInfo userInfo = new UserInfo(); userInfo.setUsername(usernamePasswordToken.getUsername()); return new SimpleAuthenticationInfo(userInfo, "123456", getName()); } ``` Then the `userInfo` object is your principal object. You need to make sure `UserInfo` has an unique field for Redis to identify it. Take `userId` as an example: ```java public class UserInfo implements Serializable{ private Integer userId private String username; public String getUsername() { return username; } public void setUsername(String username) { this.username = username; } public Integer getUserId() { return this.userId; } } ``` Put userId as the value of `cacheManager.principalIdFieldName`, like this: ```properties cacheManager.principalIdFieldName = userId ``` If you're using Spring, the configuration should be ```xml <property name="principalIdFieldName" value="userId" /> ``` Then `shiro-redis` will call `userInfo.getUserId()` to get the id for saving Redis object. # How to configure ? You can configure `shiro-redis` either in `shiro.ini` or in `spring-*.xml` ## shiro.ini Here is the configuration example for shiro.ini. ### Redis Standalone If you are running Redis in Standalone mode ```properties [main] #==================================== # shiro-redis configuration [start] #==================================== #=================================== # Redis Manager [start] #=================================== # Create redisManager redisManager = org.crazycake.shiro.RedisManager # Redis host. If you don't specify host the default value is 127.0.0.1:6379 redisManager.host = 127.0.0.1:6379 #=================================== # Redis Manager [end] #=================================== #========================================= # Redis session DAO [start] #========================================= # Create redisSessionDAO redisSessionDAO = org.crazycake.shiro.RedisSessionDAO # Use redisManager as cache manager redisSessionDAO.redisManager = $redisManager sessionManager = org.apache.shiro.web.session.mgt.DefaultWebSessionManager sessionManager.sessionDAO = $redisSessionDAO securityManager.sessionManager = $sessionManager #========================================= # Redis session DAO [end] #========================================= #========================================== # Redis cache manager [start] #========================================== # Create cacheManager cacheManager = org.crazycake.shiro.RedisCacheManager # Principal id field name. The field which you can get unique id to identify this principal. # For example, if you use UserInfo as Principal class, the id field maybe `id`, `userId`, `email`, etc. # Remember to add getter to this id field. For example, `getId()`, `getUserId()`, `getEmail()`, etc. # Default value is id, that means your principal object must has a method called `getId()` cacheManager.principalIdFieldName = id # Use redisManager as cache manager cacheManager.redisManager = $redisManager securityManager.cacheManager = $cacheManager #========================================== # Redis cache manager [end] #========================================== #================================= # shiro-redis configuration [end] #================================= ``` For complete configurable options list, check [Configurable Options](#configurable-options). Here is a [tutorial project](https://github.com/alexxiyang/shiro-redis-tutorial) for you to understand how to configure `shiro-redis` in `shiro.ini`. ### Redis Sentinel if you're using Redis Sentinel, please replace the `redisManager` configuration of the standalone version into the following: ```properties #=================================== # Redis Manager [start] #=================================== # Create redisManager redisManager = org.crazycake.shiro.RedisSentinelManager # Sentinel host. If you don't specify host the default value is 127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381 redisManager.host = 127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381 # Sentinel master name redisManager.masterName = mymaster #=================================== # Redis Manager [end] #=================================== ``` For complete configurable options list, check [Configurable Options](#configurable-options). ### Redis Cluster If you're using redis cluster, please replace the `redisManager` configuration of the standalone version into the following: ```properties #=================================== # Redis Manager [start] #=================================== # Create redisManager redisManager = org.crazycake.shiro.RedisClusterManager # Redis host and port list redisManager.host = 192.168.21.3:7000,192.168.21.3:7001,192.168.21.3:7002,192.168.21.3:7003,192.168.21.3:7004,192.168.21.3:7005 #=================================== # Redis Manager [end] #=================================== ``` For complete configurable options list, check [Configurable Options](#configurable-options). ## Spring If you are using Spring ### Redis Standalone If you are running Redis in Standalone mode ```xml <!-- shiro-redis configuration [start] --> <!-- Redis Manager [start] --> <bean id="redisManager" class="org.crazycake.shiro.RedisManager"> <property name="host" value="127.0.0.1:6379"/> </bean> <!-- Redis Manager [end] --> <!-- Redis session DAO [start] --> <bean id="redisSessionDAO" class="org.crazycake.shiro.RedisSessionDAO"> <property name="redisManager" ref="redisManager" /> </bean> <bean id="sessionManager" class="org.apache.shiro.web.session.mgt.DefaultWebSessionManager"> <property name="sessionDAO" ref="redisSessionDAO" /> </bean> <!-- Redis session DAO [end] --> <!-- Redis cache manager [start] --> <bean id="cacheManager" class="org.crazycake.shiro.RedisCacheManager"> <property name="redisManager" ref="redisManager" /> </bean> <!-- Redis cache manager [end] --> <bean id="securityManager" class="org.apache.shiro.web.mgt.DefaultWebSecurityManager"> <property name="sessionManager" ref="sessionManager" /> <property name="cacheManager" ref="cacheManager" /> <!-- other configurations --> <property name="realm" ref="exampleRealm"/> <property name="rememberMeManager.cipherKey" value="kPH+bIxk5D2deZiIxcaaaA==" /> </bean> <!-- shiro-redis configuration [end] --> ``` For complete configurable options list, check [Configurable Options](#configurable-options). Here is a [tutorial project](https://github.com/alexxiyang/shiro-redis-spring-tutorial) for you to understand how to configure `shiro-redis` in spring configuration file. ### Redis Sentinel If you use redis sentinel, please replace the `redisManager` configuration of the standalone version into the following: ```xml <!-- shiro-redis configuration [start] --> <!-- shiro redisManager --> <bean id="redisManager" class="org.crazycake.shiro.RedisSentinelManager"> <property name="host" value="127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381"/> <property name="masterName" value="mymaster"/> </bean> ``` For complete configurable options list, check [Configurable Options](#configurable-options). ### Redis Cluster If you use redis cluster, please replace the `redisManager` configuration of the standalone version into the following: ```xml <!-- shiro-redis configuration [start] --> <!-- shiro redisManager --> <bean id="redisManager" class="org.crazycake.shiro.RedisClusterManager"> <property name="host" value="192.168.21.3:7000,192.168.21.3:7001,192.168.21.3:7002,192.168.21.3:7003,192.168.21.3:7004,192.168.21.3:7005"/> </bean> ``` For complete configurable options list, check [Configurable Options](#configurable-options). ## Serializer Since redis only accept `byte[]`, there comes a serializer problem. Shiro-redis is using `StringSerializer` as key serializer and `ObjectSerializer` as value serializer. You can use your own custom serializer, as long as this custom serializer implements `org.crazycake.shiro.serializer.RedisSerializer` For example, we can change the charset of keySerializer like this ```properties # If you want change charset of keySerializer or use your own custom serializer, you need to define serializer first # # cacheManagerKeySerializer = org.crazycake.shiro.serializer.StringSerializer # Supported encodings refer to https://docs.oracle.com/javase/8/docs/technotes/guides/intl/encoding.doc.html # UTF-8, UTF-16, UTF-32, ISO-8859-1, GBK, Big5, etc # # cacheManagerKeySerializer.charset = UTF-8 # cacheManager.keySerializer = $cacheManagerKeySerializer ``` These 4 options that you can replace them with your cutom serializers: - cacheManager.keySerializer - cacheManager.valueSerializer - redisSessionDAO.keySerializer - redisSessionDAO.valueSerializer ## Configurable Options Here are all the available options you can use in `shiro-redis` configuration file. ### RedisManager | Title | Default | Description | | :------------------| :------------------- | :---------------------------| | host | `127.0.0.1:6379` | Redis host. If you don't specify host the default value is `127.0.0.1:6379`. If you run redis in sentinel mode or cluster mode, separate host names with comma, like `127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381` | | masterName | `mymaster` | **Only used for sentinel mode**<br>The master node of Redis sentinel mode | | timeout | `2000` | Redis connect timeout. Timeout for jedis try to connect to redis server(In milliseconds) | | soTimeout | `2000` | **Only used for sentinel mode or cluster mode**<br>The timeout for jedis try to read data from redis server | | maxAttempts | `3` | **Only used for cluster mode**<br>Max attempts to connect to server | | password | | Redis password | | database | `0` | Redis database. Default value is 0 | | jedisPoolConfig | `new redis.clients.jedis.JedisPoolConfig()` | JedisPoolConfig. You can create your own JedisPoolConfig instance and set attributes as you wish<br>Most of time, you don't need to set jedisPoolConfig<br>Here is an example.<br>`jedisPoolConfig = redis.clients.jedis.JedisPoolConfig`<br>`jedisPoolConfig.testWhileIdle = false`<br>`redisManager.jedisPoolConfig = jedisPoolConfig` | | count | `100` | Scan count. Shiro-redis use Scan to get keys, so you can define the number of elements returned at every iteration. | | jedisPool | `null` | **Only used for sentinel mode or single mode**<br>You can create your own JedisPool instance and set attributes as you wish | ### RedisSessionDAO | Title | Default | Description | | :------------------| :------------------- | :---------------------------| | redisManager | | RedisManager which you just configured above (Required) | | expire | `-2` | Redis cache key/value expire time. The expire time is in second.<br>Special values:<br>`-1`: no expire<br>`-2`: the same timeout with session<br>Default value: `-2`<br>**Note**: Make sure expire time is longer than session timeout. | | keyPrefix | `shiro:session:` | Custom your redis key prefix for session management<br>**Note**: Remember to add colon at the end of prefix. | | sessionInMemoryTimeout | `1000` | When we do signin, `doReadSession(sessionId)` will be called by shiro about 10 times. So shiro-redis save Session in ThreadLocal to remit this problem. sessionInMemoryTimeout is expiration of Session in ThreadLocal. <br>Most of time, you don't need to change it. | | sessionInMemoryEnabled | `true` | Whether or not enable temporary save session in ThreadLocal | | keySerializer | `org.crazycake.shiro.serializer.StringSerializer` | The key serializer of cache manager<br>You can change the implement of key serializer or the encoding of StringSerializer.<br>Supported encodings refer to [Supported Encodings](https://docs.oracle.com/javase/8/docs/technotes/guides/intl/encoding.doc.html). Such as `UTF-8`, `UTF-16`, `UTF-32`, `ISO-8859-1`, `GBK`, `Big5`, etc<br>For more detail, check [Serializer](#serializer) | | valueSerializer | `org.crazycake.shiro.serializer.ObjectSerializer` | The value serializer of cache manager<br>You can change the implement of value serializer<br>For more detail, check [Serializer](#serializer) | ### CacheManager | Title | Default | Description | | :--------------------| :------------------- | :---------------------------| | redisManager | | RedisManager which you just configured above (Required) | | principalIdFieldName | `id` | Principal id field name. The field which you can get unique id to identify this principal.<br>For example, if you use UserInfo as Principal class, the id field maybe `id`, `userId`, `email`, etc.<br>Remember to add getter to this id field. For example, `getId()`, `getUserId(`), `getEmail()`, etc.<br>Default value is `id`, that means your principal object must has a method called `getId()` | | expire | `1800` | Redis cache key/value expire time. <br>The expire time is in second. | | keyPrefix | `shiro:cache:` | Custom your redis key prefix for cache management<br>**Note**: Remember to add colon at the end of prefix. | | keySerializer | `org.crazycake.shiro.serializer.StringSerializer` | The key serializer of cache manager<br>You can change the implement of key serializer or the encoding of StringSerializer.<br>Supported encodings refer to [Supported Encodings](https://docs.oracle.com/javase/8/docs/technotes/guides/intl/encoding.doc.html). Such as `UTF-8`, `UTF-16`, `UTF-32`, `ISO-8859-1`, `GBK`, `Big5`, etc<br>For more detail, check [Serializer](#serializer) | | valueSerializer | `org.crazycake.shiro.serializer.ObjectSerializer` | The value serializer of cache manager<br>You can change the implement of value serializer<br>For more detail, check [Serializer](#serializer) | # Spring boot starter Using `Spring-Boot` integration is the easiest way to integrate `shiro-redis` into a Spring-base application. > Note: `shiro-redis-spring-boot-starter` version `3.2.1` is based on `shiro-spring-boot-web-starter` version `1.4.0-RC2` First include the `shiro-redis` Spring boot starter dependency in you application classpath ```xml <dependency> <groupId>org.crazycake</groupId> <artifactId>shiro-redis-spring-boot-starter</artifactId> <version>3.3.1</version> </dependency> ``` The next step depends on whether you've created your own `SessionManager` or `SessionsSecurityManager`. ## If you haven't created your own `SessionManager` or `SessionsSecurityManager` If you don't have your own `SessionManager` or `SessionsSecurityManager` in your configuration, `shiro-redis-spring-boot-starter` will create `RedisSessionDAO` and `RedisCacheManager` for you. Then inject them into `SessionManager` and `SessionsSecurityManager` automatically. So, You are all set. Enjoy it! ## If you have created your own `SessionManager` or `SessionsSecurityManager` If you have created your own `SessionManager` or `SessionsSecurityManager` like this: ```java @Bean public SessionsSecurityManager securityManager(List<Realm> realms) { DefaultWebSecurityManager securityManager = new DefaultWebSecurityManager(realms); // other stuff... return securityManager; } ``` Then inject `redisSessionDAO` and `redisCacheManager` which created by `shiro-redis-spring-boot-starter` already ```java @Autowired RedisSessionDAO redisSessionDAO; @Autowired RedisCacheManager redisCacheManager; ``` Inject them into your own `SessionManager` and `SessionsSecurityManager` ```java @Bean public SessionManager sessionManager() { DefaultWebSessionManager sessionManager = new DefaultWebSessionManager(); // inject redisSessionDAO sessionManager.setSessionDAO(redisSessionDAO); // other stuff... return sessionManager; } @Bean public SessionsSecurityManager securityManager(List<Realm> realms, SessionManager sessionManager) { DefaultWebSecurityManager securityManager = new DefaultWebSecurityManager(realms); //inject sessionManager securityManager.setSessionManager(sessionManager); // inject redisCacheManager securityManager.setCacheManager(redisCacheManager); // other stuff... return securityManager; } ``` For full example, see [shiro-redis-spring-boot-tutorial](https://github.com/alexxiyang/shiro-redis-spring-boot-tutorial) ### Configuration Properties Here are all available options you can use in Spring-boot starter configuration | Title | Default | Description | | :--------------------------------------------------| :------------------- | :---------------------------| | shiro-redis.enabled | `true` | Enables shiro-redis’s Spring module | | shiro-redis.redis-manager.deploy-mode | `standalone` | Redis deploy mode. Options: `standalone`, `sentinel`, 'cluster' | | shiro-redis.redis-manager.host | `127.0.0.1:6379` | Redis host. If you don't specify host the default value is `127.0.0.1:6379`. If you run redis in sentinel mode or cluster mode, separate host names with comma, like `127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381` | | shiro-redis.redis-manager.master-name | `mymaster` | **Only used for sentinel mode**<br>The master node of Redis sentinel mode | | shiro-redis.redis-manager.timeout | `2000` | Redis connect timeout. Timeout for jedis try to connect to redis server(In milliseconds) | | shiro-redis.redis-manager.so-timeout | `2000` | **Only used for sentinel mode or cluster mode**<br>The timeout for jedis try to read data from redis server | | shiro-redis.redis-manager.max-attempts | `3` | **Only used for cluster mode**<br>Max attempts to connect to server | | shiro-redis.redis-manager.password | | Redis password | | shiro-redis.redis-manager.database | `0` | Redis database. Default value is 0 | | shiro-redis.redis-manager.count | `100` | Scan count. Shiro-redis use Scan to get keys, so you can define the number of elements returned at every iteration. | | shiro-redis.session-dao.expire | `-2` | Redis cache key/value expire time. The expire time is in second.<br>Special values:<br>`-1`: no expire<br>`-2`: the same timeout with session<br>Default value: `-2`<br>**Note**: Make sure expire time is longer than session timeout. | | shiro-redis.session-dao.key-prefix | `shiro:session:` | Custom your redis key prefix for session management<br>**Note**: Remember to add colon at the end of prefix. | | shiro-redis.session-dao.session-in-memory-timeout | `1000` | When we do signin, `doReadSession(sessionId)` will be called by shiro about 10 times. So shiro-redis save Session in ThreadLocal to remit this problem. sessionInMemoryTimeout is expiration of Session in ThreadLocal. <br>Most of time, you don't need to change it. | | shiro-redis.session-dao.session-in-memory-enabled | `true` | Whether or not enable temporary save session in ThreadLocal | | shiro-redis.cache-manager.principal-id-field-name | `id` | Principal id field name. The field which you can get unique id to identify this principal.<br>For example, if you use UserInfo as Principal class, the id field maybe `id`, `userId`, `email`, etc.<br>Remember to add getter to this id field. For example, `getId()`, `getUserId(`), `getEmail()`, etc.<br>Default value is `id`, that means your principal object must has a method called `getId()` | | shiro-redis.cache-manager.expire | `1800` | Redis cache key/value expire time. <br>The expire time is in second. | | shiro-redis.cache-manager.key-prefix | `shiro:cache:` | Custom your redis key prefix for cache management<br>**Note**: Remember to add colon at the end of prefix. | ## Working with `spring-boot-devtools` If you are using `shiro-redis` with `spring-boot-devtools`. Please add this line to `resources/META-INF/spring-devtools.properties` (Create it if there is no this file): ```ini restart.include.shiro-redis=/shiro-[\\w-\\.]+jar ``` # If you found any bugs Please create the issue 可以用中文 <MSG> Use jdk 1.8 in 3.3.1 <DFF> @@ -16,19 +16,21 @@ You use either of the following 2 ways to include `shiro-redis` into your projec <dependency> <groupId>org.crazycake</groupId> <artifactId>shiro-redis</artifactId> - <version>3.3.0</version> + <version>3.3.1</version> </dependency> ``` -> **Note:**\ -> Do not use version < 3.1.0\ +> **Note:** +> 3.3.0 is compiled by java11 +> 3.3.1 is compiled by java8 ## shiro-core/jedis Version Comparison Charts | shiro-redis | shiro | jedis | | :----------------:| :-------: | :-------: | | 3.2.3 | 1.3.2 | 2.9.0 | -| 3.3.0 | 1.6.0 | 3.3.0 | +| 3.3.0 (java11) | 1.6.0 | 3.3.0 | +| 3.3.1 (java8) | 1.6.0 | 3.3.0 | # Before use Here is the first thing you need to know. Shiro-redis needs an id field to identify your authorization object in Redis. So please make sure your principal class has a field which you can get unique id of this object. Please setting this id field name by `cacheManager.principalIdFieldName = <your id field name of principal object>`
6
Use jdk 1.8 in 3.3.1
4
.md
md
mit
alexxiyang/shiro-redis
1716
<NME> README.md <BEF> shiro-redis ============= [![Build Status](https://travis-ci.org/alexxiyang/shiro-redis.svg?branch=master)](https://travis-ci.org/alexxiyang/shiro-redis) [![Maven Central](https://maven-badges.herokuapp.com/maven-central/org.crazycake/shiro-redis/badge.svg)](https://maven-badges.herokuapp.com/maven-central/org.crazycake/shiro-redis) shiro only provide the support of ehcache and concurrentHashMap. Here is an implement of redis cache can be used by shiro. Hope it will help you! # Download You use either of the following 2 ways to include `shiro-redis` into your project * use `git clone https://github.com/alexxiyang/shiro-redis.git` to clone project to your local workspace and build jar file by your self * add maven dependency ```xml <dependency> <groupId>org.crazycake</groupId> <artifactId>shiro-redis</artifactId> <version>3.3.1</version> </dependency> ``` > **Note:**\ > 3.3.0 is compiled in java11 by mistake. > Please use 3.3.1 which is compiled in java8 ## shiro-core/jedis Version Comparison Charts | shiro-redis | shiro | jedis | | :----------------:| :-------: | :-------: | | 3.2.3 | 1.3.2 | 2.9.0 | | 3.3.0 (java11) | 1.6.0 | 3.3.0 | | 3.3.1 (java8) | 1.6.0 | 3.3.0 | # Before use Here is the first thing you need to know. Shiro-redis needs an id field to identify your authorization object in Redis. So please make sure your principal class has a field which you can get unique id of this object. Please setting this id field name by `cacheManager.principalIdFieldName = <your id field name of principal object>` For example: If you create `SimpleAuthenticationInfo` like this: ```java @Override protected AuthenticationInfo doGetAuthenticationInfo(AuthenticationToken token) throws AuthenticationException { UsernamePasswordToken usernamePasswordToken = (UsernamePasswordToken)token; UserInfo userInfo = new UserInfo(); userInfo.setUsername(usernamePasswordToken.getUsername()); return new SimpleAuthenticationInfo(userInfo, "123456", getName()); } ``` Then the `userInfo` object is your principal object. You need to make sure `UserInfo` has an unique field for Redis to identify it. Take `userId` as an example: ```java public class UserInfo implements Serializable{ private Integer userId private String username; public String getUsername() { return username; } public void setUsername(String username) { this.username = username; } public Integer getUserId() { return this.userId; } } ``` Put userId as the value of `cacheManager.principalIdFieldName`, like this: ```properties cacheManager.principalIdFieldName = userId ``` If you're using Spring, the configuration should be ```xml <property name="principalIdFieldName" value="userId" /> ``` Then `shiro-redis` will call `userInfo.getUserId()` to get the id for saving Redis object. # How to configure ? You can configure `shiro-redis` either in `shiro.ini` or in `spring-*.xml` ## shiro.ini Here is the configuration example for shiro.ini. ### Redis Standalone If you are running Redis in Standalone mode ```properties [main] #==================================== # shiro-redis configuration [start] #==================================== #=================================== # Redis Manager [start] #=================================== # Create redisManager redisManager = org.crazycake.shiro.RedisManager # Redis host. If you don't specify host the default value is 127.0.0.1:6379 redisManager.host = 127.0.0.1:6379 #=================================== # Redis Manager [end] #=================================== #========================================= # Redis session DAO [start] #========================================= # Create redisSessionDAO redisSessionDAO = org.crazycake.shiro.RedisSessionDAO # Use redisManager as cache manager redisSessionDAO.redisManager = $redisManager sessionManager = org.apache.shiro.web.session.mgt.DefaultWebSessionManager sessionManager.sessionDAO = $redisSessionDAO securityManager.sessionManager = $sessionManager #========================================= # Redis session DAO [end] #========================================= #========================================== # Redis cache manager [start] #========================================== # Create cacheManager cacheManager = org.crazycake.shiro.RedisCacheManager # Principal id field name. The field which you can get unique id to identify this principal. # For example, if you use UserInfo as Principal class, the id field maybe `id`, `userId`, `email`, etc. # Remember to add getter to this id field. For example, `getId()`, `getUserId()`, `getEmail()`, etc. # Default value is id, that means your principal object must has a method called `getId()` cacheManager.principalIdFieldName = id # Use redisManager as cache manager cacheManager.redisManager = $redisManager securityManager.cacheManager = $cacheManager #========================================== # Redis cache manager [end] #========================================== #================================= # shiro-redis configuration [end] #================================= ``` For complete configurable options list, check [Configurable Options](#configurable-options). Here is a [tutorial project](https://github.com/alexxiyang/shiro-redis-tutorial) for you to understand how to configure `shiro-redis` in `shiro.ini`. ### Redis Sentinel if you're using Redis Sentinel, please replace the `redisManager` configuration of the standalone version into the following: ```properties #=================================== # Redis Manager [start] #=================================== # Create redisManager redisManager = org.crazycake.shiro.RedisSentinelManager # Sentinel host. If you don't specify host the default value is 127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381 redisManager.host = 127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381 # Sentinel master name redisManager.masterName = mymaster #=================================== # Redis Manager [end] #=================================== ``` For complete configurable options list, check [Configurable Options](#configurable-options). ### Redis Cluster If you're using redis cluster, please replace the `redisManager` configuration of the standalone version into the following: ```properties #=================================== # Redis Manager [start] #=================================== # Create redisManager redisManager = org.crazycake.shiro.RedisClusterManager # Redis host and port list redisManager.host = 192.168.21.3:7000,192.168.21.3:7001,192.168.21.3:7002,192.168.21.3:7003,192.168.21.3:7004,192.168.21.3:7005 #=================================== # Redis Manager [end] #=================================== ``` For complete configurable options list, check [Configurable Options](#configurable-options). ## Spring If you are using Spring ### Redis Standalone If you are running Redis in Standalone mode ```xml <!-- shiro-redis configuration [start] --> <!-- Redis Manager [start] --> <bean id="redisManager" class="org.crazycake.shiro.RedisManager"> <property name="host" value="127.0.0.1:6379"/> </bean> <!-- Redis Manager [end] --> <!-- Redis session DAO [start] --> <bean id="redisSessionDAO" class="org.crazycake.shiro.RedisSessionDAO"> <property name="redisManager" ref="redisManager" /> </bean> <bean id="sessionManager" class="org.apache.shiro.web.session.mgt.DefaultWebSessionManager"> <property name="sessionDAO" ref="redisSessionDAO" /> </bean> <!-- Redis session DAO [end] --> <!-- Redis cache manager [start] --> <bean id="cacheManager" class="org.crazycake.shiro.RedisCacheManager"> <property name="redisManager" ref="redisManager" /> </bean> <!-- Redis cache manager [end] --> <bean id="securityManager" class="org.apache.shiro.web.mgt.DefaultWebSecurityManager"> <property name="sessionManager" ref="sessionManager" /> <property name="cacheManager" ref="cacheManager" /> <!-- other configurations --> <property name="realm" ref="exampleRealm"/> <property name="rememberMeManager.cipherKey" value="kPH+bIxk5D2deZiIxcaaaA==" /> </bean> <!-- shiro-redis configuration [end] --> ``` For complete configurable options list, check [Configurable Options](#configurable-options). Here is a [tutorial project](https://github.com/alexxiyang/shiro-redis-spring-tutorial) for you to understand how to configure `shiro-redis` in spring configuration file. ### Redis Sentinel If you use redis sentinel, please replace the `redisManager` configuration of the standalone version into the following: ```xml <!-- shiro-redis configuration [start] --> <!-- shiro redisManager --> <bean id="redisManager" class="org.crazycake.shiro.RedisSentinelManager"> <property name="host" value="127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381"/> <property name="masterName" value="mymaster"/> </bean> ``` For complete configurable options list, check [Configurable Options](#configurable-options). ### Redis Cluster If you use redis cluster, please replace the `redisManager` configuration of the standalone version into the following: ```xml <!-- shiro-redis configuration [start] --> <!-- shiro redisManager --> <bean id="redisManager" class="org.crazycake.shiro.RedisClusterManager"> <property name="host" value="192.168.21.3:7000,192.168.21.3:7001,192.168.21.3:7002,192.168.21.3:7003,192.168.21.3:7004,192.168.21.3:7005"/> </bean> ``` For complete configurable options list, check [Configurable Options](#configurable-options). ## Serializer Since redis only accept `byte[]`, there comes a serializer problem. Shiro-redis is using `StringSerializer` as key serializer and `ObjectSerializer` as value serializer. You can use your own custom serializer, as long as this custom serializer implements `org.crazycake.shiro.serializer.RedisSerializer` For example, we can change the charset of keySerializer like this ```properties # If you want change charset of keySerializer or use your own custom serializer, you need to define serializer first # # cacheManagerKeySerializer = org.crazycake.shiro.serializer.StringSerializer # Supported encodings refer to https://docs.oracle.com/javase/8/docs/technotes/guides/intl/encoding.doc.html # UTF-8, UTF-16, UTF-32, ISO-8859-1, GBK, Big5, etc # # cacheManagerKeySerializer.charset = UTF-8 # cacheManager.keySerializer = $cacheManagerKeySerializer ``` These 4 options that you can replace them with your cutom serializers: - cacheManager.keySerializer - cacheManager.valueSerializer - redisSessionDAO.keySerializer - redisSessionDAO.valueSerializer ## Configurable Options Here are all the available options you can use in `shiro-redis` configuration file. ### RedisManager | Title | Default | Description | | :------------------| :------------------- | :---------------------------| | host | `127.0.0.1:6379` | Redis host. If you don't specify host the default value is `127.0.0.1:6379`. If you run redis in sentinel mode or cluster mode, separate host names with comma, like `127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381` | | masterName | `mymaster` | **Only used for sentinel mode**<br>The master node of Redis sentinel mode | | timeout | `2000` | Redis connect timeout. Timeout for jedis try to connect to redis server(In milliseconds) | | soTimeout | `2000` | **Only used for sentinel mode or cluster mode**<br>The timeout for jedis try to read data from redis server | | maxAttempts | `3` | **Only used for cluster mode**<br>Max attempts to connect to server | | password | | Redis password | | database | `0` | Redis database. Default value is 0 | | jedisPoolConfig | `new redis.clients.jedis.JedisPoolConfig()` | JedisPoolConfig. You can create your own JedisPoolConfig instance and set attributes as you wish<br>Most of time, you don't need to set jedisPoolConfig<br>Here is an example.<br>`jedisPoolConfig = redis.clients.jedis.JedisPoolConfig`<br>`jedisPoolConfig.testWhileIdle = false`<br>`redisManager.jedisPoolConfig = jedisPoolConfig` | | count | `100` | Scan count. Shiro-redis use Scan to get keys, so you can define the number of elements returned at every iteration. | | jedisPool | `null` | **Only used for sentinel mode or single mode**<br>You can create your own JedisPool instance and set attributes as you wish | ### RedisSessionDAO | Title | Default | Description | | :------------------| :------------------- | :---------------------------| | redisManager | | RedisManager which you just configured above (Required) | | expire | `-2` | Redis cache key/value expire time. The expire time is in second.<br>Special values:<br>`-1`: no expire<br>`-2`: the same timeout with session<br>Default value: `-2`<br>**Note**: Make sure expire time is longer than session timeout. | | keyPrefix | `shiro:session:` | Custom your redis key prefix for session management<br>**Note**: Remember to add colon at the end of prefix. | | sessionInMemoryTimeout | `1000` | When we do signin, `doReadSession(sessionId)` will be called by shiro about 10 times. So shiro-redis save Session in ThreadLocal to remit this problem. sessionInMemoryTimeout is expiration of Session in ThreadLocal. <br>Most of time, you don't need to change it. | | sessionInMemoryEnabled | `true` | Whether or not enable temporary save session in ThreadLocal | | keySerializer | `org.crazycake.shiro.serializer.StringSerializer` | The key serializer of cache manager<br>You can change the implement of key serializer or the encoding of StringSerializer.<br>Supported encodings refer to [Supported Encodings](https://docs.oracle.com/javase/8/docs/technotes/guides/intl/encoding.doc.html). Such as `UTF-8`, `UTF-16`, `UTF-32`, `ISO-8859-1`, `GBK`, `Big5`, etc<br>For more detail, check [Serializer](#serializer) | | valueSerializer | `org.crazycake.shiro.serializer.ObjectSerializer` | The value serializer of cache manager<br>You can change the implement of value serializer<br>For more detail, check [Serializer](#serializer) | ### CacheManager | Title | Default | Description | | :--------------------| :------------------- | :---------------------------| | redisManager | | RedisManager which you just configured above (Required) | | principalIdFieldName | `id` | Principal id field name. The field which you can get unique id to identify this principal.<br>For example, if you use UserInfo as Principal class, the id field maybe `id`, `userId`, `email`, etc.<br>Remember to add getter to this id field. For example, `getId()`, `getUserId(`), `getEmail()`, etc.<br>Default value is `id`, that means your principal object must has a method called `getId()` | | expire | `1800` | Redis cache key/value expire time. <br>The expire time is in second. | | keyPrefix | `shiro:cache:` | Custom your redis key prefix for cache management<br>**Note**: Remember to add colon at the end of prefix. | | keySerializer | `org.crazycake.shiro.serializer.StringSerializer` | The key serializer of cache manager<br>You can change the implement of key serializer or the encoding of StringSerializer.<br>Supported encodings refer to [Supported Encodings](https://docs.oracle.com/javase/8/docs/technotes/guides/intl/encoding.doc.html). Such as `UTF-8`, `UTF-16`, `UTF-32`, `ISO-8859-1`, `GBK`, `Big5`, etc<br>For more detail, check [Serializer](#serializer) | | valueSerializer | `org.crazycake.shiro.serializer.ObjectSerializer` | The value serializer of cache manager<br>You can change the implement of value serializer<br>For more detail, check [Serializer](#serializer) | # Spring boot starter Using `Spring-Boot` integration is the easiest way to integrate `shiro-redis` into a Spring-base application. > Note: `shiro-redis-spring-boot-starter` version `3.2.1` is based on `shiro-spring-boot-web-starter` version `1.4.0-RC2` First include the `shiro-redis` Spring boot starter dependency in you application classpath ```xml <dependency> <groupId>org.crazycake</groupId> <artifactId>shiro-redis-spring-boot-starter</artifactId> <version>3.2.1</version> </dependency> ``` The next step depends on whether you've created your own `SessionManager` or `SessionsSecurityManager`. ## If you haven't created your own `SessionManager` or `SessionsSecurityManager` If you don't have your own `SessionManager` or `SessionsSecurityManager` in your configuration, `shiro-redis-spring-boot-starter` will create `RedisSessionDAO` and `RedisCacheManager` for you. Then inject them into `SessionManager` and `SessionsSecurityManager` automatically. So, You are all set. Enjoy it! ## If you have created your own `SessionManager` or `SessionsSecurityManager` If you have created your own `SessionManager` or `SessionsSecurityManager` like this: ```java @Bean public SessionsSecurityManager securityManager(List<Realm> realms) { DefaultWebSecurityManager securityManager = new DefaultWebSecurityManager(realms); // other stuff... return securityManager; } ``` Then inject `redisSessionDAO` and `redisCacheManager` which created by `shiro-redis-spring-boot-starter` already ```java @Autowired RedisSessionDAO redisSessionDAO; @Autowired RedisCacheManager redisCacheManager; ``` Inject them into your own `SessionManager` and `SessionsSecurityManager` ```java @Bean public SessionManager sessionManager() { DefaultWebSessionManager sessionManager = new DefaultWebSessionManager(); // inject redisSessionDAO sessionManager.setSessionDAO(redisSessionDAO); // other stuff... return sessionManager; } @Bean public SessionsSecurityManager securityManager(List<Realm> realms, SessionManager sessionManager) { DefaultWebSecurityManager securityManager = new DefaultWebSecurityManager(realms); //inject sessionManager securityManager.setSessionManager(sessionManager); // inject redisCacheManager securityManager.setCacheManager(redisCacheManager); // other stuff... return securityManager; } ``` For full example, see [shiro-redis-spring-boot-tutorial](https://github.com/alexxiyang/shiro-redis-spring-boot-tutorial) ### Configuration Properties Here are all available options you can use in Spring-boot starter configuration | Title | Default | Description | | :--------------------------------------------------| :------------------- | :---------------------------| | shiro-redis.enabled | `true` | Enables shiro-redis’s Spring module | | shiro-redis.redis-manager.deploy-mode | `standalone` | Redis deploy mode. Options: `standalone`, `sentinel`, 'cluster' | | shiro-redis.redis-manager.host | `127.0.0.1:6379` | Redis host. If you don't specify host the default value is `127.0.0.1:6379`. If you run redis in sentinel mode or cluster mode, separate host names with comma, like `127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381` | | shiro-redis.redis-manager.master-name | `mymaster` | **Only used for sentinel mode**<br>The master node of Redis sentinel mode | | shiro-redis.redis-manager.timeout | `2000` | Redis connect timeout. Timeout for jedis try to connect to redis server(In milliseconds) | | shiro-redis.redis-manager.so-timeout | `2000` | **Only used for sentinel mode or cluster mode**<br>The timeout for jedis try to read data from redis server | | shiro-redis.redis-manager.max-attempts | `3` | **Only used for cluster mode**<br>Max attempts to connect to server | | shiro-redis.redis-manager.password | | Redis password | | shiro-redis.redis-manager.database | `0` | Redis database. Default value is 0 | | shiro-redis.redis-manager.count | `100` | Scan count. Shiro-redis use Scan to get keys, so you can define the number of elements returned at every iteration. | | shiro-redis.session-dao.expire | `-2` | Redis cache key/value expire time. The expire time is in second.<br>Special values:<br>`-1`: no expire<br>`-2`: the same timeout with session<br>Default value: `-2`<br>**Note**: Make sure expire time is longer than session timeout. | | shiro-redis.session-dao.key-prefix | `shiro:session:` | Custom your redis key prefix for session management<br>**Note**: Remember to add colon at the end of prefix. | | shiro-redis.session-dao.session-in-memory-timeout | `1000` | When we do signin, `doReadSession(sessionId)` will be called by shiro about 10 times. So shiro-redis save Session in ThreadLocal to remit this problem. sessionInMemoryTimeout is expiration of Session in ThreadLocal. <br>Most of time, you don't need to change it. | | shiro-redis.session-dao.session-in-memory-enabled | `true` | Whether or not enable temporary save session in ThreadLocal | | shiro-redis.cache-manager.principal-id-field-name | `id` | Principal id field name. The field which you can get unique id to identify this principal.<br>For example, if you use UserInfo as Principal class, the id field maybe `id`, `userId`, `email`, etc.<br>Remember to add getter to this id field. For example, `getId()`, `getUserId(`), `getEmail()`, etc.<br>Default value is `id`, that means your principal object must has a method called `getId()` | | shiro-redis.cache-manager.expire | `1800` | Redis cache key/value expire time. <br>The expire time is in second. | | shiro-redis.cache-manager.key-prefix | `shiro:cache:` | Custom your redis key prefix for cache management<br>**Note**: Remember to add colon at the end of prefix. | ## Working with `spring-boot-devtools` If you are using `shiro-redis` with `spring-boot-devtools`. Please add this line to `resources/META-INF/spring-devtools.properties` (Create it if there is no this file): ```ini restart.include.shiro-redis=/shiro-[\\w-\\.]+jar ``` # If you found any bugs Please create the issue 可以用中文 <MSG> Upgrade shiro-spring-boot-starter version <DFF> @@ -356,7 +356,7 @@ First include the `shiro-redis` Spring boot starter dependency in you applicatio <dependency> <groupId>org.crazycake</groupId> <artifactId>shiro-redis-spring-boot-starter</artifactId> - <version>3.2.1</version> + <version>3.3.1</version> </dependency> ```
1
Upgrade shiro-spring-boot-starter version
1
.md
md
mit
alexxiyang/shiro-redis
1717
<NME> RedisSessionDAOTest.java <BEF> package org.crazycake.shiro; import org.apache.shiro.session.InvalidSessionException; import org.apache.shiro.session.Session; import org.crazycake.shiro.common.IRedisManager; import org.crazycake.shiro.exception.SerializationException; import org.crazycake.shiro.serializer.ObjectSerializer; import org.crazycake.shiro.serializer.StringSerializer; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import java.io.Serializable; import java.util.Collection; import java.util.Date; import java.util.HashSet; import java.util.Set; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.CoreMatchers.*; public class RedisSessionDAOTest { private IRedisManager redisManager; private StringSerializer keySerializer = new StringSerializer(); private ObjectSerializer valueSerializer = new ObjectSerializer(); @BeforeEach public void setUp() { redisManager = mock(IRedisManager.class); } private RedisSessionDAO mountRedisSessionDAO(Integer expire) { RedisSessionDAO redisSessionDAO = new RedisSessionDAO(); if (expire != null) { redisSessionDAO.setExpire(expire); } redisSessionDAO.setKeyPrefix("student:"); redisSessionDAO.setRedisManager(redisManager); return redisSessionDAO; } @Test public void testUpdate() throws SerializationException { RedisSessionDAO sessionDAO = mountRedisSessionDAO(null); StudentSession session = new StudentSession(99, 2000); sessionDAO.update(session); verify(redisManager).set(keySerializer.serialize("student:99"), valueSerializer.serialize(session), 2); } @Test public void testUpdateByCustomExpire() throws SerializationException { RedisSessionDAO sessionDAO = mountRedisSessionDAO(3); StudentSession session = new StudentSession(98, 2000); sessionDAO.update(session); verify(redisManager).set(keySerializer.serialize("student:98"), valueSerializer.serialize(session), 3); } @Test public void testUpdateByNoExpire() throws SerializationException { RedisSessionDAO sessionDAO = mountRedisSessionDAO(-1); StudentSession session = new StudentSession(97, 2000); sessionDAO.update(session); verify(redisManager).set(keySerializer.serialize("student:97"), valueSerializer.serialize(session), -1); } @Test public void testDelete() throws SerializationException { RedisSessionDAO sessionDAO = mountRedisSessionDAO(null); StudentSession session = new StudentSession(96, 1000); sessionDAO.delete(session); verify(redisManager).del(keySerializer.serialize("student:96")); } @Test public void testGetActiveSessions() throws SerializationException { Set<byte[]> mockKeys = new HashSet<byte[]>(); mockKeys.add(keySerializer.serialize("student:1")); mockKeys.add(keySerializer.serialize("student:2")); when(redisManager.keys(keySerializer.serialize("student:*"))).thenReturn(mockKeys); StudentSession mockSession1 = new StudentSession(1, 2000); StudentSession mockSession2 = new StudentSession(2, 2000); when(redisManager.get(keySerializer.serialize("student:1"))).thenReturn(valueSerializer.serialize(mockSession1)); when(redisManager.get(keySerializer.serialize("student:2"))).thenReturn(valueSerializer.serialize(mockSession2)); RedisSessionDAO sessionDAO = mountRedisSessionDAO(null); assertThat(sessionDAO.getActiveSessions().size(), is(2)); } } class StudentSession implements Session, Serializable { private Integer id; private long timeout; public StudentSession(Integer id, long timeout) { this.id = id; this.timeout = timeout; } @Override public Serializable getId() { return id; } @Override public Date getStartTimestamp() { return null; } @Override public Date getLastAccessTime() { return null; } @Override public long getTimeout() throws InvalidSessionException { return timeout; } @Override public void setTimeout(long l) throws InvalidSessionException { } @Override public String getHost() { return null; } @Override public void touch() throws InvalidSessionException { } @Override public void stop() throws InvalidSessionException { } @Override public Collection<Object> getAttributeKeys() throws InvalidSessionException { return null; } @Override public Object getAttribute(Object o) throws InvalidSessionException { return null; } @Override public void setAttribute(Object o, Object o1) throws InvalidSessionException { } @Override public Object removeAttribute(Object o) throws InvalidSessionException { return null; } } <MSG> Move IRedisManager back to root <DFF> @@ -2,7 +2,6 @@ package org.crazycake.shiro; import org.apache.shiro.session.InvalidSessionException; import org.apache.shiro.session.Session; -import org.crazycake.shiro.common.IRedisManager; import org.crazycake.shiro.exception.SerializationException; import org.crazycake.shiro.serializer.ObjectSerializer; import org.crazycake.shiro.serializer.StringSerializer;
0
Move IRedisManager back to root
1
.java
java
mit
alexxiyang/shiro-redis
1718
<NME> solve_quick.py <BEF> ADDFILE <MSG> add examples/cifar10 <DFF> @@ -0,0 +1,19 @@ +# -------------------------------------------------------- +# Cifar-10 for Dragon +# Copyright(c) 2017 SeetaTech +# Written by Ting Pan +# -------------------------------------------------------- + +""" Train a cifar-10 net """ + +import dragon.vm.caffe as caffe + +if __name__ == '__main__': + + # init + caffe.set_mode_gpu() + + # solve + solver = caffe.SGDSolver('cifar10_quick_solver.prototxt') + solver.step(5000) + solver.snapshot() \ No newline at end of file
19
add examples/cifar10
0
.py
py
bsd-2-clause
neopenx/Dragon
1719
<NME> solve_quick.py <BEF> ADDFILE <MSG> add examples/cifar10 <DFF> @@ -0,0 +1,19 @@ +# -------------------------------------------------------- +# Cifar-10 for Dragon +# Copyright(c) 2017 SeetaTech +# Written by Ting Pan +# -------------------------------------------------------- + +""" Train a cifar-10 net """ + +import dragon.vm.caffe as caffe + +if __name__ == '__main__': + + # init + caffe.set_mode_gpu() + + # solve + solver = caffe.SGDSolver('cifar10_quick_solver.prototxt') + solver.step(5000) + solver.snapshot() \ No newline at end of file
19
add examples/cifar10
0
.py
py
bsd-2-clause
neopenx/Dragon
1720
<NME> README.md <BEF> shiro-redis ============= ## Introduction shiro only provide the support of ehcache and concurrentHashMap. Here is an implement of redis cache can be used by shiro. Hope it will help you! ## Documentation * directly download jar file Download shiro-redis.jar in bin folder and add it into your classpath. * add maven dependency ```xml <dependency> <groupId>org.crazycake</groupId> <MSG> Update README.md update readme <DFF> @@ -10,6 +10,7 @@ You can chose these 2 ways to include shiro-redis into your project * directly download jar file Download shiro-redis.jar in bin folder and add it into your classpath. * add maven dependency + ```xml <dependency> <groupId>org.crazycake</groupId>
1
Update README.md
0
.md
md
mit
alexxiyang/shiro-redis
1721
<NME> RedisCacheTest.java <BEF> package org.crazycake.shiro; import org.apache.shiro.subject.PrincipalCollection; import org.crazycake.shiro.exception.SerializationException; import org.crazycake.shiro.serializer.ObjectSerializer; import org.crazycake.shiro.serializer.StringSerializer; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import java.util.Collection; import java.util.Iterator; import java.util.List; import java.util.Set; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.CoreMatchers.*; import static org.mockito.Mockito.*; public class RedisCacheTest { private IRedisManager redisManager; private StringSerializer keySerializer = new StringSerializer(); private ObjectSerializer valueSerializer = new ObjectSerializer(); @BeforeEach public void setUp() { redisManager = mock(IRedisManager.class); } private RedisCache mountRedisCache() { return new RedisCache(redisManager, new StringSerializer(), new ObjectSerializer(), "employee:", 1, RedisCacheManager.DEFAULT_PRINCIPAL_ID_FIELD_NAME); } @Test public void testInitialize() { Assertions.assertThrows(IllegalArgumentException.class, () -> new RedisCache<String, String>(null, null, null, "abc:", 1, RedisCacheManager.DEFAULT_PRINCIPAL_ID_FIELD_NAME)); Assertions.assertThrows(IllegalArgumentException.class, () -> new RedisCache<String, String>(new RedisManager(), null, null, "abc:", 1, RedisCacheManager.DEFAULT_PRINCIPAL_ID_FIELD_NAME)); Assertions.assertThrows(IllegalArgumentException.class, () -> new RedisCache<String, String>(new RedisManager(), new StringSerializer(), null, "abc:", 1, RedisCacheManager.DEFAULT_PRINCIPAL_ID_FIELD_NAME)); } @Test public void testPut() throws SerializationException { RedisCache rc = mountRedisCache(); Object value = rc.put("foo", "bar"); assertThat(value, is("bar")); verify(redisManager).set(keySerializer.serialize("employee:foo"), valueSerializer.serialize("bar"), 1); PrincipalCollection principal = new EmployeePrincipal(3); rc.put(principal, "account information"); verify(redisManager).set(keySerializer.serialize("employee:3"), valueSerializer.serialize("account information"), 1); } } class Employee { private int id; public Employee(int id) { this.id = id; } public int getId() { return this.id; } } class EmployeePrincipal implements PrincipalCollection { private Employee primaryPrincipal; public EmployeePrincipal(int id) { this.primaryPrincipal = new Employee(id); } @Override public Employee getPrimaryPrincipal() { return this.primaryPrincipal; } @Override public <T> T oneByType(Class<T> aClass) { return null; } @Override public <T> Collection<T> byType(Class<T> aClass) { return null; } } @Test public void testPutIdeal() { doPutAuth(redisCache, user1); FakeAuth fakeAuth = redisCache.get(user1); assertAuthEquals(fakeAuth, turnUserToFakeAuth((UserInfo)user1.getPrimaryPrincipal())); } @Test public void testSize() { doPutAuth(redisCache, user1); doPutAuth(redisCache, user2); assertEquals(redisCache.size(), 2); } public Set<String> getRealmNames() { return null; } @Override public boolean isEmpty() { return false; } @Override public Iterator iterator() { return null; } } <MSG> Fix test case. Release 3.2.0 <DFF> @@ -89,16 +89,17 @@ public class RedisCacheTest { } @Test - public void testPutIdeal() { + public void testPut() { doPutAuth(redisCache, user1); FakeAuth fakeAuth = redisCache.get(user1); assertAuthEquals(fakeAuth, turnUserToFakeAuth((UserInfo)user1.getPrimaryPrincipal())); } @Test - public void testSize() { + public void testSize() throws InterruptedException { doPutAuth(redisCache, user1); doPutAuth(redisCache, user2); + Thread.sleep(200); assertEquals(redisCache.size(), 2); }
3
Fix test case. Release 3.2.0
2
.java
java
mit
alexxiyang/shiro-redis
1722
<NME> README.md <BEF> shiro-redis ============= ## Introduction shiro only provide the support of ehcache and concurrentHashMap. Here is an implement of redis cache can be used by shiro. Hope it will help you! ## Documentation Official documentation [is located here](http://alexxiyang.github.io/shiro-redis/). # redisManager.password = chenxing ``` If you use redis cluster, config like this : ```properties # Create redisManager redisManager = org.crazycake.shiro.RedisClusterManager # Redis host and port list redisManager.host = 192.168.21.3:7000,192.168.21.3:7001,192.168.21.3:7002,192.168.21.3:7003,192.168.21.3:7004,192.168.21.3:7005 # Redis cache key/value expire time. Default value:0 .The expire time is in second (Optional) redisManager.expire = 600 # Redis connect timeout. Timeout for jedis try to connect to redis server(In milliseconds).(Optional) redisManager.timeout = 2000 # timeout for jedis try to read data from redis server (Optional) redisManager.soTimeout = 2000 # max attempts to connect to server (Optional) redisManager.maxAttempts = 2 # Redis password.(Optional) #redisManager.password = xxxx ``` Here is a [tutorial project](https://github.com/alexxiyang/shiro-redis-tutorial) for you to understand how to configure `shiro-redis` in `shiro.ini`. ## Spring </bean> <!-- shiro-redis configuration [end] --> ``` Here is a [tutorial project](https://github.com/alexxiyang/shiro-redis-spring-tutorial) for you to understand how to configure `shiro-redis` in spring configuration file. ## Serializer <MSG> add redis sentinel support <DFF> @@ -105,26 +105,6 @@ redisManager.soTimeout = 2000 # redisManager.password = chenxing ``` -If you use redis cluster, config like this : - -```properties -# Create redisManager -redisManager = org.crazycake.shiro.RedisClusterManager -# Redis host and port list -redisManager.host = 192.168.21.3:7000,192.168.21.3:7001,192.168.21.3:7002,192.168.21.3:7003,192.168.21.3:7004,192.168.21.3:7005 -# Redis cache key/value expire time. Default value:0 .The expire time is in second (Optional) -redisManager.expire = 600 -# Redis connect timeout. Timeout for jedis try to connect to redis server(In milliseconds).(Optional) -redisManager.timeout = 2000 -# timeout for jedis try to read data from redis server (Optional) -redisManager.soTimeout = 2000 -# max attempts to connect to server (Optional) -redisManager.maxAttempts = 2 -# Redis password.(Optional) -#redisManager.password = xxxx - -``` - Here is a [tutorial project](https://github.com/alexxiyang/shiro-redis-tutorial) for you to understand how to configure `shiro-redis` in `shiro.ini`. ## Spring @@ -170,6 +150,25 @@ spring.xml: </bean> <!-- shiro-redis configuration [end] --> ``` + +If you use redis sentinel, config like this : +```xml +<!-- shiro-redis configuration [start] --> +<!-- shiro redisManager --> +<bean id="redisManager" class="org.crazycake.shiro.RedisSentinelManager"> + <property name="host" value="192.168.0.192:26379,192.168.0.192:26380,192.168.0.192:26381"/> + <property name="expire" value="1800"/> + <!-- optional properties: + <property name="timeout" value="10000"/> + <property name="soTimeout" value="10000"/> + <property name="masterName" value="mymaster"/> + <property name="password" value="123456"/> + <property name="database" value="1"/> + --> +</bean> +``` + + Here is a [tutorial project](https://github.com/alexxiyang/shiro-redis-spring-tutorial) for you to understand how to configure `shiro-redis` in spring configuration file. ## Serializer
19
add redis sentinel support
20
.md
md
mit
alexxiyang/shiro-redis
1723
<NME> README.rst <BEF> Hitch ===== Hitch is a loosely-coupled, isolated by design testing framework built upon python's unittest that lets you write simple, easy to read and easy to debug tests for *any* software (not just web apps and not just python apps). * `Test readability <https://hitchtest.readthedocs.org/en/latest/glossary/test_readability.html>`_ * `Loose coupling <https://hitchtest.readthedocs.org/en/latest/glossary/loose_coupling.html>`_ * `Test realism <https://hitchtest.readthedocs.org/en/latest/glossary/test_realism.html>`_ * Tests that `fail fast <https://hitchtest.readthedocs.org/en/latest/glossary/fail_fast.html>`_ and `fail clearly <https://hitchtest.readthedocs.org/en/latest/glossary/fail_clearly.html>`_ Available plugins ----------------- Hitch comes with a variety of plugins to aid you to realistically testing various kinds of software, components and scenarios, including: * `Python <https://hitchtest.readthedocs.org/en/latest/plugins/hitchpython.html>`_ (includes Django and Celery service definitions) * `Postgresql <https://hitchtest.readthedocs.org/en/latest/plugins/hitchpostgres.html>`_ * `Redis <https://hitchtest.readthedocs.org/en/latest/plugins/hitchredis.html>`_ * `Web apps (using selenium) <https://hitchtest.readthedocs.org/en/latest/plugins/hitchselenium.html>`_ * Command line apps (using pexpect) * `Cron <https://hitchtest.readthedocs.org/en/latest/plugins/hitchcron.html>`_ * MySQL * RabbitMQ * Elastic Search `Plugin documentation <https://hitchtest.readthedocs.org/en/latest/plugins/>`_ Getting started --------------- See the `quickstart tutorial <https://hitchtest.readthedocs.org/en/latest/quickstart/index.html>`_ on how to get started testing an existing project. Also check out `cookiecutter-django <https://github.com/pydanny/cookiecutter-django>`_ if you want to start a new Django project with tests. Status ------ Hitch is currently in beta. It is regression tested on: * Operating Systems : Mac OS X Yosemite, Ubuntu, Debian, Fedora and Arch Linux. * Python versions : 3.5.0, 3.4.3, 3.4.0 and 3.3.0 `(what about python 2?) <https://hitchtest.readthedocs.org/en/latest/faq/what_about_python2.html>`_ It does not currently work on Windows. See `tested on <https://hitchtest.readthedocs.org/en/latest/misc/tested_on.html>`_ for more details on how the framework is tested (with itself, naturally). Contents of this project ------------------------ This project contains: * The code for the bootstrapper script * Documentation for the whole project (`hosted at readthedocs <https://hitchtest.readthedocs.org/en/latest/>`_) * Code for other components is at: https://github.com/hitchtest/ <MSG> Added Gitter badge <DFF> @@ -1,6 +1,10 @@ Hitch ===== +.. image:: https://badges.gitter.im/Join%20Chat.svg + :alt: Join the chat at https://gitter.im/hitchtest/hitch + :target: https://gitter.im/hitchtest/hitch?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge + Hitch is a loosely-coupled, isolated by design testing framework built upon python's unittest that lets you write simple, easy to read and easy to debug tests for *any* software (not just web apps and not just python apps).
4
Added Gitter badge
0
.rst
rst
agpl-3.0
hitchtest/hitch
1724
<NME> README.md <BEF> # Dragon: A Computation Graph Virtual Machine Based Deep Learning Framework ![](http://dragon.seetatech.com/static/images/styles-dragon.png) ----- ## Deprecated. See [seetaresearch/Dragon](http://github.com/seetaresearch/Dragon). 2. CUDA [Optional] 3. CUDNN [Optional] 4. OpenMPI [Optional] ----- ### Installation (Optional) Download and install [CUDNN](https://developer.nvidia.com/cudnn) 3. (Optional) Download 3rdparty.zip and unzip to Dragon/3rdparty (Out of source code dir) [*Win64-VS2013*](https://pan.baidu.com/s/1miGAZl2) (OpenBLAS / Protobuf2.6 for VS2013 / CUDNN v7 / Microsoft MPI) - Set CUDA compiling architectures if necessary - GCC version(4.8+, 5.0-) should add ``-std=c++11`` to ``CUDA_NVCC_FLAGS``, if ``nullptr`` is not found - We pre-generated files under ``Dragon/src/protos`` with protobuf-2.6, run ``protoc`` by yourself if higher are required 6. Environment Variables ### Linux(Only for OpenMPI): <MSG> add NCCL support for synchronous distributed training <DFF> @@ -7,6 +7,7 @@ 2. CUDA [Optional] 3. CUDNN [Optional] 4. OpenMPI [Optional] +5. NCCL [Optional] ----- ### Installation @@ -16,6 +17,8 @@ (Optional) Download and install [CUDNN](https://developer.nvidia.com/cudnn) + (Optional, Linux Only) Download and install [NCCL](https://developer.nvidia.com/nccl) + 3. (Optional) Download 3rdparty.zip and unzip to Dragon/3rdparty (Out of source code dir) [*Win64-VS2013*](https://pan.baidu.com/s/1miGAZl2) (OpenBLAS / Protobuf2.6 for VS2013 / CUDNN v7 / Microsoft MPI) @@ -42,6 +45,7 @@ - Set CUDA compiling architectures if necessary - GCC version(4.8+, 5.0-) should add ``-std=c++11`` to ``CUDA_NVCC_FLAGS``, if ``nullptr`` is not found - We pre-generated files under ``Dragon/src/protos`` with protobuf-2.6, run ``protoc`` by yourself if higher are required + - OpenMPI can take NCCL and our CUDA impl at the same time, prefer not to use NCCL(*memory inefficient*) 6. Environment Variables ### Linux(Only for OpenMPI):
4
add NCCL support for synchronous distributed training
0
.md
md
bsd-2-clause
neopenx/Dragon
1725
<NME> README.md <BEF> # Dragon: A Computation Graph Virtual Machine Based Deep Learning Framework ![](http://dragon.seetatech.com/static/images/styles-dragon.png) ----- ## Deprecated. See [seetaresearch/Dragon](http://github.com/seetaresearch/Dragon). 2. CUDA [Optional] 3. CUDNN [Optional] 4. OpenMPI [Optional] ----- ### Installation (Optional) Download and install [CUDNN](https://developer.nvidia.com/cudnn) 3. (Optional) Download 3rdparty.zip and unzip to Dragon/3rdparty (Out of source code dir) [*Win64-VS2013*](https://pan.baidu.com/s/1miGAZl2) (OpenBLAS / Protobuf2.6 for VS2013 / CUDNN v7 / Microsoft MPI) - Set CUDA compiling architectures if necessary - GCC version(4.8+, 5.0-) should add ``-std=c++11`` to ``CUDA_NVCC_FLAGS``, if ``nullptr`` is not found - We pre-generated files under ``Dragon/src/protos`` with protobuf-2.6, run ``protoc`` by yourself if higher are required 6. Environment Variables ### Linux(Only for OpenMPI): <MSG> add NCCL support for synchronous distributed training <DFF> @@ -7,6 +7,7 @@ 2. CUDA [Optional] 3. CUDNN [Optional] 4. OpenMPI [Optional] +5. NCCL [Optional] ----- ### Installation @@ -16,6 +17,8 @@ (Optional) Download and install [CUDNN](https://developer.nvidia.com/cudnn) + (Optional, Linux Only) Download and install [NCCL](https://developer.nvidia.com/nccl) + 3. (Optional) Download 3rdparty.zip and unzip to Dragon/3rdparty (Out of source code dir) [*Win64-VS2013*](https://pan.baidu.com/s/1miGAZl2) (OpenBLAS / Protobuf2.6 for VS2013 / CUDNN v7 / Microsoft MPI) @@ -42,6 +45,7 @@ - Set CUDA compiling architectures if necessary - GCC version(4.8+, 5.0-) should add ``-std=c++11`` to ``CUDA_NVCC_FLAGS``, if ``nullptr`` is not found - We pre-generated files under ``Dragon/src/protos`` with protobuf-2.6, run ``protoc`` by yourself if higher are required + - OpenMPI can take NCCL and our CUDA impl at the same time, prefer not to use NCCL(*memory inefficient*) 6. Environment Variables ### Linux(Only for OpenMPI):
4
add NCCL support for synchronous distributed training
0
.md
md
bsd-2-clause
neopenx/Dragon
1726
<NME> README.md <BEF> shiro-redis ============= ## Introduction shiro only provide the support of ehcache and concurrentHashMap. Here is an implement of redis cache can be used by shiro. Hope it will help you! ## Documentation Official documentation [is located here](http://alexxiyang.github.io/shiro-redis/). </bean> ``` If you found any bugs =========== <MSG> Add warning which tell people to use the 3 parameter constructor of SimpleAuthenticationInfo <DFF> @@ -132,6 +132,10 @@ spring.xml: </bean> ``` +> NOTE +> Shiro-redis don't support SimpleAuthenticationInfo created by this constructor `org.apache.shiro.authc.SimpleAuthenticationInfo.SimpleAuthenticationInfo(Object principal, Object hashedCredentials, ByteSource credentialsSalt, String realmName)`. +> Please use `org.apache.shiro.authc.SimpleAuthenticationInfo.SimpleAuthenticationInfo(Object principal, Object hashedCredentials, ByteSource credentialsSalt, String realmName)` instead. + If you found any bugs ===========
4
Add warning which tell people to use the 3 parameter constructor of SimpleAuthenticationInfo
0
.md
md
mit
alexxiyang/shiro-redis
1727
<NME> README.md <BEF> shiro-redis ============= [![Build Status](https://travis-ci.org/alexxiyang/shiro-redis.svg?branch=master)](https://travis-ci.org/alexxiyang/shiro-redis) [![Maven Central](https://maven-badges.herokuapp.com/maven-central/org.crazycake/shiro-redis/badge.svg)](https://maven-badges.herokuapp.com/maven-central/org.crazycake/shiro-redis) shiro only provide the support of ehcache and concurrentHashMap. Here is an implement of redis cache can be used by shiro. Hope it will help you! # Download You use either of the following 2 ways to include `shiro-redis` into your project * use `git clone https://github.com/alexxiyang/shiro-redis.git` to clone project to your local workspace and build jar file by your self * add maven dependency ```xml <dependency> <groupId>org.crazycake</groupId> <artifactId>shiro-redis</artifactId> <version>3.3.1</version> </dependency> ``` > **Note:**\ > 3.3.0 is compiled in java11 by mistake. > Please use 3.3.1 which is compiled in java8 ## shiro-core/jedis Version Comparison Charts | shiro-redis | shiro | jedis | | :----------------:| :-------: | :-------: | | 3.2.3 | 1.3.2 | 2.9.0 | | 3.3.0 (java11) | 1.6.0 | 3.3.0 | | 3.3.1 (java8) | 1.6.0 | 3.3.0 | # Before use Here is the first thing you need to know. Shiro-redis needs an id field to identify your authorization object in Redis. So please make sure your principal class has a field which you can get unique id of this object. Please setting this id field name by `cacheManager.principalIdFieldName = <your id field name of principal object>` For example: If you create `SimpleAuthenticationInfo` like this: ```java @Override protected AuthenticationInfo doGetAuthenticationInfo(AuthenticationToken token) throws AuthenticationException { UsernamePasswordToken usernamePasswordToken = (UsernamePasswordToken)token; UserInfo userInfo = new UserInfo(); userInfo.setUsername(usernamePasswordToken.getUsername()); return new SimpleAuthenticationInfo(userInfo, "123456", getName()); } ``` Then the `userInfo` object is your principal object. You need to make sure `UserInfo` has an unique field for Redis to identify it. Take `userId` as an example: ```java public class UserInfo implements Serializable{ private Integer userId private String username; public String getUsername() { return username; } public void setUsername(String username) { this.username = username; } public Integer getUserId() { return this.userId; } } ``` Put userId as the value of `cacheManager.principalIdFieldName`, like this: ```properties cacheManager.principalIdFieldName = userId ``` If you're using Spring, the configuration should be ```xml <property name="principalIdFieldName" value="userId" /> ``` Then `shiro-redis` will call `userInfo.getUserId()` to get the id for saving Redis object. # How to configure ? You can configure `shiro-redis` either in `shiro.ini` or in `spring-*.xml` ## shiro.ini Here is the configuration example for shiro.ini. ### Redis Standalone If you are running Redis in Standalone mode ```properties [main] #==================================== # shiro-redis configuration [start] #==================================== #=================================== # Redis Manager [start] #=================================== # Create redisManager redisManager = org.crazycake.shiro.RedisManager # Redis host. If you don't specify host the default value is 127.0.0.1:6379 redisManager.host = 127.0.0.1:6379 #=================================== # Redis Manager [end] #=================================== #========================================= # Redis session DAO [start] #========================================= # Create redisSessionDAO redisSessionDAO = org.crazycake.shiro.RedisSessionDAO # Use redisManager as cache manager redisSessionDAO.redisManager = $redisManager sessionManager = org.apache.shiro.web.session.mgt.DefaultWebSessionManager sessionManager.sessionDAO = $redisSessionDAO securityManager.sessionManager = $sessionManager #========================================= # Redis session DAO [end] #========================================= #========================================== # Redis cache manager [start] #========================================== # Create cacheManager cacheManager = org.crazycake.shiro.RedisCacheManager # Principal id field name. The field which you can get unique id to identify this principal. # For example, if you use UserInfo as Principal class, the id field maybe `id`, `userId`, `email`, etc. # Remember to add getter to this id field. For example, `getId()`, `getUserId()`, `getEmail()`, etc. # Default value is id, that means your principal object must has a method called `getId()` cacheManager.principalIdFieldName = id # Use redisManager as cache manager cacheManager.redisManager = $redisManager securityManager.cacheManager = $cacheManager #========================================== # Redis cache manager [end] #========================================== #================================= # shiro-redis configuration [end] #================================= ``` For complete configurable options list, check [Configurable Options](#configurable-options). Here is a [tutorial project](https://github.com/alexxiyang/shiro-redis-tutorial) for you to understand how to configure `shiro-redis` in `shiro.ini`. ### Redis Sentinel if you're using Redis Sentinel, please replace the `redisManager` configuration of the standalone version into the following: ```properties #=================================== # Redis Manager [start] #=================================== # Create redisManager redisManager = org.crazycake.shiro.RedisSentinelManager # Sentinel host. If you don't specify host the default value is 127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381 redisManager.host = 127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381 # Sentinel master name redisManager.masterName = mymaster #=================================== # Redis Manager [end] #=================================== ``` For complete configurable options list, check [Configurable Options](#configurable-options). ### Redis Cluster If you're using redis cluster, please replace the `redisManager` configuration of the standalone version into the following: ```properties #=================================== # Redis Manager [start] #=================================== # Create redisManager redisManager = org.crazycake.shiro.RedisClusterManager # Redis host and port list redisManager.host = 192.168.21.3:7000,192.168.21.3:7001,192.168.21.3:7002,192.168.21.3:7003,192.168.21.3:7004,192.168.21.3:7005 #=================================== # Redis Manager [end] #=================================== ``` For complete configurable options list, check [Configurable Options](#configurable-options). ## Spring If you are using Spring ### Redis Standalone If you are running Redis in Standalone mode ```xml <!-- shiro-redis configuration [start] --> <!-- Redis Manager [start] --> <bean id="redisManager" class="org.crazycake.shiro.RedisManager"> <property name="host" value="127.0.0.1:6379"/> </bean> <!-- Redis Manager [end] --> <!-- Redis session DAO [start] --> <bean id="redisSessionDAO" class="org.crazycake.shiro.RedisSessionDAO"> <property name="redisManager" ref="redisManager" /> </bean> <bean id="sessionManager" class="org.apache.shiro.web.session.mgt.DefaultWebSessionManager"> <property name="sessionDAO" ref="redisSessionDAO" /> </bean> <!-- Redis session DAO [end] --> <!-- Redis cache manager [start] --> <bean id="cacheManager" class="org.crazycake.shiro.RedisCacheManager"> <property name="redisManager" ref="redisManager" /> </bean> <!-- Redis cache manager [end] --> <bean id="securityManager" class="org.apache.shiro.web.mgt.DefaultWebSecurityManager"> <property name="sessionManager" ref="sessionManager" /> <property name="cacheManager" ref="cacheManager" /> <!-- other configurations --> <property name="realm" ref="exampleRealm"/> <property name="rememberMeManager.cipherKey" value="kPH+bIxk5D2deZiIxcaaaA==" /> </bean> <!-- shiro-redis configuration [end] --> ``` For complete configurable options list, check [Configurable Options](#configurable-options). Here is a [tutorial project](https://github.com/alexxiyang/shiro-redis-spring-tutorial) for you to understand how to configure `shiro-redis` in spring configuration file. ### Redis Sentinel If you use redis sentinel, please replace the `redisManager` configuration of the standalone version into the following: ```xml <!-- shiro-redis configuration [start] --> <!-- shiro redisManager --> <bean id="redisManager" class="org.crazycake.shiro.RedisSentinelManager"> <property name="host" value="127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381"/> <property name="masterName" value="mymaster"/> </bean> ``` For complete configurable options list, check [Configurable Options](#configurable-options). ### Redis Cluster If you use redis cluster, please replace the `redisManager` configuration of the standalone version into the following: ```xml <!-- shiro-redis configuration [start] --> <!-- shiro redisManager --> <bean id="redisManager" class="org.crazycake.shiro.RedisClusterManager"> <property name="host" value="192.168.21.3:7000,192.168.21.3:7001,192.168.21.3:7002,192.168.21.3:7003,192.168.21.3:7004,192.168.21.3:7005"/> </bean> ``` For complete configurable options list, check [Configurable Options](#configurable-options). ## Serializer Since redis only accept `byte[]`, there comes a serializer problem. Shiro-redis is using `StringSerializer` as key serializer and `ObjectSerializer` as value serializer. You can use your own custom serializer, as long as this custom serializer implements `org.crazycake.shiro.serializer.RedisSerializer` For example, we can change the charset of keySerializer like this ```properties # If you want change charset of keySerializer or use your own custom serializer, you need to define serializer first # # cacheManagerKeySerializer = org.crazycake.shiro.serializer.StringSerializer # Supported encodings refer to https://docs.oracle.com/javase/8/docs/technotes/guides/intl/encoding.doc.html # UTF-8, UTF-16, UTF-32, ISO-8859-1, GBK, Big5, etc # # cacheManagerKeySerializer.charset = UTF-8 # cacheManager.keySerializer = $cacheManagerKeySerializer ``` These 4 options that you can replace them with your cutom serializers: - cacheManager.keySerializer - cacheManager.valueSerializer - redisSessionDAO.keySerializer - redisSessionDAO.valueSerializer ## Configurable Options Here are all the available options you can use in `shiro-redis` configuration file. ### RedisManager | Title | Default | Description | | :------------------| :------------------- | :---------------------------| | host | `127.0.0.1:6379` | Redis host. If you don't specify host the default value is `127.0.0.1:6379`. If you run redis in sentinel mode or cluster mode, separate host names with comma, like `127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381` | | masterName | `mymaster` | **Only used for sentinel mode**<br>The master node of Redis sentinel mode | | timeout | `2000` | Redis connect timeout. Timeout for jedis try to connect to redis server(In milliseconds) | | soTimeout | `2000` | **Only used for sentinel mode or cluster mode**<br>The timeout for jedis try to read data from redis server | | maxAttempts | `3` | **Only used for cluster mode**<br>Max attempts to connect to server | | password | | Redis password | | database | `0` | Redis database. Default value is 0 | | jedisPoolConfig | `new redis.clients.jedis.JedisPoolConfig()` | JedisPoolConfig. You can create your own JedisPoolConfig instance and set attributes as you wish<br>Most of time, you don't need to set jedisPoolConfig<br>Here is an example.<br>`jedisPoolConfig = redis.clients.jedis.JedisPoolConfig`<br>`jedisPoolConfig.testWhileIdle = false`<br>`redisManager.jedisPoolConfig = jedisPoolConfig` | | count | `100` | Scan count. Shiro-redis use Scan to get keys, so you can define the number of elements returned at every iteration. | | jedisPool | `null` | **Only used for sentinel mode or single mode**<br>You can create your own JedisPool instance and set attributes as you wish | ### RedisSessionDAO | Title | Default | Description | | :------------------| :------------------- | :---------------------------| | redisManager | | RedisManager which you just configured above (Required) | | expire | `-2` | Redis cache key/value expire time. The expire time is in second.<br>Special values:<br>`-1`: no expire<br>`-2`: the same timeout with session<br>Default value: `-2`<br>**Note**: Make sure expire time is longer than session timeout. | | keyPrefix | `shiro:session:` | Custom your redis key prefix for session management<br>**Note**: Remember to add colon at the end of prefix. | | sessionInMemoryTimeout | `1000` | When we do signin, `doReadSession(sessionId)` will be called by shiro about 10 times. So shiro-redis save Session in ThreadLocal to remit this problem. sessionInMemoryTimeout is expiration of Session in ThreadLocal. <br>Most of time, you don't need to change it. | | sessionInMemoryEnabled | `true` | Whether or not enable temporary save session in ThreadLocal | | keySerializer | `org.crazycake.shiro.serializer.StringSerializer` | The key serializer of cache manager<br>You can change the implement of key serializer or the encoding of StringSerializer.<br>Supported encodings refer to [Supported Encodings](https://docs.oracle.com/javase/8/docs/technotes/guides/intl/encoding.doc.html). Such as `UTF-8`, `UTF-16`, `UTF-32`, `ISO-8859-1`, `GBK`, `Big5`, etc<br>For more detail, check [Serializer](#serializer) | | valueSerializer | `org.crazycake.shiro.serializer.ObjectSerializer` | The value serializer of cache manager<br>You can change the implement of value serializer<br>For more detail, check [Serializer](#serializer) | ### CacheManager | Title | Default | Description | | :--------------------| :------------------- | :---------------------------| | redisManager | | RedisManager which you just configured above (Required) | | principalIdFieldName | `id` | Principal id field name. The field which you can get unique id to identify this principal.<br>For example, if you use UserInfo as Principal class, the id field maybe `id`, `userId`, `email`, etc.<br>Remember to add getter to this id field. For example, `getId()`, `getUserId(`), `getEmail()`, etc.<br>Default value is `id`, that means your principal object must has a method called `getId()` | | expire | `1800` | Redis cache key/value expire time. <br>The expire time is in second. | | keyPrefix | `shiro:cache:` | Custom your redis key prefix for cache management<br>**Note**: Remember to add colon at the end of prefix. | | keySerializer | `org.crazycake.shiro.serializer.StringSerializer` | The key serializer of cache manager<br>You can change the implement of key serializer or the encoding of StringSerializer.<br>Supported encodings refer to [Supported Encodings](https://docs.oracle.com/javase/8/docs/technotes/guides/intl/encoding.doc.html). Such as `UTF-8`, `UTF-16`, `UTF-32`, `ISO-8859-1`, `GBK`, `Big5`, etc<br>For more detail, check [Serializer](#serializer) | | valueSerializer | `org.crazycake.shiro.serializer.ObjectSerializer` | The value serializer of cache manager<br>You can change the implement of value serializer<br>For more detail, check [Serializer](#serializer) | # Spring boot starter Using `Spring-Boot` integration is the easiest way to integrate `shiro-redis` into a Spring-base application. > Note: `shiro-redis-spring-boot-starter` version `3.2.1` is based on `shiro-spring-boot-web-starter` version `1.4.0-RC2` First include the `shiro-redis` Spring boot starter dependency in you application classpath ```xml <dependency> <groupId>org.crazycake</groupId> <artifactId>shiro-redis-spring-boot-starter</artifactId> <version>3.3.1</version> </dependency> ``` The next step depends on whether you've created your own `SessionManager` or `SessionsSecurityManager`. ## If you haven't created your own `SessionManager` or `SessionsSecurityManager` If you don't have your own `SessionManager` or `SessionsSecurityManager` in your configuration, `shiro-redis-spring-boot-starter` will create `RedisSessionDAO` and `RedisCacheManager` for you. Then inject them into `SessionManager` and `SessionsSecurityManager` automatically. So, You are all set. Enjoy it! ## If you have created your own `SessionManager` or `SessionsSecurityManager` If you have created your own `SessionManager` or `SessionsSecurityManager` like this: ```java @Bean public SessionsSecurityManager securityManager(List<Realm> realms) { DefaultWebSecurityManager securityManager = new DefaultWebSecurityManager(realms); // other stuff... return securityManager; } ``` Then inject `redisSessionDAO` and `redisCacheManager` which created by `shiro-redis-spring-boot-starter` already ```java @Autowired RedisSessionDAO redisSessionDAO; @Autowired RedisCacheManager redisCacheManager; ``` Inject them into your own `SessionManager` and `SessionsSecurityManager` ```java @Bean public SessionManager sessionManager() { DefaultWebSessionManager sessionManager = new DefaultWebSessionManager(); // inject redisSessionDAO sessionManager.setSessionDAO(redisSessionDAO); // other stuff... return sessionManager; } @Bean public SessionsSecurityManager securityManager(List<Realm> realms, SessionManager sessionManager) { DefaultWebSecurityManager securityManager = new DefaultWebSecurityManager(realms); //inject sessionManager securityManager.setSessionManager(sessionManager); // inject redisCacheManager securityManager.setCacheManager(redisCacheManager); // other stuff... return securityManager; } ``` For full example, see [shiro-redis-spring-boot-tutorial](https://github.com/alexxiyang/shiro-redis-spring-boot-tutorial) ### Configuration Properties Here are all available options you can use in Spring-boot starter configuration | Title | Default | Description | | :--------------------------------------------------| :------------------- | :---------------------------| | shiro-redis.enabled | `true` | Enables shiro-redis’s Spring module | | shiro-redis.redis-manager.deploy-mode | `standalone` | Redis deploy mode. Options: `standalone`, `sentinel`, 'cluster' | # If you found any bugs Please send email to [email protected] 可以用中文 | shiro-redis.redis-manager.database | `0` | Redis database. Default value is 0 | | shiro-redis.redis-manager.count | `100` | Scan count. Shiro-redis use Scan to get keys, so you can define the number of elements returned at every iteration. | | shiro-redis.session-dao.expire | `-2` | Redis cache key/value expire time. The expire time is in second.<br>Special values:<br>`-1`: no expire<br>`-2`: the same timeout with session<br>Default value: `-2`<br>**Note**: Make sure expire time is longer than session timeout. | | shiro-redis.session-dao.key-prefix | `shiro:session:` | Custom your redis key prefix for session management<br>**Note**: Remember to add colon at the end of prefix. | | shiro-redis.session-dao.session-in-memory-timeout | `1000` | When we do signin, `doReadSession(sessionId)` will be called by shiro about 10 times. So shiro-redis save Session in ThreadLocal to remit this problem. sessionInMemoryTimeout is expiration of Session in ThreadLocal. <br>Most of time, you don't need to change it. | | shiro-redis.session-dao.session-in-memory-enabled | `true` | Whether or not enable temporary save session in ThreadLocal | | shiro-redis.cache-manager.principal-id-field-name | `id` | Principal id field name. The field which you can get unique id to identify this principal.<br>For example, if you use UserInfo as Principal class, the id field maybe `id`, `userId`, `email`, etc.<br>Remember to add getter to this id field. For example, `getId()`, `getUserId(`), `getEmail()`, etc.<br>Default value is `id`, that means your principal object must has a method called `getId()` | | shiro-redis.cache-manager.expire | `1800` | Redis cache key/value expire time. <br>The expire time is in second. | | shiro-redis.cache-manager.key-prefix | `shiro:cache:` | Custom your redis key prefix for cache management<br>**Note**: Remember to add colon at the end of prefix. | ## Working with `spring-boot-devtools` If you are using `shiro-redis` with `spring-boot-devtools`. Please add this line to `resources/META-INF/spring-devtools.properties` (Create it if there is no this file): ```ini restart.include.shiro-redis=/shiro-[\\w-\\.]+jar ``` # If you found any bugs Please create the issue 可以用中文 <MSG> Update README.md Remove my personal email address <DFF> @@ -430,6 +430,6 @@ For full example, see [shiro-redis-spring-boot-tutorial](https://github.com/alex # If you found any bugs -Please send email to [email protected] +Please create the issue 可以用中文
1
Update README.md
1
.md
md
mit
alexxiyang/shiro-redis
1728
<NME> setup.py <BEF> # -*- coding: utf-8 -* from setuptools.command.install import install from setuptools import find_packages from setuptools import setup from sys import version_info, stderr, exit import codecs import sys import os if sys.platform == "win32" or sys.platform == "cygwin": stderr.write("Hitch will not work on Windows. Sorry.\n") return codecs.open(os.path.join(os.path.abspath(os.path.dirname(__file__)), *parts), 'r').read() setup(name="hitch", version="0.4.1", description="Loosely coupled testing framework", long_description=read('README.rst'), classifiers=[ if version_info[0] == 3: if version_info[1] < 3: stderr.write("The hitch bootstrapper will not run on python 3.0.x, 3.1.x or 3.2.x.\n") exit(1) def read(*parts): # intentionally *not* adding an encoding option to open # see here: https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690 return codecs.open(os.path.join(os.path.abspath(os.path.dirname(__file__)), *parts), 'r').read() setup(name="hitch", version="0.5.7", description="Bootstrapper for hitchtest - the loosely coupled integration testing framework", long_description=read('README.rst'), classifiers=[ 'Development Status :: 3 - Alpha', 'Intended Audience :: Developers', 'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)', 'Topic :: Software Development :: Quality Assurance', 'Topic :: Software Development :: Testing', 'Topic :: Software Development :: Libraries', 'Operating System :: Unix', 'Environment :: Console', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4', ], keywords='hitch testing framework bdd tdd declarative tests bootstrap virtualenv', author='Colm O\'Connor', author_email='[email protected]', url='https://hitchtest.readthedocs.org/', license='AGPL', install_requires=[], packages=find_packages(exclude=["docs", ]), package_data={}, entry_points=dict(console_scripts=['hitch=hitch:commandline.run',]), zip_safe=False, include_package_data=True, ) <MSG> RELEASE : Bumped version. <DFF> @@ -13,7 +13,7 @@ def read(*parts): return codecs.open(os.path.join(os.path.abspath(os.path.dirname(__file__)), *parts), 'r').read() setup(name="hitch", - version="0.4.1", + version="0.4.2", description="Loosely coupled testing framework", long_description=read('README.rst'), classifiers=[
1
RELEASE : Bumped version.
1
.py
py
agpl-3.0
hitchtest/hitch
1729
<NME> setup.py <BEF> # -*- coding: utf-8 -* from setuptools.command.install import install from setuptools import find_packages from setuptools import setup from sys import version_info, stderr, exit import codecs import sys import os if sys.platform == "win32" or sys.platform == "cygwin": stderr.write("Hitch will not work on Windows. Sorry.\n") exit(1) if version_info[0] == 2: if version_info[1] < 6: stderr.write("The hitch bootstrapper will not run on versions of python below v2.6.\n") exit(1) return codecs.open(os.path.join(os.path.abspath(os.path.dirname(__file__)), *parts), 'r').read() setup(name="hitch", version="0.5.3", description="Bootstrapper for hitchtest - the loosely coupled integration testing framework", long_description=read('README.rst'), classifiers=[ # intentionally *not* adding an encoding option to open # see here: https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690 return codecs.open(os.path.join(os.path.abspath(os.path.dirname(__file__)), *parts), 'r').read() setup(name="hitch", version="0.5.7", description="Bootstrapper for hitchtest - the loosely coupled integration testing framework", long_description=read('README.rst'), classifiers=[ 'Development Status :: 3 - Alpha', 'Intended Audience :: Developers', 'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)', 'Topic :: Software Development :: Quality Assurance', 'Topic :: Software Development :: Testing', 'Topic :: Software Development :: Libraries', 'Operating System :: Unix', 'Environment :: Console', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4', ], keywords='hitch testing framework bdd tdd declarative tests bootstrap virtualenv', author='Colm O\'Connor', author_email='[email protected]', url='https://hitchtest.readthedocs.org/', license='AGPL', install_requires=[], packages=find_packages(exclude=["docs", ]), package_data={}, entry_points=dict(console_scripts=['hitch=hitch:commandline.run',]), zip_safe=False, include_package_data=True, ) <MSG> RELEASE : Bumped version. <DFF> @@ -22,7 +22,7 @@ def read(*parts): return codecs.open(os.path.join(os.path.abspath(os.path.dirname(__file__)), *parts), 'r').read() setup(name="hitch", - version="0.5.3", + version="0.5.4", description="Bootstrapper for hitchtest - the loosely coupled integration testing framework", long_description=read('README.rst'), classifiers=[
1
RELEASE : Bumped version.
1
.py
py
agpl-3.0
hitchtest/hitch
1730
<NME> RedisSessionDAO.java <BEF> package org.crazycake.shiro; import org.apache.shiro.session.Session; import org.apache.shiro.session.UnknownSessionException; import org.apache.shiro.session.mgt.eis.AbstractSessionDAO; import org.crazycake.shiro.common.SessionInMemory; import org.crazycake.shiro.exception.SerializationException; import org.crazycake.shiro.serializer.ObjectSerializer; import org.crazycake.shiro.serializer.RedisSerializer; import org.crazycake.shiro.serializer.StringSerializer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.Serializable; import java.util.*; /** * Used for setting/getting authentication information from Redis */ public class RedisSessionDAO extends AbstractSessionDAO { private static Logger logger = LoggerFactory.getLogger(RedisSessionDAO.class); private static final String DEFAULT_SESSION_KEY_PREFIX = "shiro:session:"; private String keyPrefix = DEFAULT_SESSION_KEY_PREFIX; /** * doReadSession be called about 10 times when login. * Save Session in ThreadLocal to resolve this problem. sessionInMemoryTimeout is expiration of Session in ThreadLocal. * The default value is 1000 milliseconds (1s). * Most of time, you don't need to change it. * * You can turn it off by setting sessionInMemoryEnabled to false */ private static final long DEFAULT_SESSION_IN_MEMORY_TIMEOUT = 1000L; private long sessionInMemoryTimeout = DEFAULT_SESSION_IN_MEMORY_TIMEOUT; private static final boolean DEFAULT_SESSION_IN_MEMORY_ENABLED = true; private boolean sessionInMemoryEnabled = DEFAULT_SESSION_IN_MEMORY_ENABLED; private static ThreadLocal sessionsInThread = new ThreadLocal(); /** * expire time in seconds. * NOTE: Please make sure expire is longer than session.getTimeout(), * otherwise you might need the issue that session in Redis got erased when the Session is still available * * DEFAULT_EXPIRE: use the timeout of session instead of setting it by yourself * NO_EXPIRE: never expire */ private static final int DEFAULT_EXPIRE = -2; private static final int NO_EXPIRE = -1; } session.setTimeout(redisManager.getExpire()*1000); this.redisManager.set(key, value, redisManager.getExpire()); } @Override private IRedisManager redisManager; /** * Serializer of key */ private RedisSerializer keySerializer = new StringSerializer(); /** * Serializer of value */ private RedisSerializer valueSerializer = new ObjectSerializer(); /** * save/update session * @param session * @throws UnknownSessionException */ @Override public void update(Session session) throws UnknownSessionException { if (this.sessionInMemoryEnabled) { this.removeExpiredSessionInMemory(); } this.saveSession(session); if (this.sessionInMemoryEnabled) { this.setSessionToThreadLocal(session.getId(), session); } } private void saveSession(Session session) throws UnknownSessionException { if (session == null || session.getId() == null) { logger.error("session or session id is null"); throw new UnknownSessionException("session or session id is null"); } byte[] key; byte[] value; try { key = keySerializer.serialize(getRedisSessionKey(session.getId())); value = valueSerializer.serialize(session); } catch (SerializationException e) { logger.error("serialize session error. session id=" + session.getId()); throw new UnknownSessionException(e); } if (expire == DEFAULT_EXPIRE) { redisManager.set(key, value, (int) (session.getTimeout() / MILLISECONDS_IN_A_SECOND)); return; } if (expire != NO_EXPIRE && expire * MILLISECONDS_IN_A_SECOND < session.getTimeout()) { logger.warn("Redis session expire time: " + (expire * MILLISECONDS_IN_A_SECOND) + " is less than Session timeout: " + session.getTimeout() + " . It may cause some problems."); } redisManager.set(key, value, expire); } /** * delete session * @param session */ @Override public void delete(Session session) { if (this.sessionInMemoryEnabled) { this.removeExpiredSessionInMemory(); } if (session == null || session.getId() == null) { logger.error("session or session id is null"); return; } if (this.sessionInMemoryEnabled) { this.delSessionFromThreadLocal(session.getId()); } try { redisManager.del(keySerializer.serialize(getRedisSessionKey(session.getId()))); } catch (SerializationException e) { logger.error("delete session error. session id=" + session.getId()); } } /** * get all active sessions * @return */ @Override public Collection<Session> getActiveSessions() { if (this.sessionInMemoryEnabled) { this.removeExpiredSessionInMemory(); } Set<Session> sessions = new HashSet<Session>(); try { Set<byte[]> keys = redisManager.keys(keySerializer.serialize(this.keyPrefix + "*")); if (keys != null && keys.size() > 0) { for (byte[] key:keys) { Session s = (Session) valueSerializer.deserialize(redisManager.get(key)); sessions.add(s); } } } catch (SerializationException e) { logger.error("get active sessions error."); } return sessions; } @Override protected Serializable doCreate(Session session) { if (this.sessionInMemoryEnabled) { this.removeExpiredSessionInMemory(); } if (session == null) { logger.error("session is null"); throw new UnknownSessionException("session is null"); } Serializable sessionId = this.generateSessionId(session); this.assignSessionId(session, sessionId); this.saveSession(session); return sessionId; } /** * I change * @param sessionId * @return */ @Override protected Session doReadSession(Serializable sessionId) { if (this.sessionInMemoryEnabled) { this.removeExpiredSessionInMemory(); } if (sessionId == null) { logger.warn("session id is null"); return null; } if (this.sessionInMemoryEnabled) { Session session = getSessionFromThreadLocal(sessionId); if (session != null) { return session; } } Session session = null; try { String sessionRedisKey = getRedisSessionKey(sessionId); logger.debug("read session: " + sessionRedisKey + " from Redis"); session = (Session) valueSerializer.deserialize(redisManager.get(keySerializer.serialize(sessionRedisKey))); if (this.sessionInMemoryEnabled) { setSessionToThreadLocal(sessionId, session); } } catch (SerializationException e) { logger.error("read session error. sessionId: " + sessionId); } return session; } private void setSessionToThreadLocal(Serializable sessionId, Session session) { this.initSessionsInThread(); Map<Serializable, SessionInMemory> sessionMap = (Map<Serializable, SessionInMemory>) sessionsInThread.get(); sessionMap.put(sessionId, this.createSessionInMemory(session)); } private void delSessionFromThreadLocal(Serializable sessionId) { Map<Serializable, SessionInMemory> sessionMap = (Map<Serializable, SessionInMemory>) sessionsInThread.get(); if (sessionMap == null) { return; } sessionMap.remove(sessionId); } private SessionInMemory createSessionInMemory(Session session) { SessionInMemory sessionInMemory = new SessionInMemory(); sessionInMemory.setCreateTime(new Date()); sessionInMemory.setSession(session); return sessionInMemory; } private void initSessionsInThread() { Map<Serializable, SessionInMemory> sessionMap = (Map<Serializable, SessionInMemory>) sessionsInThread.get(); if (sessionMap == null) { sessionMap = new HashMap<Serializable, SessionInMemory>(); sessionsInThread.set(sessionMap); } } private void removeExpiredSessionInMemory() { Map<Serializable, SessionInMemory> sessionMap = (Map<Serializable, SessionInMemory>) sessionsInThread.get(); if (sessionMap == null) { return; } Iterator<Serializable> it = sessionMap.keySet().iterator(); while (it.hasNext()) { Serializable sessionId = it.next(); SessionInMemory sessionInMemory = sessionMap.get(sessionId); if (sessionInMemory == null) { it.remove(); continue; } long liveTime = getSessionInMemoryLiveTime(sessionInMemory); if (liveTime > sessionInMemoryTimeout) { it.remove(); } } if (sessionMap.size() == 0) { sessionsInThread.remove(); } } private Session getSessionFromThreadLocal(Serializable sessionId) { if (sessionsInThread.get() == null) { return null; } Map<Serializable, SessionInMemory> sessionMap = (Map<Serializable, SessionInMemory>) sessionsInThread.get(); SessionInMemory sessionInMemory = sessionMap.get(sessionId); if (sessionInMemory == null) { return null; } logger.debug("read session from memory"); return sessionInMemory.getSession(); } private long getSessionInMemoryLiveTime(SessionInMemory sessionInMemory) { Date now = new Date(); return now.getTime() - sessionInMemory.getCreateTime().getTime(); } private String getRedisSessionKey(Serializable sessionId) { return this.keyPrefix + sessionId; } public IRedisManager getRedisManager() { return redisManager; } public void setRedisManager(IRedisManager redisManager) { this.redisManager = redisManager; } public String getKeyPrefix() { return keyPrefix; } public void setKeyPrefix(String keyPrefix) { this.keyPrefix = keyPrefix; } public RedisSerializer getKeySerializer() { return keySerializer; } public void setKeySerializer(RedisSerializer keySerializer) { this.keySerializer = keySerializer; } public RedisSerializer getValueSerializer() { return valueSerializer; } public void setValueSerializer(RedisSerializer valueSerializer) { this.valueSerializer = valueSerializer; } public long getSessionInMemoryTimeout() { return sessionInMemoryTimeout; } public void setSessionInMemoryTimeout(long sessionInMemoryTimeout) { this.sessionInMemoryTimeout = sessionInMemoryTimeout; } public int getExpire() { return expire; } public void setExpire(int expire) { this.expire = expire; } public boolean getSessionInMemoryEnabled() { return sessionInMemoryEnabled; } public void setSessionInMemoryEnabled(boolean sessionInMemoryEnabled) { this.sessionInMemoryEnabled = sessionInMemoryEnabled; } public static ThreadLocal getSessionsInThread() { return sessionsInThread; } } <MSG> Release 2.8.6 - Support multiple authorization realms cache - Remove IRedisManager.set(key, value, expire) <DFF> @@ -53,7 +53,7 @@ public class RedisSessionDAO extends AbstractSessionDAO { } session.setTimeout(redisManager.getExpire()*1000); - this.redisManager.set(key, value, redisManager.getExpire()); + this.redisManager.set(key, value); } @Override
1
Release 2.8.6 - Support multiple authorization realms cache - Remove IRedisManager.set(key, value, expire)
1
.java
java
mit
alexxiyang/shiro-redis
1731
<NME> RedisSessionDAOTest.java <BEF> package org.crazycake.shiro; import org.apache.shiro.session.Session; import org.apache.shiro.session.UnknownSessionException; import org.apache.shiro.session.mgt.SimpleSession; import org.junit.Before; import org.junit.Test; import java.util.*; import static org.hamcrest.CoreMatchers.is; import static org.junit.Assert.assertThat; import static org.junit.Assert.fail; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class RedisSessionDAOTest { private RedisManager redisManager; private RedisSessionDAO redisSessionDAO; private StringSerializer keySerializer; private String testKey; private String testPrefix;private Set<byte[]> testSet; private SimpleSession testValue; private Collection<FakeSession> testValues; private FakeSession tomSession; private FakeSession paulSession; private FakeSession billySession; private ObjectSerializer valueSeralizer; @Before public void setUp() throws SerializationException { keySerializer = new StringSerializer(); valueSeralizer = new ObjectSerializer(); testKey = "testKey"; testPrefix = "testPrefix:"; testValue = new SimpleSession(); testValue.setId(3); testSet = new HashSet<byte[]>(); testSet.add(keySerializer.serialize(testPrefix + "tom")); testSet.add(keySerializer.serialize(testPrefix + "paul")); testSet.add(keySerializer.serialize(testPrefix + "billy")); testValues = new ArrayList<FakeSession>(); tomSession = new FakeSession(1, "tom"); testValues.add(tomSession); paulSession = new FakeSession(2, "paul"); testValues.add(paulSession); billySession = new FakeSession(3, "billy"); testValues.add(billySession); redisManager = mock(RedisManager.class); when(redisManager.dbSize()).thenReturn(2L); when(redisManager.get(keySerializer.serialize(testPrefix + testKey))).thenReturn(valueSeralizer.serialize(testValue)); when(redisManager.keys(keySerializer.serialize(testPrefix + "*"))).thenReturn(testSet); when(redisManager.get(keySerializer.serialize(testPrefix + "tom"))).thenReturn(valueSeralizer.serialize(tomSession)); when(redisManager.get(keySerializer.serialize(testPrefix + "paul"))).thenReturn(valueSeralizer.serialize(paulSession)); when(redisManager.get(keySerializer.serialize(testPrefix + "billy"))).thenReturn(valueSeralizer.serialize(billySession)); redisSessionDAO = new RedisSessionDAO(); redisSessionDAO.setRedisManager(redisManager); redisSessionDAO.setKeyPrefix(testPrefix); } @Test public void testUpdate() { redisSessionDAO.update(testValue); try { redisSessionDAO.update(null); fail(); } catch (UnknownSessionException e) { assertThat(e.getMessage(), is("session or session id is null")); } } @Test public void testDelete() { redisSessionDAO.delete(null); redisSessionDAO.delete(testValue); } @Test public void testDoCreate() { redisSessionDAO.doCreate(testValue); try { redisSessionDAO.doCreate(null); fail(); } catch (UnknownSessionException e) { assertThat(e.getMessage(), is("session is null")); } } @Test public void testDoReadSession() { Session actualSession = redisSessionDAO.doReadSession(testKey); assertThat(actualSession.getId().toString(), is("3")); redisSessionDAO.doReadSession(null); } @Test public void testGetActiveSessions() { Collection<Session> activeSessions = redisSessionDAO.getActiveSessions(); assertThat(activeSessions.size(), is(3)); for (Iterator<Session> iterator = activeSessions.iterator(); iterator.hasNext(); ) { FakeSession next = (FakeSession)iterator.next(); if (next.getId() == 2) { assertThat(next.getName(), is("paul")); } } } } public void touch() throws InvalidSessionException { } @Override public void stop() throws InvalidSessionException { } @Override public Collection<Object> getAttributeKeys() throws InvalidSessionException { return null; } @Override public Object getAttribute(Object o) throws InvalidSessionException { return null; } @Override public void setAttribute(Object o, Object o1) throws InvalidSessionException { } @Override public Object removeAttribute(Object o) throws InvalidSessionException { return null; } } <MSG> - Enhance RedisSessionDaoTest <DFF> @@ -2,107 +2,130 @@ package org.crazycake.shiro; import org.apache.shiro.session.Session; import org.apache.shiro.session.UnknownSessionException; -import org.apache.shiro.session.mgt.SimpleSession; import org.junit.Before; import org.junit.Test; +import java.io.Serializable; import java.util.*; import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.nullValue; import static org.junit.Assert.assertThat; import static org.junit.Assert.fail; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; +import static org.mockito.Mockito.*; public class RedisSessionDAOTest { private RedisManager redisManager; private RedisSessionDAO redisSessionDAO; private StringSerializer keySerializer; - private String testKey; - private String testPrefix;private Set<byte[]> testSet; - private SimpleSession testValue; - private Collection<FakeSession> testValues; - private FakeSession tomSession; - private FakeSession paulSession; - private FakeSession billySession; + + private String testPrefix; private ObjectSerializer valueSeralizer; @Before public void setUp() throws SerializationException { keySerializer = new StringSerializer(); valueSeralizer = new ObjectSerializer(); - testKey = "testKey"; testPrefix = "testPrefix:"; - testValue = new SimpleSession(); - testValue.setId(3); - testSet = new HashSet<byte[]>(); - testSet.add(keySerializer.serialize(testPrefix + "tom")); - testSet.add(keySerializer.serialize(testPrefix + "paul")); - testSet.add(keySerializer.serialize(testPrefix + "billy")); - testValues = new ArrayList<FakeSession>(); - tomSession = new FakeSession(1, "tom"); - testValues.add(tomSession); - paulSession = new FakeSession(2, "paul"); - testValues.add(paulSession); - billySession = new FakeSession(3, "billy"); - testValues.add(billySession); redisManager = mock(RedisManager.class); - when(redisManager.dbSize()).thenReturn(2L); - when(redisManager.get(keySerializer.serialize(testPrefix + testKey))).thenReturn(valueSeralizer.serialize(testValue)); - when(redisManager.keys(keySerializer.serialize(testPrefix + "*"))).thenReturn(testSet); - when(redisManager.get(keySerializer.serialize(testPrefix + "tom"))).thenReturn(valueSeralizer.serialize(tomSession)); - when(redisManager.get(keySerializer.serialize(testPrefix + "paul"))).thenReturn(valueSeralizer.serialize(paulSession)); - when(redisManager.get(keySerializer.serialize(testPrefix + "billy"))).thenReturn(valueSeralizer.serialize(billySession)); redisSessionDAO = new RedisSessionDAO(); redisSessionDAO.setRedisManager(redisManager); redisSessionDAO.setKeyPrefix(testPrefix); + redisSessionDAO.setExpire(1); } @Test - public void testUpdate() { - redisSessionDAO.update(testValue); + public void testDoCreate() throws SerializationException { try { - redisSessionDAO.update(null); + redisSessionDAO.doCreate(null); fail(); } catch (UnknownSessionException e) { - assertThat(e.getMessage(), is("session or session id is null")); + assertThat(e.getMessage(), is("session is null")); } - } + verify(redisManager, times(0)).set(any((new byte[0]).getClass()), any((new byte[0]).getClass()), eq(1)); - @Test - public void testDelete() { - redisSessionDAO.delete(null); - redisSessionDAO.delete(testValue); + RedisSessionDAO prefixTestRedisSessionDao = new RedisSessionDAO(); + prefixTestRedisSessionDao.setKeyPrefix("abc:"); + prefixTestRedisSessionDao.setRedisManager(redisManager); + prefixTestRedisSessionDao.setExpire(2); + FakeSession fakeSession = new FakeSession(1, "Tom"); + String sessionId = (String)prefixTestRedisSessionDao.doCreate(fakeSession); + verify(redisManager, times(0)).set(eq(keySerializer.serialize("abc:" + sessionId)), any((new byte[0]).getClass()), eq(2)); } @Test - public void testDoCreate() { - redisSessionDAO.doCreate(testValue); + public void testUpdate() throws SerializationException { + FakeSession testSession = new FakeSession(1, "jack"); + byte[] testSessionKeyBytes = keySerializer.serialize(testPrefix + "1"); + byte[] testSessionValueBytes = valueSeralizer.serialize(testSession); + redisSessionDAO.update(testSession); + verify(redisManager, times(1)).set(testSessionKeyBytes, testSessionValueBytes, 1); try { - redisSessionDAO.doCreate(null); + redisSessionDAO.update(null); fail(); } catch (UnknownSessionException e) { - assertThat(e.getMessage(), is("session is null")); + assertThat(e.getMessage(), is("session or session id is null")); + } + + try { + FakeSession nullIdSession = new FakeSession(); + redisSessionDAO.update(nullIdSession); + fail(); + } catch (UnknownSessionException e) { + assertThat(e.getMessage(), is("session or session id is null")); } } @Test - public void testDoReadSession() { - Session actualSession = redisSessionDAO.doReadSession(testKey); - assertThat(actualSession.getId().toString(), is("3")); - redisSessionDAO.doReadSession(null); + public void testDelete() { + redisSessionDAO.delete(null); + verify(redisManager, times(0)).del(any((new byte[0]).getClass())); + FakeSession nullIdSession = new FakeSession(); + redisSessionDAO.delete(nullIdSession); + verify(redisManager, times(0)).del(any((new byte[0]).getClass())); + FakeSession testSession = new FakeSession(2, "Tom"); + redisSessionDAO.delete(testSession); + verify(redisManager, times(1)).del(any((new byte[0]).getClass())); } @Test - public void testGetActiveSessions() { + public void testDoReadSession() throws NoSuchFieldException, IllegalAccessException { + Session nullSession = redisSessionDAO.doReadSession(null); + assertThat(nullSession, is(nullValue())); + + RedisSessionDAO redisSessionDAO2 = new RedisSessionDAO(); + redisSessionDAO2.setRedisManager(redisManager); + redisSessionDAO2.setKeyPrefix(testPrefix); + redisSessionDAO2.setExpire(2); + ThreadLocal sessionsInThread = mock(ThreadLocal.class); + Map<Serializable, SessionInMemory> sessionMap = new HashMap<Serializable, SessionInMemory>(); + SessionInMemory sessionInMemory = new SessionInMemory(); + sessionInMemory.setSession(new FakeSession(1, "Billy")); + sessionInMemory.setCreateTime(new Date()); + sessionMap.put("1", sessionInMemory); + when(sessionsInThread.get()).thenReturn(sessionMap); + TestUtils.setPrivateField(redisSessionDAO2, "sessionsInThread", sessionsInThread); + FakeSession actualSession = (FakeSession)redisSessionDAO2.doReadSession("1"); + assertThat(actualSession.getId().toString(), is("1")); + assertThat(actualSession.getName(), is("Billy")); + verify(redisManager, times(0)).get(any((new byte[0]).getClass())); + } + + @Test + public void testGetActiveSessions() throws SerializationException { + Set<byte[]> fakeKeys = new HashSet<byte[]>(); + byte[] firstKeyBytes = keySerializer.serialize("1"); + fakeKeys.add(firstKeyBytes); + byte[] secondKeyBytes = keySerializer.serialize("2"); + fakeKeys.add(secondKeyBytes); + when(redisManager.keys(any((new byte[0]).getClass()))).thenReturn(fakeKeys); + FakeSession firstSession = new FakeSession(1, "Tom"); + when(redisManager.get(firstKeyBytes)).thenReturn(valueSeralizer.serialize(firstSession)); + FakeSession secondSession = new FakeSession(2, "Billy"); + when(redisManager.get(secondKeyBytes)).thenReturn(valueSeralizer.serialize(secondSession)); + Collection<Session> activeSessions = redisSessionDAO.getActiveSessions(); - assertThat(activeSessions.size(), is(3)); - for (Iterator<Session> iterator = activeSessions.iterator(); iterator.hasNext(); ) { - FakeSession next = (FakeSession)iterator.next(); - if (next.getId() == 2) { - assertThat(next.getName(), is("paul")); - } - } + assertThat(activeSessions.size(), is(2)); } }
78
- Enhance RedisSessionDaoTest
55
.java
java
mit
alexxiyang/shiro-redis
1732
<NME> README.md <BEF> shiro-redis ============= ## Introduction shiro only provide the support of ehcache and concurrentHashMap. Here is an implement of redis cache can be used by shiro. Hope it will help you! ## Documentation Official documentation [is located here](http://alexxiyang.github.io/shiro-redis/). <dependency> <groupId>org.crazycake</groupId> <artifactId>shiro-redis</artifactId> <version>3.0.0</version> </dependency> ``` > **Note:**\ > Do not use version < 3.0.0\ > **注意**:\ > 请不要使用3.0.0以下版本 # Before use Here is the first thing you need to know. Shiro-redis needs an id field to identify your authorization object in Redis. So please make sure your principal class has a field which you can get unique id of this object. Please setting this id field name by `cacheManager.principalId = id` For example: } ``` You need to make sure `UserInfo` has an unique field to identify it in Redis. Take userId as an example: ```java public class UserInfo implements Serializable{ private String username; private Integer age; public String getUsername() { return username; } this.username = username; } public Integer getAge() { return age; } public void setAge(Integer age) { this.age = age; } public Integer getUserId() { return this.userId; } } ``` And put userId as `cacheManager.principalId`, like this: ```properties cacheManager.principalId = userId ``` # How to configure ? You can configure shiro-redis either in `shiro.ini` or in `spring-*.xml` <MSG> Release 3.1.0 <DFF> @@ -16,17 +16,17 @@ You can choose these 2 ways to include shiro-redis into your project <dependency> <groupId>org.crazycake</groupId> <artifactId>shiro-redis</artifactId> - <version>3.0.0</version> + <version>3.1.0</version> </dependency> ``` > **Note:**\ -> Do not use version < 3.0.0\ +> Do not use version < 3.1.0\ > **注意**:\ -> 请不要使用3.0.0以下版本 +> 请不要使用3.1.0以下版本 # Before use -Here is the first thing you need to know. Shiro-redis needs an id field to identify your authorization object in Redis. So please make sure your principal class has a field which you can get unique id of this object. Please setting this id field name by `cacheManager.principalId = id` +Here is the first thing you need to know. Shiro-redis needs an id field to identify your authorization object in Redis. So please make sure your principal class has a field which you can get unique id of this object. Please setting this id field name by `cacheManager.principalIdFieldName = <your id field name of principal object>` For example: @@ -41,7 +41,7 @@ protected AuthenticationInfo doGetAuthenticationInfo(AuthenticationToken token) } ``` -You need to make sure `UserInfo` has an unique field to identify it in Redis. Take userId as an example: +Then the userInfo object is your principal object. You need to make sure `UserInfo` has an unique field to identify it in Redis. Take userId as an example: ```java public class UserInfo implements Serializable{ @@ -49,8 +49,6 @@ public class UserInfo implements Serializable{ private String username; - private Integer age; - public String getUsername() { return username; } @@ -59,25 +57,24 @@ public class UserInfo implements Serializable{ this.username = username; } - public Integer getAge() { - return age; - } - - public void setAge(Integer age) { - this.age = age; - } - public Integer getUserId() { return this.userId; } } ``` -And put userId as `cacheManager.principalId`, like this: +Put userId as the value of `cacheManager.principalIdFieldName`, like this: ```properties -cacheManager.principalId = userId +cacheManager.principalIdFieldName = userId ``` +If you're using Spring, the configuration should be +```xml +<property name="principalIdFieldName" value="userId" /> +``` + +Then shiro-redis will call `userInfo.getUserId()` to get the id for storing Redis object. + # How to configure ? You can configure shiro-redis either in `shiro.ini` or in `spring-*.xml`
14
Release 3.1.0
17
.md
md
mit
alexxiyang/shiro-redis
1733
<NME> RedisSessionDAO.java <BEF> package org.crazycake.shiro; import org.apache.shiro.session.Session; import org.apache.shiro.session.UnknownSessionException; import org.apache.shiro.session.mgt.eis.AbstractSessionDAO; import org.crazycake.shiro.common.SessionInMemory; import org.crazycake.shiro.exception.SerializationException; import org.crazycake.shiro.serializer.ObjectSerializer; import org.crazycake.shiro.serializer.RedisSerializer; import org.crazycake.shiro.serializer.StringSerializer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.Serializable; import java.util.*; /** * Used for setting/getting authentication information from Redis */ public class RedisSessionDAO extends AbstractSessionDAO { private static Logger logger = LoggerFactory.getLogger(RedisSessionDAO.class); private static final String DEFAULT_SESSION_KEY_PREFIX = "shiro:session:"; private String keyPrefix = DEFAULT_SESSION_KEY_PREFIX; /** * doReadSession be called about 10 times when login. * Save Session in ThreadLocal to resolve this problem. sessionInMemoryTimeout is expiration of Session in ThreadLocal. * The default value is 1000 milliseconds (1s). * Most of time, you don't need to change it. * * You can turn it off by setting sessionInMemoryEnabled to false */ private static final long DEFAULT_SESSION_IN_MEMORY_TIMEOUT = 1000L; private long sessionInMemoryTimeout = DEFAULT_SESSION_IN_MEMORY_TIMEOUT; private static final boolean DEFAULT_SESSION_IN_MEMORY_ENABLED = true; private boolean sessionInMemoryEnabled = DEFAULT_SESSION_IN_MEMORY_ENABLED; byte[] key = getByteKey(session.getId()); byte[] value = SerializeUtils.serialize(session); Long timeout = session.getTimeout()/1000; int expire = timeout.intValue(); this.redisManager.set(key, value, expire); } @Override * DEFAULT_EXPIRE: use the timeout of session instead of setting it by yourself * NO_EXPIRE: never expire */ private static final int DEFAULT_EXPIRE = -2; private static final int NO_EXPIRE = -1; private int expire = DEFAULT_EXPIRE; private static final int MILLISECONDS_IN_A_SECOND = 1000; /** * redisManager used for communicate with Redis */ private IRedisManager redisManager; /** * Serializer of key */ private RedisSerializer keySerializer = new StringSerializer(); /** * Serializer of value */ private RedisSerializer valueSerializer = new ObjectSerializer(); /** * save/update session * @param session * @throws UnknownSessionException */ @Override public void update(Session session) throws UnknownSessionException { if (this.sessionInMemoryEnabled) { this.removeExpiredSessionInMemory(); } this.saveSession(session); if (this.sessionInMemoryEnabled) { this.setSessionToThreadLocal(session.getId(), session); } } private void saveSession(Session session) throws UnknownSessionException { if (session == null || session.getId() == null) { logger.error("session or session id is null"); throw new UnknownSessionException("session or session id is null"); } byte[] key; byte[] value; try { key = keySerializer.serialize(getRedisSessionKey(session.getId())); value = valueSerializer.serialize(session); } catch (SerializationException e) { logger.error("serialize session error. session id=" + session.getId()); throw new UnknownSessionException(e); } if (expire == DEFAULT_EXPIRE) { redisManager.set(key, value, (int) (session.getTimeout() / MILLISECONDS_IN_A_SECOND)); return; } if (expire != NO_EXPIRE && expire * MILLISECONDS_IN_A_SECOND < session.getTimeout()) { logger.warn("Redis session expire time: " + (expire * MILLISECONDS_IN_A_SECOND) + " is less than Session timeout: " + session.getTimeout() + " . It may cause some problems."); } redisManager.set(key, value, expire); } /** * delete session * @param session */ @Override public void delete(Session session) { if (this.sessionInMemoryEnabled) { this.removeExpiredSessionInMemory(); } if (session == null || session.getId() == null) { logger.error("session or session id is null"); return; } if (this.sessionInMemoryEnabled) { this.delSessionFromThreadLocal(session.getId()); } try { redisManager.del(keySerializer.serialize(getRedisSessionKey(session.getId()))); } catch (SerializationException e) { logger.error("delete session error. session id=" + session.getId()); } } /** * get all active sessions * @return */ @Override public Collection<Session> getActiveSessions() { if (this.sessionInMemoryEnabled) { this.removeExpiredSessionInMemory(); } Set<Session> sessions = new HashSet<Session>(); try { Set<byte[]> keys = redisManager.keys(keySerializer.serialize(this.keyPrefix + "*")); if (keys != null && keys.size() > 0) { for (byte[] key:keys) { Session s = (Session) valueSerializer.deserialize(redisManager.get(key)); sessions.add(s); } } } catch (SerializationException e) { logger.error("get active sessions error."); } return sessions; } @Override protected Serializable doCreate(Session session) { if (this.sessionInMemoryEnabled) { this.removeExpiredSessionInMemory(); } if (session == null) { logger.error("session is null"); throw new UnknownSessionException("session is null"); } Serializable sessionId = this.generateSessionId(session); this.assignSessionId(session, sessionId); this.saveSession(session); return sessionId; } /** * I change * @param sessionId * @return */ @Override protected Session doReadSession(Serializable sessionId) { if (this.sessionInMemoryEnabled) { this.removeExpiredSessionInMemory(); } if (sessionId == null) { logger.warn("session id is null"); return null; } if (this.sessionInMemoryEnabled) { Session session = getSessionFromThreadLocal(sessionId); if (session != null) { return session; } } Session session = null; try { String sessionRedisKey = getRedisSessionKey(sessionId); logger.debug("read session: " + sessionRedisKey + " from Redis"); session = (Session) valueSerializer.deserialize(redisManager.get(keySerializer.serialize(sessionRedisKey))); if (this.sessionInMemoryEnabled) { setSessionToThreadLocal(sessionId, session); } } catch (SerializationException e) { logger.error("read session error. sessionId: " + sessionId); } return session; } private void setSessionToThreadLocal(Serializable sessionId, Session session) { this.initSessionsInThread(); Map<Serializable, SessionInMemory> sessionMap = (Map<Serializable, SessionInMemory>) sessionsInThread.get(); sessionMap.put(sessionId, this.createSessionInMemory(session)); } private void delSessionFromThreadLocal(Serializable sessionId) { Map<Serializable, SessionInMemory> sessionMap = (Map<Serializable, SessionInMemory>) sessionsInThread.get(); if (sessionMap == null) { return; } sessionMap.remove(sessionId); } private SessionInMemory createSessionInMemory(Session session) { SessionInMemory sessionInMemory = new SessionInMemory(); sessionInMemory.setCreateTime(new Date()); sessionInMemory.setSession(session); return sessionInMemory; } private void initSessionsInThread() { Map<Serializable, SessionInMemory> sessionMap = (Map<Serializable, SessionInMemory>) sessionsInThread.get(); if (sessionMap == null) { sessionMap = new HashMap<Serializable, SessionInMemory>(); sessionsInThread.set(sessionMap); } } private void removeExpiredSessionInMemory() { Map<Serializable, SessionInMemory> sessionMap = (Map<Serializable, SessionInMemory>) sessionsInThread.get(); if (sessionMap == null) { return; } Iterator<Serializable> it = sessionMap.keySet().iterator(); while (it.hasNext()) { Serializable sessionId = it.next(); SessionInMemory sessionInMemory = sessionMap.get(sessionId); if (sessionInMemory == null) { it.remove(); continue; } long liveTime = getSessionInMemoryLiveTime(sessionInMemory); if (liveTime > sessionInMemoryTimeout) { it.remove(); } } if (sessionMap.size() == 0) { sessionsInThread.remove(); } } private Session getSessionFromThreadLocal(Serializable sessionId) { if (sessionsInThread.get() == null) { return null; } Map<Serializable, SessionInMemory> sessionMap = (Map<Serializable, SessionInMemory>) sessionsInThread.get(); SessionInMemory sessionInMemory = sessionMap.get(sessionId); if (sessionInMemory == null) { return null; } logger.debug("read session from memory"); return sessionInMemory.getSession(); } private long getSessionInMemoryLiveTime(SessionInMemory sessionInMemory) { Date now = new Date(); return now.getTime() - sessionInMemory.getCreateTime().getTime(); } private String getRedisSessionKey(Serializable sessionId) { return this.keyPrefix + sessionId; } public IRedisManager getRedisManager() { return redisManager; } public void setRedisManager(IRedisManager redisManager) { this.redisManager = redisManager; } public String getKeyPrefix() { return keyPrefix; } public void setKeyPrefix(String keyPrefix) { this.keyPrefix = keyPrefix; } public RedisSerializer getKeySerializer() { return keySerializer; } public void setKeySerializer(RedisSerializer keySerializer) { this.keySerializer = keySerializer; } public RedisSerializer getValueSerializer() { return valueSerializer; } public void setValueSerializer(RedisSerializer valueSerializer) { this.valueSerializer = valueSerializer; } public long getSessionInMemoryTimeout() { return sessionInMemoryTimeout; } public void setSessionInMemoryTimeout(long sessionInMemoryTimeout) { this.sessionInMemoryTimeout = sessionInMemoryTimeout; } public int getExpire() { return expire; } public void setExpire(int expire) { this.expire = expire; } public boolean getSessionInMemoryEnabled() { return sessionInMemoryEnabled; } public void setSessionInMemoryEnabled(boolean sessionInMemoryEnabled) { this.sessionInMemoryEnabled = sessionInMemoryEnabled; } public static ThreadLocal getSessionsInThread() { return sessionsInThread; } } <MSG> resolve expire setting not working bug <DFF> @@ -40,11 +40,8 @@ public class RedisSessionDAO extends AbstractSessionDAO { byte[] key = getByteKey(session.getId()); byte[] value = SerializeUtils.serialize(session); - - Long timeout = session.getTimeout()/1000; - int expire = timeout.intValue(); - - this.redisManager.set(key, value, expire); + session.setTimeout(redisManager.getExpire()*1000); + this.redisManager.set(key, value, redisManager.getExpire()); } @Override
2
resolve expire setting not working bug
5
.java
java
mit
alexxiyang/shiro-redis
1734
<NME> roi_pooling_op.cc <BEF> #include "operators/vision/roi_pooling_op.h" #include "core/workspace.h" #include "utils/math_functions.h" #include "utils/op_kernel.h" namespace dragon { template <class Context> template <typename T> void ROIPoolingOp<Context>::RunWithType() { kernel::ROIPooling<T, Context>(spatial_scale, pool_h, pool_w, &input(0), &input(1), mask, output(0)); } template <class Context> void ROIPoolingOp<Context>::RunOnDevice() { vector<TIndex> dims({input(1).dim(0), input(0).dim(1), pool_h, pool_w}); output(0)->Reshape(dims); mask = ws()->CreateTensor("_t_" + anchor() + "_roi_pool_mask"); mask->Reshape(dims); if (input(0).template IsType<float>()) return RunWithType<float>(); else LOG(FATAL) << "Unsupported input types."; } DEPLOY_CPU(ROIPooling); #ifdef WITH_CUDA DEPLOY_CUDA(ROIPooling); #endif OPERATOR_SCHEMA(ROIPooling).NumInputs(2).NumOutputs(1); template <class Context> template <typename T> void ROIPoolingGradientOp<Context>::RunWithType() { kernel::ROIPoolingGrad<T, Context>(spatial_scale, pool_h, pool_w, &input(-1), &input(1), mask, output(0)); } template <class Context> void ROIPoolingGradientOp<Context>::RunOnDevice() { output(0)->ReshapeLike(input(0)); mask = ws()->GetTensor("_t_" + anchor() + "_roi_pool_mask"); if (input(0).template IsType<float>()) return RunWithType<float>(); else LOG(FATAL) << "Unsupported input types."; } template <class Context> void ROIPoolingGradientOp<Context>::CleanResource() { Operator<Context>::CleanResource(); ws()->ReleaseBuffer(mask, "Common", true); } DEPLOY_CPU(ROIPoolingGradient); #ifdef WITH_CUDA DEPLOY_CUDA(ROIPoolingGradient); #endif OPERATOR_SCHEMA(ROIPoolingGradient).NumInputs(3).NumOutputs(1); class GetROIPoolingGradient final : public GradientMakerBase { public: GRADIENT_MAKER_CTOR(GetROIPoolingGradient); vector<OperatorDef> MakeDefs() override { return SingleDef(def.type() + "Gradient", "", vector<string> {I(0), I(1), GO(0)}, vector<string> {GI(0)}); } }; REGISTER_GRADIENT(ROIPooling, GetROIPoolingGradient); } // namespace dragon <MSG> Refactor Norm Module <DFF> @@ -17,10 +17,10 @@ void ROIPoolingOp<Context>::RunWithType() { template <class Context> void ROIPoolingOp<Context>::RunOnDevice() { + mask = ws()->CreateTensor("/mnt/" + anchor() + "/roi_pool_mask"); + vector<TIndex> dims({input(1).dim(0), input(0).dim(1), pool_h, pool_w}); output(0)->Reshape(dims); - - mask = ws()->CreateTensor("_t_" + anchor() + "_roi_pool_mask"); mask->Reshape(dims); if (input(0).template IsType<float>()) return RunWithType<float>(); @@ -33,10 +33,9 @@ DEPLOY_CUDA(ROIPooling); #endif OPERATOR_SCHEMA(ROIPooling).NumInputs(2).NumOutputs(1); - template <class Context> template <typename T> void ROIPoolingGradientOp<Context>::RunWithType() { - kernel::ROIPoolingGrad<T, Context>(spatial_scale, + kernel::ROIPoolingGrad<T, Context>(spatial_scale, pool_h, pool_w, &input(-1), &input(1), @@ -46,20 +45,14 @@ void ROIPoolingGradientOp<Context>::RunWithType() { template <class Context> void ROIPoolingGradientOp<Context>::RunOnDevice() { - output(0)->ReshapeLike(input(0)); + mask = ws()->GetTensor("/mnt/" + anchor() + "/roi_pool_mask"); - mask = ws()->GetTensor("_t_" + anchor() + "_roi_pool_mask"); + output(0)->ReshapeLike(input(0)); if (input(0).template IsType<float>()) return RunWithType<float>(); else LOG(FATAL) << "Unsupported input types."; } -template <class Context> -void ROIPoolingGradientOp<Context>::CleanResource() { - Operator<Context>::CleanResource(); - ws()->ReleaseBuffer(mask, "Common", true); -} - DEPLOY_CPU(ROIPoolingGradient); #ifdef WITH_CUDA DEPLOY_CUDA(ROIPoolingGradient);
5
Refactor Norm Module
12
.cc
cc
bsd-2-clause
neopenx/Dragon
1735
<NME> roi_pooling_op.cc <BEF> #include "operators/vision/roi_pooling_op.h" #include "core/workspace.h" #include "utils/math_functions.h" #include "utils/op_kernel.h" namespace dragon { template <class Context> template <typename T> void ROIPoolingOp<Context>::RunWithType() { kernel::ROIPooling<T, Context>(spatial_scale, pool_h, pool_w, &input(0), &input(1), mask, output(0)); } template <class Context> void ROIPoolingOp<Context>::RunOnDevice() { vector<TIndex> dims({input(1).dim(0), input(0).dim(1), pool_h, pool_w}); output(0)->Reshape(dims); mask = ws()->CreateTensor("_t_" + anchor() + "_roi_pool_mask"); mask->Reshape(dims); if (input(0).template IsType<float>()) return RunWithType<float>(); else LOG(FATAL) << "Unsupported input types."; } DEPLOY_CPU(ROIPooling); #ifdef WITH_CUDA DEPLOY_CUDA(ROIPooling); #endif OPERATOR_SCHEMA(ROIPooling).NumInputs(2).NumOutputs(1); template <class Context> template <typename T> void ROIPoolingGradientOp<Context>::RunWithType() { kernel::ROIPoolingGrad<T, Context>(spatial_scale, pool_h, pool_w, &input(-1), &input(1), mask, output(0)); } template <class Context> void ROIPoolingGradientOp<Context>::RunOnDevice() { output(0)->ReshapeLike(input(0)); mask = ws()->GetTensor("_t_" + anchor() + "_roi_pool_mask"); if (input(0).template IsType<float>()) return RunWithType<float>(); else LOG(FATAL) << "Unsupported input types."; } template <class Context> void ROIPoolingGradientOp<Context>::CleanResource() { Operator<Context>::CleanResource(); ws()->ReleaseBuffer(mask, "Common", true); } DEPLOY_CPU(ROIPoolingGradient); #ifdef WITH_CUDA DEPLOY_CUDA(ROIPoolingGradient); #endif OPERATOR_SCHEMA(ROIPoolingGradient).NumInputs(3).NumOutputs(1); class GetROIPoolingGradient final : public GradientMakerBase { public: GRADIENT_MAKER_CTOR(GetROIPoolingGradient); vector<OperatorDef> MakeDefs() override { return SingleDef(def.type() + "Gradient", "", vector<string> {I(0), I(1), GO(0)}, vector<string> {GI(0)}); } }; REGISTER_GRADIENT(ROIPooling, GetROIPoolingGradient); } // namespace dragon <MSG> Refactor Norm Module <DFF> @@ -17,10 +17,10 @@ void ROIPoolingOp<Context>::RunWithType() { template <class Context> void ROIPoolingOp<Context>::RunOnDevice() { + mask = ws()->CreateTensor("/mnt/" + anchor() + "/roi_pool_mask"); + vector<TIndex> dims({input(1).dim(0), input(0).dim(1), pool_h, pool_w}); output(0)->Reshape(dims); - - mask = ws()->CreateTensor("_t_" + anchor() + "_roi_pool_mask"); mask->Reshape(dims); if (input(0).template IsType<float>()) return RunWithType<float>(); @@ -33,10 +33,9 @@ DEPLOY_CUDA(ROIPooling); #endif OPERATOR_SCHEMA(ROIPooling).NumInputs(2).NumOutputs(1); - template <class Context> template <typename T> void ROIPoolingGradientOp<Context>::RunWithType() { - kernel::ROIPoolingGrad<T, Context>(spatial_scale, + kernel::ROIPoolingGrad<T, Context>(spatial_scale, pool_h, pool_w, &input(-1), &input(1), @@ -46,20 +45,14 @@ void ROIPoolingGradientOp<Context>::RunWithType() { template <class Context> void ROIPoolingGradientOp<Context>::RunOnDevice() { - output(0)->ReshapeLike(input(0)); + mask = ws()->GetTensor("/mnt/" + anchor() + "/roi_pool_mask"); - mask = ws()->GetTensor("_t_" + anchor() + "_roi_pool_mask"); + output(0)->ReshapeLike(input(0)); if (input(0).template IsType<float>()) return RunWithType<float>(); else LOG(FATAL) << "Unsupported input types."; } -template <class Context> -void ROIPoolingGradientOp<Context>::CleanResource() { - Operator<Context>::CleanResource(); - ws()->ReleaseBuffer(mask, "Common", true); -} - DEPLOY_CPU(ROIPoolingGradient); #ifdef WITH_CUDA DEPLOY_CUDA(ROIPoolingGradient);
5
Refactor Norm Module
12
.cc
cc
bsd-2-clause
neopenx/Dragon
1736
<NME> gradient_op.cc <BEF> #include "operators/misc/gradient_op.h" #include "core/workspace.h" #include "utils/math_functions.h" namespace dragon { template <class Context> template <typename T> void GradientGenerateOp<Context>::RunWithType() { for (int i = 0; i < OutputSize(); i++) { if (output(i)->name() == "ignore") continue; output(i)->ReshapeLike(input(i)); auto* dXdata = output(0)->template mutable_data<T, Context>(); math::Set<T, Context>(output(0)->count(), dragon_cast<T, float>(defaults[i]), dXdata); } } template <class Context> void GradientGenerateOp<Context>::RunOnDevice() { if (input(0).template IsType<float>()) RunWithType<float>(); #ifdef WITH_CUDA_FP16 else if (input(0).template IsType<float16>()) RunWithType<float16>(); #endif else LOG(FATAL) << "Unsupported input types."; } DEPLOY_CPU(GradientGenerate); #ifdef WITH_CUDA DEPLOY_CUDA(GradientGenerate); #endif OPERATOR_SCHEMA(GradientGenerate); template <class Context> template <typename T> void GradientGatherOp<Context>::RunWithType() { auto* dXdata = output(0)->template mutable_data<T, Context>(); TIndex count = output(0)->count(); for (int i = 1; i < indices.size(); i++) { CHECK(output(0)->dims() == input(indices[i]).dims()); math::Add<T, Context>(count, dXdata, input(indices[i]).template data<T, Context>(), dXdata); input(indices[i]).Reset(); } } template <class Context> void GradientGatherOp<Context>::RunOnDevice() { if (indices.size() == 0) return; ws()->CreateAvatar(output(0), &input(indices[0])); if (input(indices[0]).template IsType<float>()) RunWithType<float>(); else LOG(FATAL) << "Unsupported input types."; } DEPLOY_CPU(GradientGather); #ifdef WITH_CUDA DEPLOY_CUDA(GradientGather); #endif OPERATOR_SCHEMA(GradientGather).NumOutputs(1); NO_GRADIENT(GradientGather); template <class Context> void StopGradientOp<Context>::RunOnDevice() {} DEPLOY_CPU(StopGradient); #ifdef WITH_CUDA DEPLOY_CUDA(StopGradient); #endif OPERATOR_SCHEMA(StopGradient).NumInputs(1).NumOutputs(1).Inplace({ { 0, 0 } });; NO_GRADIENT(StopGradient); } // namespace dragon <MSG> Remove Avatar on gathering gradients <DFF> @@ -35,9 +35,11 @@ template <class Context> template <typename T> void GradientGatherOp<Context>::RunWithType() { auto* dXdata = output(0)->template mutable_data<T, Context>(); TIndex count = output(0)->count(); - for (int i = 1; i < indices.size(); i++) { + for (int i = 0; i < indices.size(); i++) { CHECK(output(0)->dims() == input(indices[i]).dims()); - math::Add<T, Context>(count, dXdata, input(indices[i]).template data<T, Context>(), dXdata); + auto* dYdata = input(indices[i]).template data<T, Context>(); + if (i == 0) ctx().template Copy<T, Context, Context>(count, dXdata, dYdata); + else math::Add<T, Context>(count, dXdata, dYdata, dXdata); input(indices[i]).Reset(); } } @@ -45,7 +47,7 @@ void GradientGatherOp<Context>::RunWithType() { template <class Context> void GradientGatherOp<Context>::RunOnDevice() { if (indices.size() == 0) return; - ws()->CreateAvatar(output(0), &input(indices[0])); + output(0)->ReshapeLike(input(indices[0])); if (input(indices[0]).template IsType<float>()) RunWithType<float>(); else LOG(FATAL) << "Unsupported input types.";
5
Remove Avatar on gathering gradients
3
.cc
cc
bsd-2-clause
neopenx/Dragon
1737
<NME> gradient_op.cc <BEF> #include "operators/misc/gradient_op.h" #include "core/workspace.h" #include "utils/math_functions.h" namespace dragon { template <class Context> template <typename T> void GradientGenerateOp<Context>::RunWithType() { for (int i = 0; i < OutputSize(); i++) { if (output(i)->name() == "ignore") continue; output(i)->ReshapeLike(input(i)); auto* dXdata = output(0)->template mutable_data<T, Context>(); math::Set<T, Context>(output(0)->count(), dragon_cast<T, float>(defaults[i]), dXdata); } } template <class Context> void GradientGenerateOp<Context>::RunOnDevice() { if (input(0).template IsType<float>()) RunWithType<float>(); #ifdef WITH_CUDA_FP16 else if (input(0).template IsType<float16>()) RunWithType<float16>(); #endif else LOG(FATAL) << "Unsupported input types."; } DEPLOY_CPU(GradientGenerate); #ifdef WITH_CUDA DEPLOY_CUDA(GradientGenerate); #endif OPERATOR_SCHEMA(GradientGenerate); template <class Context> template <typename T> void GradientGatherOp<Context>::RunWithType() { auto* dXdata = output(0)->template mutable_data<T, Context>(); TIndex count = output(0)->count(); for (int i = 1; i < indices.size(); i++) { CHECK(output(0)->dims() == input(indices[i]).dims()); math::Add<T, Context>(count, dXdata, input(indices[i]).template data<T, Context>(), dXdata); input(indices[i]).Reset(); } } template <class Context> void GradientGatherOp<Context>::RunOnDevice() { if (indices.size() == 0) return; ws()->CreateAvatar(output(0), &input(indices[0])); if (input(indices[0]).template IsType<float>()) RunWithType<float>(); else LOG(FATAL) << "Unsupported input types."; } DEPLOY_CPU(GradientGather); #ifdef WITH_CUDA DEPLOY_CUDA(GradientGather); #endif OPERATOR_SCHEMA(GradientGather).NumOutputs(1); NO_GRADIENT(GradientGather); template <class Context> void StopGradientOp<Context>::RunOnDevice() {} DEPLOY_CPU(StopGradient); #ifdef WITH_CUDA DEPLOY_CUDA(StopGradient); #endif OPERATOR_SCHEMA(StopGradient).NumInputs(1).NumOutputs(1).Inplace({ { 0, 0 } });; NO_GRADIENT(StopGradient); } // namespace dragon <MSG> Remove Avatar on gathering gradients <DFF> @@ -35,9 +35,11 @@ template <class Context> template <typename T> void GradientGatherOp<Context>::RunWithType() { auto* dXdata = output(0)->template mutable_data<T, Context>(); TIndex count = output(0)->count(); - for (int i = 1; i < indices.size(); i++) { + for (int i = 0; i < indices.size(); i++) { CHECK(output(0)->dims() == input(indices[i]).dims()); - math::Add<T, Context>(count, dXdata, input(indices[i]).template data<T, Context>(), dXdata); + auto* dYdata = input(indices[i]).template data<T, Context>(); + if (i == 0) ctx().template Copy<T, Context, Context>(count, dXdata, dYdata); + else math::Add<T, Context>(count, dXdata, dYdata, dXdata); input(indices[i]).Reset(); } } @@ -45,7 +47,7 @@ void GradientGatherOp<Context>::RunWithType() { template <class Context> void GradientGatherOp<Context>::RunOnDevice() { if (indices.size() == 0) return; - ws()->CreateAvatar(output(0), &input(indices[0])); + output(0)->ReshapeLike(input(indices[0])); if (input(indices[0]).template IsType<float>()) RunWithType<float>(); else LOG(FATAL) << "Unsupported input types.";
5
Remove Avatar on gathering gradients
3
.cc
cc
bsd-2-clause
neopenx/Dragon
1738
<NME> commandline.py <BEF> """High level command line interface to hitch.""" from subprocess import call, PIPE, STDOUT, Popen from hitch.click import command, group, argument, option from os import path, makedirs, listdir, kill, remove from sys import stderr, stdout, exit, modules, argv from functools import partial, reduce from hitch import hitchdir, languagestrings import shutil import signal import copy class CalledProcessError(Exception): """Re-implemented CalledProcessError, since it is not available < python 2.7.""" pass def check_output(command, stdout=PIPE, stderr=PIPE): """Re-implemented subprocess.check_output since it is not available < python 2.7.""" return Popen(command, stdout=stdout, stderr=stderr).communicate()[0] def check_call(command, shell=False): """Re-implemented subprocess.check_call since it is not available < python 2.7.""" process = Popen(command, shell=shell) process.communicate() if process.returncode != 0: raise CalledProcessError return def stop_everything(sig, frame): """Exit hitch.""" exit(1) def installpackages(): """Install packages with hitchsystem.""" hitchsystem = path.abspath(path.join(".hitch", "virtualenv", "bin", "hitchsystem")) signal.signal(signal.SIGINT, signal.SIG_IGN) if python is None: python = path.join(path.dirname(virtualenv), "python") else: stderr.write("{} not found.\n".format(virtualenv)) if python is None: if call(["which", "python3"], stdout=PIPE, stderr=PIPE): pip = path.join(hitchdir.get_hitch_directory_or_fail(), "virtualenv", "bin", "pip") hitchreqs_filename = path.join(hitchdir.get_hitch_directory_or_fail(), "..", "hitchreqs.txt") pip_freeze = check_output([pip, "freeze"]).decode('utf8').split('\n') hitchreqs_handle = "" with open(hitchreqs_filename, "r") as hitchreqs_handle: hitchreqs = hitchreqs_handle.read().split('\n') if not sorted(pip_freeze) == sorted(hitchreqs): call([pip, "install", "-r", "hitchreqs.txt"]) if path.exists(python): python3 = python else: stderr.write("{} not found.\n".format(python)) exit(1) str_version = check_output([python3, "-V"], stderr=STDOUT).decode('utf8').replace('\n', '') @group() def cli(): pass @command() @option( '-p', '--python', default=None, help=languagestrings.SPECIFY_PYTHON_TO_CREATE_VIRTUALENV_WITH ) @option( '-v', '--virtualenv', default=None, help=languagestrings.SPECIFY_VIRTUALENV_TO_CREATE_HITCH_WITH ) def init(python, virtualenv): """Initialize hitch in this directory.""" if virtualenv is None: if call(["which", "virtualenv"], stdout=PIPE, stderr=PIPE) != 0: stderr.write(languagestrings.YOU_MUST_HAVE_VIRTUALENV_INSTALLED) stderr.flush() exit(1) virtualenv = check_output(["which", "virtualenv"]).decode('utf8').replace("\n", "") else: if path.exists(virtualenv): if python is None: python = path.join(path.dirname(virtualenv), "python") else: stderr.write("{0} not found.\n".format(virtualenv)) if python is None: if call(["which", "python3"], stdout=PIPE, stderr=PIPE) != 0: stderr.write(languagestrings.YOU_MUST_HAVE_PYTHON3_INSTALLED) stderr.flush() exit(1) python3 = check_output(["which", "python3"]).decode('utf8').replace("\n", "") else: if path.exists(python): python3 = python else: stderr.write("{0} not found.\n".format(python)) exit(1) python_version = check_output([python3, "-V"], stderr=STDOUT).decode('utf8') replacements = ('Python ', ''), ('\n', '') str_version = reduce(lambda a, kv: a.replace(*kv), replacements, python_version) tuple_version = tuple([int(x) for x in str_version.split('.')[:2]]) with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) def get_pip(): """Get the file path to the hitch pip.""" return path.join(hitchdir.get_hitch_directory_or_fail(), "virtualenv", "bin", "pip") @command(context_settings={'help_option_names':[],'ignore_unknown_options':True}, help="dd") @argument('arguments', nargs=-1) def runpackage(arguments): # Generic method to run any installed app in the virtualenv whose name starts with hitch* update_requirements() binfile = path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin", "hitch{}".format(argv[1])) command = [binfile, ] + argv[2:] # When receiving an exit signal, just forward it to process child. try: check_call([ virtualenv, ".hitch/virtualenv", "--no-site-packages", "--distribute", "-p", python3 ]) check_call([pip, "install", "--upgrade", "pip"]) check_call([pip, "install", "--upgrade", "setuptools"]) check_call([pip, "install", "unixpackage", "hitchsystem"]) installpackages() if path.exists("hitchreqs.txt"): check_call([pip, "install", "-r", "hitchreqs.txt"]) else: check_call([pip, "install", "hitchtest"]) check_call([pip, "install", "hitchquickstart"]) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) signal.signal(signal.SIGINT, signal.SIG_IGN) check_call([path.abspath(path.join(".hitch", "virtualenv", "bin", "hitchquickstart")), ]) signal.signal(signal.SIGINT, stop_everything) installpackages() except CalledProcessError: stderr.write(languagestrings.ERROR_INITIALIZING_HITCH) hitchdir.remove_hitch_directory_if_exists() exit(1) def get_pip(): """Get the file path to the hitch pip.""" return path.join(hitchdir.get_hitch_directory_or_fail(), "virtualenv", "bin", "pip") @command(context_settings={'help_option_names':[],'ignore_unknown_options':True}, help="dd") @argument('arguments', nargs=-1) def runpackage(arguments): # Generic method to run any installed app in the virtualenv whose name starts with hitch* hitchdir.check_hitch_directory_integrity() binfile = path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin", "hitch{0}".format(argv[1])) command = [binfile, ] + argv[2:] # When receiving an exit signal, just forward it to process child. def forward_signal_to_child(pid, signum, frame): kill(pid, signum) process = Popen(command) signal.signal(signal.SIGINT, partial(forward_signal_to_child, process.pid)) signal.signal(signal.SIGTERM, partial(forward_signal_to_child, process.pid)) signal.signal(signal.SIGHUP, partial(forward_signal_to_child, process.pid)) signal.signal(signal.SIGQUIT, partial(forward_signal_to_child, process.pid)) return_code = process.wait() exit(return_code) @command() @argument('package', required=True) def uninstall(package): """Uninstall hitch package.""" hitchdir.check_hitch_directory_integrity() pip = get_pip() call([pip, "uninstall", package] ) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) update_requirements() @command() @argument('package', required=True) def install(package): """Install hitch package.""" hitchdir.check_hitch_directory_integrity() update_requirements() pip = get_pip() call([pip, "install", package, "-U", ]) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) installpackages() @command() def upgrade(): """Upgrade all installed hitch packages.""" hitchdir.check_hitch_directory_integrity() update_requirements() pip = get_pip() package_list = [ p for p in check_output([pip, "freeze"]).decode('utf8').split('\n') if p != "" and "==" in p ] version_fixed_package_list = [p.split("==")[0] for p in package_list] for package in version_fixed_package_list: call([pip, "install", package, "-U", ]) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) installpackages() if package.startswith("hitch") and package != "hitch" ] # Add packages that start with hitch* to the list of commands available for package in packages: cmd = copy.deepcopy(runpackage) cmd.name = package try: description = check_output([ python_bin, '-c', 'import sys;sys.stdout.write(__import__("hitch{}").commandline.cli.help)'.format( package ) ]).decode('utf8') stderr.write("No hitch directory found. Doing nothing.\n") stderr.flush() @command() @option( '-p', '--packages', default=None, help=( "Specify precise packages to remove - " "e.g. postgresql, postgresql-9.3.9, python, python2.6.8" ) ) def cleanpkg(packages): """Remove installed packages from the .hitchpkg directory.""" hitchpkg = path.join(path.expanduser("~"), ".hitchpkg") if path.exists(hitchpkg): if packages is None: shutil.rmtree(hitchpkg) else: for file_or_dir in listdir(hitchpkg): if file_or_dir.startswith(packages): if path.isdir(path.join(hitchpkg, file_or_dir)): shutil.rmtree(path.join(hitchpkg, file_or_dir)) else: remove(path.join(hitchpkg, file_or_dir)) def run(): """Run hitch bootstrap CLI""" signal.signal(signal.SIGINT, stop_everything) signal.signal(signal.SIGTERM, stop_everything) signal.signal(signal.SIGHUP, stop_everything) signal.signal(signal.SIGQUIT, stop_everything) if hitchdir.hitch_exists(): # Get packages from bin folder that are hitch related python_bin = path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin", "python") if path.exists(python_bin): packages = [ package.replace("hitch", "") for package in listdir( path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin") ) if package.startswith("hitch") and package != "hitch" ] # Add commands that start with "hitch" to the list of commands available (e.g. hitchtest, hitchsmtp) for package in packages: cmd = copy.deepcopy(runpackage) cmd.name = package try: description = check_output([ python_bin, '-c', 'import sys;sys.stdout.write(__import__("hitch{0}").commandline.cli.help)'.format( package ) ]).decode('utf8') except CalledProcessError: description = "" cmd.help = description cmd.short_help = description cli.add_command(cmd) cli.add_command(install) cli.add_command(uninstall) cli.add_command(upgrade) cli.add_command(freeze) else: stderr.write(languagestrings.SOMETHING_CORRUPTED) cli.add_command(clean) cli.add_command(cleanpkg) cli.add_command(init) cli.help = "Hitch test runner for:\n\n {0}.".format(hitchdir.get_hitch_directory()) else: cli.add_command(init) cli.add_command(clean) cli.add_command(cleanpkg) cli.help = "Hitch bootstrapper - '.hitch' directory not detected here." cli() if __name__ == '__main__': run() <MSG> BUG : Replaced {}'s with {0}'s in strings to make the code work with earlier versions of python. <DFF> @@ -41,7 +41,7 @@ def init(python, virtualenv): if python is None: python = path.join(path.dirname(virtualenv), "python") else: - stderr.write("{} not found.\n".format(virtualenv)) + stderr.write("{0} not found.\n".format(virtualenv)) if python is None: if call(["which", "python3"], stdout=PIPE, stderr=PIPE): @@ -57,7 +57,7 @@ def init(python, virtualenv): if path.exists(python): python3 = python else: - stderr.write("{} not found.\n".format(python)) + stderr.write("{0} not found.\n".format(python)) exit(1) str_version = check_output([python3, "-V"], stderr=STDOUT).decode('utf8').replace('\n', '') @@ -110,16 +110,18 @@ def update_requirements(): with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) + def get_pip(): """Get the file path to the hitch pip.""" return path.join(hitchdir.get_hitch_directory_or_fail(), "virtualenv", "bin", "pip") + @command(context_settings={'help_option_names':[],'ignore_unknown_options':True}, help="dd") @argument('arguments', nargs=-1) def runpackage(arguments): # Generic method to run any installed app in the virtualenv whose name starts with hitch* update_requirements() - binfile = path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin", "hitch{}".format(argv[1])) + binfile = path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin", "hitch{0}".format(argv[1])) command = [binfile, ] + argv[2:] # When receiving an exit signal, just forward it to process child. @@ -244,14 +246,14 @@ def run(): if package.startswith("hitch") and package != "hitch" ] - # Add packages that start with hitch* to the list of commands available + # Add commands that start with "hitch" to the list of commands available (e.g. hitchtest, hitchsmtp) for package in packages: cmd = copy.deepcopy(runpackage) cmd.name = package try: description = check_output([ python_bin, '-c', - 'import sys;sys.stdout.write(__import__("hitch{}").commandline.cli.help)'.format( + 'import sys;sys.stdout.write(__import__("hitch{0}").commandline.cli.help)'.format( package ) ]).decode('utf8')
7
BUG : Replaced {}'s with {0}'s in strings to make the code work with earlier versions of python.
5
.py
py
agpl-3.0
hitchtest/hitch
1739
<NME> how_does_hitch_compare_to_other_technologies.rst <BEF> How does Hitch compare to other technologies? ============================================= Cucumber/Behave/RSpec/Behat/Behave ---------------------------------- Cucumber, RSpec, Behat and Behave and are all keyword driven test automation frameworks that run automated acceptance tests. They contain an interpreter for executing high level test cases written in Gherkin. Hitch follows a similar approach but has its own equivalent to Gherkin: :doc:`/glossary/hitch_test_description_language`. Unlike Gherkin it does not use its own syntax - its syntax is built upon YAML. Test cases written with Hitch test should usually be less verbose and more to the point, although still ideally maintaining readability. Gherkin example from the Cucumber website (223 characters; English-like): .. code-block:: gherkin Feature: Division In order to avoid silly mistakes Cashiers must be able to calculate a fraction Scenario: Regular numbers * I have entered 3 into the calculator * I press divide * I have entered 2 into the calculator * I press equal * The result should be 1.5 on the screen Hitch equivalent (113 characters; not English-like): .. code-block:: yaml - name: Division description: Cashier calculates a fraction scenario: - Enter: 3 - Press: divide - Enter: 2 - Press: equal - Result: 1.5 Step-to-code regular expression translation is also unnecessary in Hitch sidestepping `potential traps like this. <https://stackoverflow.com/questions/1186547/regular-expressions-in-cucumber-steps>`_ .. note:: This pitfall is `recognized by Cucumber in issue #1. <https://github.com/cucumber/cucumber/issues/1>`_ The python tool behave gives you `three different parser options <https://pythonhosted.org/behave/tutorial.html#step-parameters>`_ as a way to deal with it. There are other `suggested <http://laxmareddy.com/cucumber-step-definitions-regular-expressions-matching-steps/>`_ `workarounds <http://chrismcmahonsblog.blogspot.sg/2013/09/magic-strings-and-regular-expressions.html>`_ too. The above three steps are implemented as follows in Hitch: .. code-block:: python def enter(self, number): # code that enters a number def press(self, key): # code that presses a key def result(self, number): assert displayed_result == number More complex data can also be cleanly encoded into steps and preconditions. Anything that is valid YAML is allowed. You can write a complex step like this: .. code-block:: yaml - Send mail: From address: Receiver <[email protected]> To address: Sender <[email protected]> Body: From: Receiver <[email protected]> To: Sender <[email protected]> Subject: Test email for "HitchSMTP" Content: | http://www.google.com Another link: http://yahoo.com Another link: https://www.google.com.sg/?gfe_rd=cr&ei=2X4mVebUFYTDuATVtoHoAQ#q=long+long+long+long+long+long+url Which would trigger a python method call equivalent to the following: .. code-block:: python self.send_mail( from_address="Receiver <[email protected]>", to_address="To address: Sender <[email protected]>", body={ "From" : "Receiver <[email protected]>", "To" : "Sender <[email protected]>", "Subject" : "Test email for \"HitchSMTP\"" "Content" : ( "http://www.google.com\n" "Another link: http://yahoo.com\n" "Another link: https://www.google.com.sg/?gfe_rd=cr&ei=2X4mVebUFYTDuATVtoHoAQ#q=long+long+long+long+long+long+url" ) } ) Where reading the data in the step code :doc:`/glossary/execution_engine` is still straightforward: .. code-block:: python self.send_mail(self, from_address, to_address, body) content = body.get("content") The above applies to the following packages: * hitchtest Hitch also provides plugins to perform many more test and development related tasks, saving on boilerplate (see :doc:`/plugins/index`). Hitch does *not* provide: * Bindings to write the execution engine in languages other than python. This is not roadmapped and not possible currently. * Plugins to easily test other languages and frameworks (e.g. Java, node, Ruby, etc.). This possible but not easy currently and is roadmapped. Docker/Docker Compose --------------------- Docker is a lightweight virtualization technology that provides system :doc:`/glossary/isolation` using cgroups and kernel namespaces. Docker can be used to develop software in, test software in and deploy software in. By running the same container in all three environments, development and testing can achieve a greater degree of :doc:`/glossary/test_realism` thus avoiding many 'surprise' production bugs. Nonetheless, the isolation and realism is not as high as "true virtualization" (VirtualBox, Xen, VMWare) provided via kernel emulation. The same Docker container running on different systems can (and probably will, for many projects eventually), exhibit different behavior due to different versions of the linux kernel or libc in development, testing and production environments (TODO : verify libc differences??). Due to the reliance on Linux kernel features for isolation, docker also does not work on Mac OS X or BSD platforms without running it in a heavyweight virtual machine. Hitch can run docker containers, as it can any other process (a plugin to make this easier is coming soon). If you deploy docker containers in your production environment, this is a recommended approach since it will bring a greater level of :doc:`/glossary/test_realism`. If you do *not* deploy docker containers in your production environment, you may want to avoid using docker for development and test environments. Hitch achieves a similar, although lower level of isolation and realism using a different approach: * :doc:`/glossary/package_isolation` * :doc:`/glossary/data_isolation` * :doc:`/glossary/process_isolation` * :doc:`/glossary/environment_isolation` You can, for instance, run the exact same database version, python version and redis version that you do in production on your development machine. [ TO DO : docker-compose and starting services bug ] The above applies to the following packages: * hitchserve * hitchtest * All hitch plugins .. note:: You can also run hitch *in* docker. It is regularly tested with the latest version. Built-in Django Testing Framework --------------------------------- Django already comes with four official classes for testing web apps, each of which test at a progressively higher level: * SimpleTestCase - a low level unit tester for Django views. * TransactionTestCase - a low level unit tester for Django views which also rolls back the database. * TestCase - a low level unit tester which performs the above and also loads fixtures and adds django specific assertions. * LiveServerTestCase - a higher level TransactionTestCase which runs the django web server to allow for the use of selenium. See : https://docs.djangoproject.com/en/1.8/topics/testing/tools/ for details. Hitch tests at a higher level than all of these. Hitch is not significantly slower than running individual selenium tests using LiveServerTestCase (it can be faster, in fact). It cannot run tests in parallel, however (LiveServerTestCase can). Hitch is *loosely coupled* to Django. The practical upshot of this is that if you want to *rewrite your whole application* in a different framework - even a different language - the number of lines of code you would have to change in the engine to port the tests over should be very low (for the example app it would require a change to just *seven* lines of code for it to run the same test against flask). Unlike Django, Hitch shuns the use of mock objects, using mock services to perform a similar function. For example, if you want to test sending an email, you configure Django to send a real email to the mock SMTP server rather than using the mock SMTP client. LiveServerTestCase will also *only* run Django as a service. Realistically running Celery as part of your test is simple with Hitch, since it is run as just another service. Running additional services alongside one another is easy with Hitch. See :doc:`/glossary/tight_coupling_and_speed_vs_loose_coupling_and_realism` Tox, PyEnv and Virtualenv ------------------------- Tox is a small, popular python framework that can run unit tests in multiple python environments. It can be used to run unit tests with multiple versions of python if those versions are installed. PyEnv is a small application which can download and compile specific versions of python and run them alongside one another. Virtualenv is a tool for creating a python environment where you can install an isolated group of packages which you can use to run or test an application that depends upon them. Hitch can supplant tox for integration tests (See : :doc:`/howto/parameterize_test_cases`). Hitch *bundles* pyenv and uses it to build a python virtualenv(s) for you. It does this with two lines of code: .. code-block:: python # Define the version of python you want python_package = PythonPackage(version="3.4.3") # Installs python 3.4.3 into ~/.hitchpkg (if it isn't already present) # Creates virtualenv in .hitch folder (if it doesn't already exist) python_package.build() # Python virtualenv you can use with your project: python_package.python == "/path/to/your/project/tests/.hitch/py3.4.3/bin/python" python_package.pip == "/path/to/your/project/tests/.hitch/py3.4.3/bin/pip" The above applies to the following packages: * hitchpython * python-build .. note:: Hitch *also* uses virtualenv to isolate *itself* and the code it runs the :doc:`/glossary/execution_engine` with. This is a virtualenv created with your system's python 3. py.test/nose/unittest2 ---------------------- py.test, nose, unittest and unittest2 are all unit test frameworks, although they are often used to write integration tests. See :doc:`/faq/when_should_i_use_a_unit_test_and_when_should_i_use_an_integration_test` [ TO DO : parameterization, readability, boilerplate to handle services, isolation features, loosely coupled, muliple services ] Robot Framework --------------- [ TO DO ] Other technologies? ------------------- If you'd like to see a comparison with other technologies here or would like to correct something said above, raising a ticket is welcome: https://github.com/hitchtest/hitch/issues/new <MSG> DOCS : Modification to the FAQ comparison <DFF> @@ -192,7 +192,7 @@ The above applies to the following packages: Built-in Django Testing Framework --------------------------------- -Django already comes with four official classes for testing web apps, each of which test at a progressively higher level: +Django already comes with four official classes for unit testing web apps, each of which test at a progressively higher level: * SimpleTestCase - a low level unit tester for Django views. * TransactionTestCase - a low level unit tester for Django views which also rolls back the database. @@ -201,27 +201,17 @@ Django already comes with four official classes for testing web apps, each of wh See : https://docs.djangoproject.com/en/1.8/topics/testing/tools/ for details. -Hitch tests at a higher level than all of these. +Hitch serves as an effective drop in replacement for all of these. While slower, tests written +using hitch should exhibit a greater degree of :doc:`/glossary/test_realism`, :doc:`/glosary/isolation` +and looser :doc:`/glossary/coupling`. -Hitch is not significantly slower than running individual selenium tests using LiveServerTestCase -(it can be faster, in fact). It cannot run tests in parallel, however (LiveServerTestCase can). +Practical benefits: -Hitch is *loosely coupled* to Django. The practical upshot of this is that if you want to *rewrite -your whole application* in a different framework - even a different language - the number of lines of code -you would have to change in the engine to port the tests over should be very low (for the example -app it would require a change to just *seven* lines of code for it to run the same test against flask). - -Unlike Django, Hitch shuns the use of mock objects, using mock services to perform a similar function. For example, -if you want to test sending an email, you configure Django to send a real email to the mock SMTP server rather -than using the mock SMTP client. - -LiveServerTestCase will also *only* run Django as a service. - -Realistically running Celery as part of your test is simple with Hitch, since it is run as just -another service. Running additional services alongside one another is easy with Hitch. - - -See :doc:`/glossary/tight_coupling_and_speed_vs_loose_coupling_and_realism` +* You can run a celery service alongside the test. +* Hitch test maintains stricter database isolation. +* It runs all services with faketime, allowing you to mock the forward passage of time via your tests. +* Looser coupling means that if you refactor or rewrite your application code, you should only need minimal changes to your tests. +* Hitch tests can more easily be made to be :doc:`/glossary/business_readable`. Tox, PyEnv and Virtualenv
10
DOCS : Modification to the FAQ comparison
20
.rst
rst
agpl-3.0
hitchtest/hitch
1740
<NME> engine_api.rst <BEF> Hitch Engine API ================ The Hitch Engine is a python class which is tasked with executing your tests and responding to successes and failures. For a test like this, written in YAML: .. code-block:: yaml - name: Example scenario scenario: - Do something - Do something else The basic Hitch Engine, written in python, would need to look something like this: .. code-block:: python import hitchtest class ExecutionEngine(hitchtest.ExecutionEngine): def set_up(self): # set up code def do_something(self): # code run when test says "Do something" Test steps and their properties are fed to the engine directly as method calls and arguments. All step names and properties are first changed into underscore_case. Example 2 (without variables): .. code-block:: yaml - Do something Is translated into the following method call: .. code-block:: python self.do_something() Example 2 (with a single variable): .. code-block:: yaml - Do something else: value 1 Is translated into the following method call: .. code-block:: python self.do_something_else("value 1") Example 3 (with more than one variable): .. code-block:: yaml Variable 1: Value 1 Variable 2: 2 Is translated into the following method call: .. code-block:: python self.do_complicated_thing(variable_1="Value 1", variable_2="2") Example 4 (with a variable that contains a list): .. code-block:: yaml If the equivalent were written in python it would look like this: - List item 1 - List item 2 Is translated into the following method call: .. code-block:: python self.do_another_complicated_thing(variable_1="value 1", variable_2=["list item 1", "list item 2",]) Example 5 (with a variable that contains a dict): .. code-block:: yaml The python equivalent of that would look like this: Dict item 1: val 1 Dict item 2: val 2 Is translated into the following method call: .. code-block:: python - A 3rd complicated thing: Variable 1: value 1 Variable 2: ----------------------------------------------- Since the tests are written in YAML with optional Jinja2, braces and semicolons have special meanings. Preconditions ------------- self.preconditions is a dictionary representation of the YAML snippet in the test being run. What goes in this variable is up to you. Anything that is valid YAML is allowed. Example: Preconditions ------------- - fixture1.sql python_version: 2.7.3 This will mean your preconditions variable is:: In [1]: self.preconditions Out[1]: {'db_fixtures': ['fixture1.sql'], 'python_version': '2.7.3'} If no preconditions are set, it will set to be an empty dict:: In [1]: self.preconditions Out[1]: {} You can access any properties you set here using python's get method, which you can also use to program in a sensible default:: In [1]: self.preconditions.get('db_fixtures', []) Out[1]: ['fixture1.sql'] Note that while preconditions can contain lists, you can't set preconditions to be a list. Out[1]: ['fixture1.sql'] ---- Tests can also have tags, which let you single out individual tests to run or to run individual tests together: - name: Test with tags tags: to be a list. Tags ---- - Step 1 - Step 2 You can use these to run related sets of tests together like so:: $ hitch test . --tags registration tags: - registration - email - firefox scenario: Description ----------- You can also include comments in the description property. This is to help people understand what the test is doing and why. It is ignored by the engine. $ hitch test . --tags registration,email,firefox Description ----------- You can also include comments in the description property. This where you can put comments in your tests to help explain to people what your test is doing and why. It is ignored by the engine. .. code-block:: yaml - name: Test with long description Stacktrace ---------- self.stacktrace is an object representation of the stack trace that occurred after an exception occurred. It is set to None if no error has occurred while running the test. You can use it to pretty-print a representation of the last error that occurred:: In [1]: print(self.stacktrace.to_template()) [ prints colorized, pretty printed version of the stacktrace ] You can also use it to *dive into* the specific code where the exception occurred, so that you can check the contents of variables at that point or even re-run the code:: In [1]: self.stacktrace[0].ipython() Stacktrace ---------- self.stacktrace is an object representation of the stack trace that occurs after a failure occurs in your test. It is set to None if no error has occurred while running the test. You can use it to pretty-print a representation of the last error that occurred:: In [1]: print(self.stacktrace.to_template()) [ prints colorized, pretty printed version of the stacktrace ] You can also use it to *dive into* the specific engine code where the exception occurred, so that you can check the contents of variables at that point or even re-run the code:: In [1]: self.stacktrace[0].ipython() Entering /home/user/django-remindme/django-remindme-tests/engine.py at line 122 In [1]: on Out[1]: 'register' Settings -------- Test settings are also available in the test engine, e.g.:: In [1]: self.settings Out[1]: {'engine_folder': '/home/user/django-remindme/django-remindme-tests', 'pause_on_failure': True, 'python_version': '2.7.3', 'xvfb': False, 'quiet': False} To read more about setting settings see :doc:`settings`. <MSG> DOCS : Tidied up engine API docs. <DFF> @@ -30,31 +30,31 @@ Step Translation Test steps and their properties are fed to the engine directly as method calls and arguments. All step names and properties are first changed into underscore_case. -Example 2 (without variables): +For example, putting this as a test step: .. code-block:: yaml - Do something -Is translated into the following method call: +Would be equivalent to calling this in your engine: .. code-block:: python self.do_something() -Example 2 (with a single variable): +This, on the other hand (note the semicolon): .. code-block:: yaml - Do something else: value 1 -Is translated into the following method call: +Would be translated into: .. code-block:: python self.do_something_else("value 1") -Example 3 (with more than one variable): +You can include as many arguments as you like in steps like so: .. code-block:: yaml @@ -62,13 +62,13 @@ Example 3 (with more than one variable): Variable 1: Value 1 Variable 2: 2 -Is translated into the following method call: +If the equivalent were written in python it would look like this: .. code-block:: python self.do_complicated_thing(variable_1="Value 1", variable_2="2") -Example 4 (with a variable that contains a list): +Your steps can also contain arguments that contain lists: .. code-block:: yaml @@ -78,13 +78,13 @@ Example 4 (with a variable that contains a list): - List item 1 - List item 2 -Is translated into the following method call: +The python equivalent of that would look like this: .. code-block:: python self.do_another_complicated_thing(variable_1="value 1", variable_2=["list item 1", "list item 2",]) -Example 5 (with a variable that contains a dict): +They can contain dicts (or associative arrays) as well: .. code-block:: yaml @@ -94,7 +94,7 @@ Example 5 (with a variable that contains a dict): Dict item 1: val 1 Dict item 2: val 2 -Is translated into the following method call: +Which in python would be equivalent to this: .. code-block:: python @@ -105,14 +105,16 @@ Careful with semicolons and braces like { and } ----------------------------------------------- Since the tests are written in YAML with optional Jinja2, braces and -semicolons have special meanings. +semicolons have special meanings and must be escaped if you want +to use them. Preconditions ------------- self.preconditions is a dictionary representation of the YAML snippet in the test being run. -What goes in this variable is up to you. Anything that is valid YAML is allowed. +What goes in this snippet is up to you. Anything that is valid YAML and an associative arrays +is allowed. Example: @@ -123,22 +125,22 @@ Example: - fixture1.sql python_version: 2.7.3 -This will mean your preconditions variable is:: +This will mean your preconditions variable will be:: In [1]: self.preconditions Out[1]: {'db_fixtures': ['fixture1.sql'], 'python_version': '2.7.3'} -If no preconditions are set, it will set to be an empty dict:: - - In [1]: self.preconditions - Out[1]: {} - -You can access any properties you set here using python's get method, which -you can also use to program in a sensible default:: +You can access any properties you set here using python's get method (which +you can also use to program in a sensible default):: In [1]: self.preconditions.get('db_fixtures', []) Out[1]: ['fixture1.sql'] +If no preconditions are set, self.preconditions will be an empty dict:: + + In [1]: self.preconditions + Out[1]: {} + Note that while preconditions can contain lists, you can't set preconditions to be a list. @@ -146,7 +148,7 @@ Tags ---- Tests can also have tags, which let you single out individual tests to run -or to run individual tests together: +or to run groups of tests together. Example: - name: Test with tags tags: @@ -157,7 +159,7 @@ or to run individual tests together: - Step 1 - Step 2 -You can use these to run related sets of tests together like so:: +You can use these tags to run related sets of tests together like so:: $ hitch test . --tags registration @@ -169,8 +171,9 @@ Or, if you want to be more specific, you can list the tags, separated by a comma Description ----------- -You can also include comments in the description property. This is to help people -understand what the test is doing and why. +You can also include comments in the description property. This where you can +put comments in your tests to help explain to people what your test is doing +and why. It is ignored by the engine. @@ -192,16 +195,15 @@ It is ignored by the engine. Stacktrace ---------- -self.stacktrace is an object representation of the stack trace that occurred -after an exception occurred. It is set to None if no error has occurred while -running the test. +self.stacktrace is an object representation of the stack trace that occurs after a failure +occurs in your test. It is set to None if no error has occurred while running the test. You can use it to pretty-print a representation of the last error that occurred:: In [1]: print(self.stacktrace.to_template()) [ prints colorized, pretty printed version of the stacktrace ] -You can also use it to *dive into* the specific code where the exception occurred, +You can also use it to *dive into* the specific engine code where the exception occurred, so that you can check the contents of variables at that point or even re-run the code:: In [1]: self.stacktrace[0].ipython()
30
DOCS : Tidied up engine API docs.
28
.rst
rst
agpl-3.0
hitchtest/hitch
1741
<NME> commandline.py <BEF> """High level command line interface to hitch.""" from subprocess import call, PIPE, STDOUT, Popen from hitch.click import command, group, argument, option from os import path, makedirs, listdir, kill, remove from sys import stderr, stdout, exit, modules, argv from functools import partial, reduce from hitch import hitchdir, languagestrings import shutil import signal import copy class CalledProcessError(Exception): """Re-implemented CalledProcessError, since it is not available < python 2.7.""" pass def check_output(command, stdout=PIPE, stderr=PIPE): """Re-implemented subprocess.check_output since it is not available < python 2.7.""" return Popen(command, stdout=stdout, stderr=stderr).communicate()[0] def check_call(command, shell=False): """Re-implemented subprocess.check_call since it is not available < python 2.7.""" process = Popen(command, shell=shell) process.communicate() if process.returncode != 0: raise CalledProcessError return def stop_everything(sig, frame): """Exit hitch.""" exit(1) def installpackages(): """Install packages with hitchsystem.""" hitchsystem = path.abspath(path.join(".hitch", "virtualenv", "bin", "hitchsystem")) signal.signal(signal.SIGINT, signal.SIG_IGN) check_call([hitchsystem, "installpackages", ]) signal.signal(signal.SIGINT, stop_everything) def update_requirements(): """Check hitchreqs.txt match what's installed via pip freeze. If not, update.""" stdout.write(languagestrings.UPDATING_REQUIREMENTS) pip = path.join(hitchdir.get_hitch_directory_or_fail(), "virtualenv", "bin", "pip") hitchreqs_filename = path.join(hitchdir.get_hitch_directory_or_fail(), "..", "hitchreqs.txt") pip_freeze = check_output([pip, "freeze"]).decode('utf8').split('\n') hitchreqs_handle = "" with open(hitchreqs_filename, "r") as hitchreqs_handle: hitchreqs = hitchreqs_handle.read().split('\n') if not sorted(pip_freeze) == sorted(hitchreqs): call([pip, "install", "-r", "hitchreqs.txt"]) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) @group() def cli(): pass @command() @option( '-p', '--python', default=None, help=languagestrings.SPECIFY_PYTHON_TO_CREATE_VIRTUALENV_WITH ) @option( update_requirements() binfile = path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin", "hitch{}".format(argv[1])) command = [binfile, ] + argv[2:] return_code = call(command) exit(return_code) if python is None: python = path.join(path.dirname(virtualenv), "python") else: stderr.write("{0} not found.\n".format(virtualenv)) if python is None: if call(["which", "python3"], stdout=PIPE, stderr=PIPE) != 0: stderr.write(languagestrings.YOU_MUST_HAVE_PYTHON3_INSTALLED) stderr.flush() exit(1) python3 = check_output(["which", "python3"]).decode('utf8').replace("\n", "") else: if path.exists(python): python3 = python else: stderr.write("{0} not found.\n".format(python)) exit(1) python_version = check_output([python3, "-V"], stderr=STDOUT).decode('utf8') replacements = ('Python ', ''), ('\n', '') str_version = reduce(lambda a, kv: a.replace(*kv), replacements, python_version) tuple_version = tuple([int(x) for x in str_version.split('.')[:2]]) if tuple_version < (3, 3): stderr.write(languagestrings.YOU_MUST_HAVE_VERSION_ABOVE_PYTHON33) exit(1) if hitchdir.hitch_exists(): hitchdir.check_hitch_directory_integrity() update_requirements() exit(0) makedirs(".hitch") hitch_directory = hitchdir.get_hitch_directory_or_fail() shutil.rmtree(".hitch") def run(): """Run hitch bootstrap CLI""" signal.signal(signal.SIGINT, signal.SIG_IGN) signal.signal(signal.SIGTERM, signal.SIG_IGN) if hitchdir.hitch_exists(): # Get packages from bin folder that are hitch related installpackages() if path.exists("hitchreqs.txt"): check_call([pip, "install", "-r", "hitchreqs.txt"]) else: check_call([pip, "install", "hitchtest"]) check_call([pip, "install", "hitchquickstart"]) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) signal.signal(signal.SIGINT, signal.SIG_IGN) check_call([path.abspath(path.join(".hitch", "virtualenv", "bin", "hitchquickstart")), ]) signal.signal(signal.SIGINT, stop_everything) installpackages() except CalledProcessError: stderr.write(languagestrings.ERROR_INITIALIZING_HITCH) hitchdir.remove_hitch_directory_if_exists() exit(1) def get_pip(): """Get the file path to the hitch pip.""" return path.join(hitchdir.get_hitch_directory_or_fail(), "virtualenv", "bin", "pip") @command(context_settings={'help_option_names':[],'ignore_unknown_options':True}, help="dd") @argument('arguments', nargs=-1) def runpackage(arguments): # Generic method to run any installed app in the virtualenv whose name starts with hitch* hitchdir.check_hitch_directory_integrity() binfile = path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin", "hitch{0}".format(argv[1])) command = [binfile, ] + argv[2:] # When receiving an exit signal, just forward it to process child. def forward_signal_to_child(pid, signum, frame): kill(pid, signum) process = Popen(command) signal.signal(signal.SIGINT, partial(forward_signal_to_child, process.pid)) signal.signal(signal.SIGTERM, partial(forward_signal_to_child, process.pid)) signal.signal(signal.SIGHUP, partial(forward_signal_to_child, process.pid)) signal.signal(signal.SIGQUIT, partial(forward_signal_to_child, process.pid)) return_code = process.wait() exit(return_code) @command() @argument('package', required=True) def uninstall(package): """Uninstall hitch package.""" hitchdir.check_hitch_directory_integrity() pip = get_pip() call([pip, "uninstall", package] ) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) update_requirements() @command() @argument('package', required=True) def install(package): """Install hitch package.""" hitchdir.check_hitch_directory_integrity() update_requirements() pip = get_pip() call([pip, "install", package, "-U", ]) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) installpackages() @command() def upgrade(): """Upgrade all installed hitch packages.""" hitchdir.check_hitch_directory_integrity() update_requirements() pip = get_pip() package_list = [ p for p in check_output([pip, "freeze"]).decode('utf8').split('\n') if p != "" and "==" in p ] version_fixed_package_list = [p.split("==")[0] for p in package_list] for package in version_fixed_package_list: call([pip, "install", package, "-U", ]) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) installpackages() @command() def freeze(): """List installed hitch packages.""" hitchdir.check_hitch_directory_integrity() pip = path.join(hitchdir.get_hitch_directory_or_fail(), "virtualenv", "bin", "pip") call([pip, "freeze", ]) @command() def clean(): """Remove the hitch directory entirely.""" if hitchdir.hitch_exists(): hitchdir.remove_hitch_directory_if_exists() else: stderr.write("No hitch directory found. Doing nothing.\n") stderr.flush() @command() @option( '-p', '--packages', default=None, help=( "Specify precise packages to remove - " "e.g. postgresql, postgresql-9.3.9, python, python2.6.8" ) ) def cleanpkg(packages): """Remove installed packages from the .hitchpkg directory.""" hitchpkg = path.join(path.expanduser("~"), ".hitchpkg") if path.exists(hitchpkg): if packages is None: shutil.rmtree(hitchpkg) else: for file_or_dir in listdir(hitchpkg): if file_or_dir.startswith(packages): if path.isdir(path.join(hitchpkg, file_or_dir)): shutil.rmtree(path.join(hitchpkg, file_or_dir)) else: remove(path.join(hitchpkg, file_or_dir)) def run(): """Run hitch bootstrap CLI""" signal.signal(signal.SIGINT, stop_everything) signal.signal(signal.SIGTERM, stop_everything) signal.signal(signal.SIGHUP, stop_everything) signal.signal(signal.SIGQUIT, stop_everything) if hitchdir.hitch_exists(): # Get packages from bin folder that are hitch related python_bin = path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin", "python") if path.exists(python_bin): packages = [ package.replace("hitch", "") for package in listdir( path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin") ) if package.startswith("hitch") and package != "hitch" ] # Add commands that start with "hitch" to the list of commands available (e.g. hitchtest, hitchsmtp) for package in packages: cmd = copy.deepcopy(runpackage) cmd.name = package try: description = check_output([ python_bin, '-c', 'import sys;sys.stdout.write(__import__("hitch{0}").commandline.cli.help)'.format( package ) ]).decode('utf8') except CalledProcessError: description = "" cmd.help = description cmd.short_help = description cli.add_command(cmd) cli.add_command(install) cli.add_command(uninstall) cli.add_command(upgrade) cli.add_command(freeze) else: stderr.write(languagestrings.SOMETHING_CORRUPTED) cli.add_command(clean) cli.add_command(cleanpkg) cli.add_command(init) cli.help = "Hitch test runner for:\n\n {0}.".format(hitchdir.get_hitch_directory()) else: cli.add_command(init) cli.add_command(clean) cli.add_command(cleanpkg) cli.help = "Hitch bootstrapper - '.hitch' directory not detected here." cli() if __name__ == '__main__': run() <MSG> BUG : Exits on ctrl-C unless a command is run. <DFF> @@ -74,6 +74,13 @@ def runpackage(arguments): update_requirements() binfile = path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin", "hitch{}".format(argv[1])) command = [binfile, ] + argv[2:] + + # Stop responding to signals - the calling command should take over. + signal.signal(signal.SIGINT, signal.SIG_IGN) + signal.signal(signal.SIGTERM, signal.SIG_IGN) + signal.signal(signal.SIGHUP, signal.SIG_IGN) + signal.signal(signal.SIGQUIT, signal.SIG_IGN) + return_code = call(command) exit(return_code) @@ -113,11 +120,16 @@ def clean(): hitch_directory = hitchdir.get_hitch_directory_or_fail() shutil.rmtree(".hitch") - def run(): """Run hitch bootstrap CLI""" - signal.signal(signal.SIGINT, signal.SIG_IGN) - signal.signal(signal.SIGTERM, signal.SIG_IGN) + def stop_everything(sig, frame): + """Exit hitch.""" + exit(1) + + signal.signal(signal.SIGINT, stop_everything) + signal.signal(signal.SIGTERM, stop_everything) + signal.signal(signal.SIGHUP, stop_everything) + signal.signal(signal.SIGQUIT, stop_everything) if hitchdir.hitch_exists(): # Get packages from bin folder that are hitch related
15
BUG : Exits on ctrl-C unless a command is run.
3
.py
py
agpl-3.0
hitchtest/hitch
1742
<NME> WorkAloneRedisManager.java <BEF> package org.crazycake.shiro.common; import org.crazycake.shiro.IRedisManager; import redis.clients.jedis.Jedis; import redis.clients.jedis.JedisPoolConfig; import redis.clients.jedis.ScanParams; import redis.clients.jedis.ScanResult; import java.util.HashSet; import java.util.List; import java.util.Set; /** * Abstract class of RedisManager. */ public abstract class WorkAloneRedisManager implements IRedisManager { /** * We are going to operate redis by acquiring Jedis object. * The subclass should realizes the way to get Jedis objects by implement the getJedis(). * @return Jedis */ protected abstract Jedis getJedis(); /** * Default value of count. */ protected static final int DEFAULT_COUNT = 100; /** * The number of elements returned at every iteration. */ private int count = DEFAULT_COUNT; /** * JedisPoolConfig used to initialize JedisPool. */ private JedisPoolConfig jedisPoolConfig = new JedisPoolConfig(); /** * get value from redis * @param key key * @return value */ @Override public byte[] get(byte[] key) { if (key == null) { return null; } byte[] value; Jedis jedis = getJedis(); try { value = jedis.get(key); } finally { jedis.close(); } return value; } * set * @param key key * @param value value * @param expireTime expire time * @return value */ @Override @Override public byte[] set(byte[] key, byte[] value, int expireTime) { if (key == null) { return null; } Jedis jedis = getJedis(); try { jedis.set(key, value); // -1 and 0 is not a valid expire time in Jedis if (expireTime > 0) { jedis.expire(key, expireTime); } } finally { jedis.close(); } return value; } /** * Delete a key-value pair. * @param key key */ @Override public void del(byte[] key) { if (key == null) { return; } Jedis jedis = getJedis(); try { jedis.del(key); } finally { jedis.close(); } } /** * Return the size of redis db. * @param pattern key pattern * @return key-value size */ @Override public Long dbSize(byte[] pattern) { long dbSize = 0L; Jedis jedis = getJedis(); try { ScanParams params = new ScanParams(); params.count(count); params.match(pattern); byte[] cursor = ScanParams.SCAN_POINTER_START_BINARY; ScanResult<byte[]> scanResult; do { scanResult = jedis.scan(cursor, params); List<byte[]> results = scanResult.getResult(); for (byte[] result : results) { dbSize++; } cursor = scanResult.getCursorAsBytes(); } while (scanResult.getCursor().compareTo(ScanParams.SCAN_POINTER_START) > 0); } finally { jedis.close(); } return dbSize; } /** * Return all the keys of Redis db. Filtered by pattern. * @param pattern key pattern * @return key set */ public Set<byte[]> keys(byte[] pattern) { Set<byte[]> keys = new HashSet<byte[]>(); Jedis jedis = getJedis(); try { ScanParams params = new ScanParams(); params.count(count); params.match(pattern); byte[] cursor = ScanParams.SCAN_POINTER_START_BINARY; ScanResult<byte[]> scanResult; do { scanResult = jedis.scan(cursor, params); keys.addAll(scanResult.getResult()); cursor = scanResult.getCursorAsBytes(); } while (scanResult.getCursor().compareTo(ScanParams.SCAN_POINTER_START) > 0); } finally { jedis.close(); } return keys; } public int getCount() { return count; } public void setCount(int count) { this.count = count; } public JedisPoolConfig getJedisPoolConfig() { return jedisPoolConfig; } public void setJedisPoolConfig(JedisPoolConfig jedisPoolConfig) { this.jedisPoolConfig = jedisPoolConfig; } } <MSG> - Check expired sessionInThread in RedisSessionDAO - Add testing sessionInThread in integrationTest <DFF> @@ -60,7 +60,7 @@ public abstract class WorkAloneRedisManager implements IRedisManager { * set * @param key key * @param value value - * @param expireTime expire time + * @param expireTime expire time in second * @return value */ @Override
1
- Check expired sessionInThread in RedisSessionDAO - Add testing sessionInThread in integrationTest
1
.java
java
mit
alexxiyang/shiro-redis
1743
<NME> contributors.rst <BEF> Contributors ============ * Jon Hadfield @jonhadfield * Omer Katz @omerkatz * Flavio Curella @fcurella Additional thanks to ==================== * Phoebe Bright @phoebebright * Rui Pacheco @ruipacheco * Andy Baker @andybak * Daniel Greenfeld @pydanny <MSG> DOCS : Added more thanks. <DFF> @@ -11,3 +11,5 @@ Additional thanks to * Phoebe Bright @phoebebright * Rui Pacheco @ruipacheco * Andy Baker @andybak +* Audrey Roy Greenfeld @audreyr +* Daniel Greenfeld @pydanny
2
DOCS : Added more thanks.
0
.rst
rst
agpl-3.0
hitchtest/hitch
1744
<NME> RedisCacheTest.java <BEF> package org.crazycake.shiro; import org.junit.Before; import org.junit.Test; import java.util.*; import static org.hamcrest.CoreMatchers.*; import static org.junit.Assert.assertThat; import java.util.Iterator; import java.util.List; import java.util.Set; public class RedisCacheTest { private RedisManager redisManager; private RedisCache<String, FakeSession> redisCache; private String testKey; private StringSerializer keySerializer; private ObjectSerializer valueSerializer; private FakeSession testValue; private String testPrefix; private Set<byte[]> testSet; private Collection<FakeSession> testValues; private FakeSession tomSession; private FakeSession paulSession; private FakeSession billySession; private byte[] nullValueByte; @Before public void setUp() throws SerializationException, NoSuchFieldException, IllegalAccessException { testPrefix = "testPrefix:"; testKey = "testKey"; testValue = new FakeSession(); testValue.setId(3); testValue.setName("jack"); keySerializer = new StringSerializer(); valueSerializer = new ObjectSerializer(); testSet = new HashSet<byte[]>(); testSet.add(keySerializer.serialize(testPrefix + "tom")); testSet.add(keySerializer.serialize(testPrefix + "paul")); testSet.add(keySerializer.serialize(testPrefix + "billy")); testValues = new ArrayList<FakeSession>(); tomSession = new FakeSession(1, "tom"); testValues.add(tomSession); paulSession = new FakeSession(2, "paul"); testValues.add(paulSession); billySession = new FakeSession(3, "billy"); testValues.add(billySession); redisManager = mock(RedisManager.class); when(redisManager.dbSize()).thenReturn(2L); when(redisManager.get(keySerializer.serialize(testPrefix + testKey))).thenReturn(valueSerializer.serialize(testValue)); when(redisManager.keys(keySerializer.serialize(testPrefix + "*"))).thenReturn(testSet); when(redisManager.get(keySerializer.serialize(testPrefix + "tom"))).thenReturn(valueSerializer.serialize(tomSession)); when(redisManager.get(keySerializer.serialize(testPrefix + "paul"))).thenReturn(valueSerializer.serialize(paulSession)); when(redisManager.get(keySerializer.serialize(testPrefix + "billy"))).thenReturn(valueSerializer.serialize(billySession)); redisCache = new RedisCache<String, FakeSession>(redisManager, keySerializer, valueSerializer, testPrefix, 1); nullValueByte = new byte[0]; } @Test verify(redisManager).set(keySerializer.serialize("employee:3"), valueSerializer.serialize("account information"), 1); new RedisCache<String, String>(null, keySerializer, valueSerializer, "abc:", 1); fail("Excepted exception to be thrown"); } catch (IllegalArgumentException e) { assertThat(e.getMessage(),is("Cache argument cannot be null.")); } RedisCache rc = new RedisCache(new RedisManager(), keySerializer, valueSerializer, "abc", 1); } @Test public void testSize() { assertThat(redisCache.size(), is(2)); } @Test public void testGet() { FakeSession actualValue = redisCache.get(testKey); assertThat(actualValue.getId(), is(3)); assertThat(actualValue.getName(), is("jack")); FakeSession nullValue = redisCache.get(null); assertThat(nullValue, is(nullValue())); FakeSession nonExistValue = redisCache.get("nonExistKey"); assertThat(nonExistValue, is(nullValue)); } @Test public void testPut() { redisCache.put(null, null); verify(redisManager, times(1)).set(null, nullValueByte, 1); redisCache.put(null, new FakeSession()); redisCache.put(testKey, testValue); } @Test public void testRemove() { redisCache.remove(null); FakeSession actualValue = redisCache.remove(testKey); assertThat(actualValue.getId(), is(3)); assertThat(actualValue.getName(), is("jack")); } @Test public void testClear() { redisCache.clear(); } @Test public void testKeys() { Set<String> keys = redisCache.keys(); assertThat(keys.size(), is(3)); assertThat(keys, hasItem(testPrefix + "tom")); } @Test public void testValues() { Collection<FakeSession> values = redisCache.values(); assertThat(values.size(), is(3)); for (Iterator<FakeSession> iterator = values.iterator(); iterator.hasNext(); ) { FakeSession next = iterator.next(); if (next.getId() == 2) { assertThat(next.getName(), is("paul")); } } } } <MSG> Release 2.8.10 - Enhance RedisCache <DFF> @@ -3,7 +3,9 @@ package org.crazycake.shiro; import org.junit.Before; import org.junit.Test; -import java.util.*; +import java.util.Collection; +import java.util.HashSet; +import java.util.Set; import static org.hamcrest.CoreMatchers.*; import static org.junit.Assert.assertThat; @@ -13,49 +15,35 @@ import static org.mockito.Mockito.*; public class RedisCacheTest { private RedisManager redisManager; - private RedisCache<String, FakeSession> redisCache; - private String testKey; + private RedisCache<String, FakeAuth> redisCache; private StringSerializer keySerializer; private ObjectSerializer valueSerializer; - private FakeSession testValue; private String testPrefix; - private Set<byte[]> testSet; - private Collection<FakeSession> testValues; - private FakeSession tomSession; - private FakeSession paulSession; - private FakeSession billySession; - private byte[] nullValueByte; + + private String tomKey; + private byte[] tomKeyBytes; + private String paulKey; + private String billyKey; @Before public void setUp() throws SerializationException, NoSuchFieldException, IllegalAccessException { - testPrefix = "testPrefix:"; - testKey = "testKey"; - testValue = new FakeSession(); - testValue.setId(3); - testValue.setName("jack"); keySerializer = new StringSerializer(); valueSerializer = new ObjectSerializer(); - testSet = new HashSet<byte[]>(); - testSet.add(keySerializer.serialize(testPrefix + "tom")); - testSet.add(keySerializer.serialize(testPrefix + "paul")); - testSet.add(keySerializer.serialize(testPrefix + "billy")); - testValues = new ArrayList<FakeSession>(); - tomSession = new FakeSession(1, "tom"); - testValues.add(tomSession); - paulSession = new FakeSession(2, "paul"); - testValues.add(paulSession); - billySession = new FakeSession(3, "billy"); - testValues.add(billySession); redisManager = mock(RedisManager.class); - when(redisManager.dbSize()).thenReturn(2L); - when(redisManager.get(keySerializer.serialize(testPrefix + testKey))).thenReturn(valueSerializer.serialize(testValue)); - when(redisManager.keys(keySerializer.serialize(testPrefix + "*"))).thenReturn(testSet); - when(redisManager.get(keySerializer.serialize(testPrefix + "tom"))).thenReturn(valueSerializer.serialize(tomSession)); - when(redisManager.get(keySerializer.serialize(testPrefix + "paul"))).thenReturn(valueSerializer.serialize(paulSession)); - when(redisManager.get(keySerializer.serialize(testPrefix + "billy"))).thenReturn(valueSerializer.serialize(billySession)); - redisCache = new RedisCache<String, FakeSession>(redisManager, keySerializer, valueSerializer, testPrefix, 1); - - nullValueByte = new byte[0]; + testPrefix = "testPrefix:"; + redisCache = new RedisCache<String, FakeAuth>(redisManager, keySerializer, valueSerializer, testPrefix, 1); + + Set<byte[]> testSet; + testSet = new HashSet<byte[]>(); + tomKey = testPrefix + "tom"; + tomKeyBytes = keySerializer.serialize(tomKey); + testSet.add(tomKeyBytes); + paulKey = testPrefix + "paul"; + testSet.add(keySerializer.serialize(paulKey)); + billyKey = testPrefix + "billy"; + testSet.add(keySerializer.serialize(billyKey)); + byte[] testKeysBytes = keySerializer.serialize(testPrefix + "*"); + when(redisManager.keys(testKeysBytes)).thenReturn(testSet); } @Test @@ -64,7 +52,21 @@ public class RedisCacheTest { new RedisCache<String, String>(null, keySerializer, valueSerializer, "abc:", 1); fail("Excepted exception to be thrown"); } catch (IllegalArgumentException e) { - assertThat(e.getMessage(),is("Cache argument cannot be null.")); + assertThat(e.getMessage(),is("redisManager cannot be null.")); + } + + try { + new RedisCache<String, String>(new RedisManager(), null, valueSerializer, "abc:", 1); + fail("Excepted exception to be thrown"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(),is("keySerializer cannot be null.")); + } + + try { + new RedisCache<String, String>(new RedisManager(), keySerializer, null, "abc:", 1); + fail("Excepted exception to be thrown"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(),is("valueSerializer cannot be null.")); } RedisCache rc = new RedisCache(new RedisManager(), keySerializer, valueSerializer, "abc", 1); @@ -73,45 +75,72 @@ public class RedisCacheTest { @Test public void testSize() { + when(redisManager.dbSize()).thenReturn(2L); assertThat(redisCache.size(), is(2)); } @Test - public void testGet() { - FakeSession actualValue = redisCache.get(testKey); - assertThat(actualValue.getId(), is(3)); - assertThat(actualValue.getName(), is("jack")); + public void testGet() throws SerializationException { + FakeAuth nullValue = redisCache.get(null); + assertThat(nullValue, nullValue()); - FakeSession nullValue = redisCache.get(null); - assertThat(nullValue, is(nullValue())); + byte[] adminKeyBytes = keySerializer.serialize(testPrefix + "admin"); + FakeAuth adminFakeAuth = new FakeAuth(1, "admin"); + byte[] adminValueBytes = valueSerializer.serialize(adminFakeAuth); + when(redisManager.get(adminKeyBytes)).thenReturn(adminValueBytes); + + FakeAuth actualValue = redisCache.get("admin"); + assertThat(actualValue.getId(), is(1)); + assertThat(actualValue.getRole(), is("admin")); - FakeSession nonExistValue = redisCache.get("nonExistKey"); + FakeAuth nonExistValue = redisCache.get("nonExistKey"); assertThat(nonExistValue, is(nullValue)); } @Test - public void testPut() { + public void testPut() throws SerializationException { redisCache.put(null, null); - verify(redisManager, times(1)).set(null, nullValueByte, 1); - redisCache.put(null, new FakeSession()); + verify(redisManager, times(0)).set(null, null, 1); + + FakeAuth emptyFakeAuth = new FakeAuth(); + byte[] emptyFakeAuthBytes = valueSerializer.serialize(emptyFakeAuth); + redisCache.put(null, emptyFakeAuth); + verify(redisManager, times(0)).set(null, emptyFakeAuthBytes, 1); + + String testKey = "jack"; + byte[] testKeyBytes = keySerializer.serialize(testPrefix + testKey); + redisCache.put(testKey, null); + verify(redisManager, times(1)).set(testKeyBytes, null, 1); + + FakeAuth testValue = new FakeAuth(2, "user"); + byte[] testValueBytes = valueSerializer.serialize(testValue); redisCache.put(testKey, testValue); + verify(redisManager, times(1)).set(testKeyBytes, testValueBytes, 1); } @Test - public void testRemove() { - redisCache.remove(null); - FakeSession actualValue = redisCache.remove(testKey); + public void testRemove() throws SerializationException { + FakeAuth nullValue = redisCache.remove(null); + assertThat(nullValue, is(nullValue())); + + String testKey = "billy"; + byte[] testKeyBytes = keySerializer.serialize(testPrefix + testKey); + FakeAuth testValue = new FakeAuth(3, "client"); + byte[] testValueBytes = valueSerializer.serialize(testValue); + when(redisManager.get(testKeyBytes)).thenReturn(testValueBytes); + FakeAuth actualValue = redisCache.remove(testKey); assertThat(actualValue.getId(), is(3)); - assertThat(actualValue.getName(), is("jack")); + assertThat(actualValue.getRole(), is("client")); } @Test - public void testClear() { + public void testClear() throws SerializationException { redisCache.clear(); + verify(redisManager, times(1)).del(tomKeyBytes); } @Test - public void testKeys() { + public void testKeys() throws SerializationException { Set<String> keys = redisCache.keys(); assertThat(keys.size(), is(3)); assertThat(keys, hasItem(testPrefix + "tom")); @@ -120,14 +149,20 @@ public class RedisCacheTest { } @Test - public void testValues() { - Collection<FakeSession> values = redisCache.values(); + public void testValues() throws SerializationException { + FakeAuth tomFakeAuth = new FakeAuth(1, "admin"); + mockRedisManagerGet(tomKey, tomFakeAuth); + FakeAuth paulFakeAuth = new FakeAuth(2, "client"); + mockRedisManagerGet(paulKey, paulFakeAuth); + FakeAuth billyFakeAuth = new FakeAuth(3, "user"); + mockRedisManagerGet(billyKey, billyFakeAuth); + Collection<FakeAuth> values = redisCache.values(); assertThat(values.size(), is(3)); - for (Iterator<FakeSession> iterator = values.iterator(); iterator.hasNext(); ) { - FakeSession next = iterator.next(); - if (next.getId() == 2) { - assertThat(next.getName(), is("paul")); - } - } + } + + private void mockRedisManagerGet(String key, FakeAuth value) throws SerializationException { + byte[] keyByte = keySerializer.serialize(key); + byte[] valueByte = valueSerializer.serialize(value); + when(redisManager.get(keyByte)).thenReturn(valueByte); } }
95
Release 2.8.10 - Enhance RedisCache
60
.java
java
mit
alexxiyang/shiro-redis
1745
<NME> README.md <BEF> shiro-redis ============= ## Introduction How to use it? =========== Add sonatype release repository into your setting.xml ```xml <repository> <id>sonatype.maven.release</id> <name>Sonatype Release Repository</name> <url>https://oss.sonatype.org/content/repositories/releases</url> </repository> ``` After updated indexes you can add dependency of shiro-redis into your pom.xml ```xml <dependency> <groupId>org.crazycake</groupId> <artifactId>shiro-redis</artifactId> <version>2.0.0-RELEASE</version> </dependency> ``` Edit shiro.ini <MSG> change to download <DFF> @@ -6,25 +6,7 @@ shiro only provide the support of ehcache and concurrentHashMap. Here is an impl How to use it? =========== -Add sonatype release repository into your setting.xml - -```xml -<repository> - <id>sonatype.maven.release</id> - <name>Sonatype Release Repository</name> - <url>https://oss.sonatype.org/content/repositories/releases</url> -</repository> -``` - -After updated indexes you can add dependency of shiro-redis into your pom.xml - -```xml -<dependency> - <groupId>org.crazycake</groupId> - <artifactId>shiro-redis</artifactId> - <version>2.0.0-RELEASE</version> -</dependency> -``` +Download shiro-redis.jar in bin folder and add it into your classpath. Edit shiro.ini
1
change to download
19
.md
md
mit
alexxiyang/shiro-redis
1746
<NME> RedisSessionDAO.java <BEF> package org.crazycake.shiro; import org.apache.shiro.session.Session; import org.apache.shiro.session.UnknownSessionException; import org.apache.shiro.session.mgt.eis.AbstractSessionDAO; import org.crazycake.shiro.common.SessionInMemory; import org.crazycake.shiro.exception.SerializationException; import org.crazycake.shiro.serializer.ObjectSerializer; import org.crazycake.shiro.serializer.RedisSerializer; import org.crazycake.shiro.serializer.StringSerializer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.Serializable; import java.util.*; /** private static final long DEFAULT_SESSION_IN_MEMORY_TIMEOUT = 1000L; /** * doReadSession be called about 10 times when login. Save Session in ThreadLocal to resolve this problem. sessionInMemoryTimeout is expiration of Session in ThreadLocal. The default value is 1000 milliseconds (1s). Most of time, you don't need to change it. */ private long sessionInMemoryTimeout = DEFAULT_SESSION_IN_MEMORY_TIMEOUT; * doReadSession be called about 10 times when login. * Save Session in ThreadLocal to resolve this problem. sessionInMemoryTimeout is expiration of Session in ThreadLocal. * The default value is 1000 milliseconds (1s). * Most of time, you don't need to change it. * */ private int expire = DEFAULT_EXPIRE; private IRedisManager redisManager; private RedisSerializer keySerializer = new StringSerializer(); private RedisSerializer valueSerializer = new ObjectSerializer(); private static ThreadLocal sessionsInThread = new ThreadLocal(); /** * expire time in seconds. * NOTE: Please make sure expire is longer than session.getTimeout(), * otherwise you might need the issue that session in Redis got erased when the Session is still available * * DEFAULT_EXPIRE: use the timeout of session instead of setting it by yourself * NO_EXPIRE: never expire * @param session * @throws UnknownSessionException */ private void saveSession(Session session) throws UnknownSessionException{ if(session == null || session.getId() == null){ logger.error("session or session id is null"); throw new UnknownSessionException("session or session id is null"); } * redisManager used for communicate with Redis */ private IRedisManager redisManager; /** * Serializer of key logger.error("serialize session error. session id=" + session.getId()); throw new UnknownSessionException(e); } if (expire * 1000 < session.getTimeout()) { logger.warn("Redis session expire time: " + (expire * 1000) + " is less than Session timeout: " + session.getTimeout() + " . It may cause some problems."); } this.redisManager.set(key, value, expire); } @Override public void delete(Session session) { if(session == null || session.getId() == null){ logger.error("session or session id is null"); return; } if (this.sessionInMemoryEnabled) { this.setSessionToThreadLocal(session.getId(), session); } } private void saveSession(Session session) throws UnknownSessionException { if (session == null || session.getId() == null) { logger.error("session or session id is null"); throw new UnknownSessionException("session or session id is null"); Set<Session> sessions = new HashSet<Session>(); try { Set<byte[]> keys = redisManager.keys(this.keySerializer.serialize(this.keyPrefix + "*")); if(keys != null && keys.size()>0){ for(byte[] key:keys){ Session s = (Session)valueSerializer.deserialize(redisManager.get(key)); sessions.add(s); } } } if (expire == DEFAULT_EXPIRE) { redisManager.set(key, value, (int) (session.getTimeout() / MILLISECONDS_IN_A_SECOND)); return; } @Override protected Serializable doCreate(Session session) { if(session == null){ logger.error("session is null"); throw new UnknownSessionException("session is null"); } redisManager.set(key, value, expire); } /** * delete session @Override protected Session doReadSession(Serializable sessionId) { if(sessionId == null){ logger.warn("session id is null"); return null; } if (session == null || session.getId() == null) { logger.error("session or session id is null"); return; } if (this.sessionInMemoryEnabled) { logger.debug("read session from redis"); try { s = (Session)valueSerializer.deserialize(redisManager.get(keySerializer.serialize(getRedisSessionKey(sessionId)))); setSessionToThreadLocal(sessionId, s); } catch (SerializationException e) { logger.error("read session error. settionId=" + sessionId); } /** * get all active sessions * @return */ @Override public Collection<Session> getActiveSessions() { if (this.sessionInMemoryEnabled) { this.removeExpiredSessionInMemory(); } Set<Session> sessions = new HashSet<Session>(); try { Set<byte[]> keys = redisManager.keys(keySerializer.serialize(this.keyPrefix + "*")); if (keys != null && keys.size() > 0) { for (byte[] key:keys) { Session s = (Session) valueSerializer.deserialize(redisManager.get(key)); sessions.add(s); } } } catch (SerializationException e) { logger.error("get active sessions error."); } return sessions; } @Override protected Serializable doCreate(Session session) { if (this.sessionInMemoryEnabled) { this.removeExpiredSessionInMemory(); } if (session == null) { logger.error("session is null"); throw new UnknownSessionException("session is null"); } Serializable sessionId = this.generateSessionId(session); this.assignSessionId(session, sessionId); this.saveSession(session); return sessionId; } /** * I change * @param sessionId * @return */ @Override protected Session doReadSession(Serializable sessionId) { if (this.sessionInMemoryEnabled) { this.removeExpiredSessionInMemory(); } if (sessionId == null) { logger.warn("session id is null"); return null; } if (this.sessionInMemoryEnabled) { Session session = getSessionFromThreadLocal(sessionId); if (session != null) { return session; } } Session session = null; try { String sessionRedisKey = getRedisSessionKey(sessionId); logger.debug("read session: " + sessionRedisKey + " from Redis"); session = (Session) valueSerializer.deserialize(redisManager.get(keySerializer.serialize(sessionRedisKey))); if (this.sessionInMemoryEnabled) { setSessionToThreadLocal(sessionId, session); } } catch (SerializationException e) { logger.error("read session error. sessionId: " + sessionId); } return session; } private void setSessionToThreadLocal(Serializable sessionId, Session session) { this.initSessionsInThread(); Map<Serializable, SessionInMemory> sessionMap = (Map<Serializable, SessionInMemory>) sessionsInThread.get(); sessionMap.put(sessionId, this.createSessionInMemory(session)); } private void delSessionFromThreadLocal(Serializable sessionId) { Map<Serializable, SessionInMemory> sessionMap = (Map<Serializable, SessionInMemory>) sessionsInThread.get(); if (sessionMap == null) { return; } sessionMap.remove(sessionId); } private SessionInMemory createSessionInMemory(Session session) { SessionInMemory sessionInMemory = new SessionInMemory(); sessionInMemory.setCreateTime(new Date()); sessionInMemory.setSession(session); return sessionInMemory; } private void initSessionsInThread() { Map<Serializable, SessionInMemory> sessionMap = (Map<Serializable, SessionInMemory>) sessionsInThread.get(); if (sessionMap == null) { sessionMap = new HashMap<Serializable, SessionInMemory>(); sessionsInThread.set(sessionMap); } } private void removeExpiredSessionInMemory() { Map<Serializable, SessionInMemory> sessionMap = (Map<Serializable, SessionInMemory>) sessionsInThread.get(); if (sessionMap == null) { return; } Iterator<Serializable> it = sessionMap.keySet().iterator(); while (it.hasNext()) { Serializable sessionId = it.next(); SessionInMemory sessionInMemory = sessionMap.get(sessionId); if (sessionInMemory == null) { it.remove(); continue; } long liveTime = getSessionInMemoryLiveTime(sessionInMemory); if (liveTime > sessionInMemoryTimeout) { it.remove(); } } if (sessionMap.size() == 0) { sessionsInThread.remove(); } } private Session getSessionFromThreadLocal(Serializable sessionId) { if (sessionsInThread.get() == null) { return null; } Map<Serializable, SessionInMemory> sessionMap = (Map<Serializable, SessionInMemory>) sessionsInThread.get(); SessionInMemory sessionInMemory = sessionMap.get(sessionId); if (sessionInMemory == null) { return null; } logger.debug("read session from memory"); return sessionInMemory.getSession(); } private long getSessionInMemoryLiveTime(SessionInMemory sessionInMemory) { Date now = new Date(); return now.getTime() - sessionInMemory.getCreateTime().getTime(); } private String getRedisSessionKey(Serializable sessionId) { return this.keyPrefix + sessionId; } public IRedisManager getRedisManager() { return redisManager; } public void setRedisManager(IRedisManager redisManager) { this.redisManager = redisManager; } public String getKeyPrefix() { return keyPrefix; } public void setKeyPrefix(String keyPrefix) { this.keyPrefix = keyPrefix; } public RedisSerializer getKeySerializer() { return keySerializer; } public void setKeySerializer(RedisSerializer keySerializer) { this.keySerializer = keySerializer; } public RedisSerializer getValueSerializer() { return valueSerializer; } public void setValueSerializer(RedisSerializer valueSerializer) { this.valueSerializer = valueSerializer; } public long getSessionInMemoryTimeout() { return sessionInMemoryTimeout; } public void setSessionInMemoryTimeout(long sessionInMemoryTimeout) { this.sessionInMemoryTimeout = sessionInMemoryTimeout; } public int getExpire() { return expire; } public void setExpire(int expire) { this.expire = expire; } public boolean getSessionInMemoryEnabled() { return sessionInMemoryEnabled; } public void setSessionInMemoryEnabled(boolean sessionInMemoryEnabled) { this.sessionInMemoryEnabled = sessionInMemoryEnabled; } public static ThreadLocal getSessionsInThread() { return sessionsInThread; } } <MSG> Add checkstyle <DFF> @@ -18,7 +18,10 @@ public class RedisSessionDAO extends AbstractSessionDAO { private static final long DEFAULT_SESSION_IN_MEMORY_TIMEOUT = 1000L; /** - * doReadSession be called about 10 times when login. Save Session in ThreadLocal to resolve this problem. sessionInMemoryTimeout is expiration of Session in ThreadLocal. The default value is 1000 milliseconds (1s). Most of time, you don't need to change it. + * doReadSession be called about 10 times when login. + * Save Session in ThreadLocal to resolve this problem. sessionInMemoryTimeout is expiration of Session in ThreadLocal. + * The default value is 1000 milliseconds (1s). + * Most of time, you don't need to change it. */ private long sessionInMemoryTimeout = DEFAULT_SESSION_IN_MEMORY_TIMEOUT; @@ -30,6 +33,8 @@ public class RedisSessionDAO extends AbstractSessionDAO { */ private int expire = DEFAULT_EXPIRE; + private static final int MILLISECONDS_IN_A_SECOND = 1000; + private IRedisManager redisManager; private RedisSerializer keySerializer = new StringSerializer(); private RedisSerializer valueSerializer = new ObjectSerializer(); @@ -45,8 +50,8 @@ public class RedisSessionDAO extends AbstractSessionDAO { * @param session * @throws UnknownSessionException */ - private void saveSession(Session session) throws UnknownSessionException{ - if(session == null || session.getId() == null){ + private void saveSession(Session session) throws UnknownSessionException { + if (session == null || session.getId() == null) { logger.error("session or session id is null"); throw new UnknownSessionException("session or session id is null"); } @@ -59,15 +64,19 @@ public class RedisSessionDAO extends AbstractSessionDAO { logger.error("serialize session error. session id=" + session.getId()); throw new UnknownSessionException(e); } - if (expire * 1000 < session.getTimeout()) { - logger.warn("Redis session expire time: " + (expire * 1000) + " is less than Session timeout: " + session.getTimeout() + " . It may cause some problems."); + if (expire * MILLISECONDS_IN_A_SECOND < session.getTimeout()) { + logger.warn("Redis session expire time: " + + (expire * MILLISECONDS_IN_A_SECOND) + + " is less than Session timeout: " + + session.getTimeout() + + " . It may cause some problems."); } this.redisManager.set(key, value, expire); } @Override public void delete(Session session) { - if(session == null || session.getId() == null){ + if (session == null || session.getId() == null) { logger.error("session or session id is null"); return; } @@ -83,9 +92,9 @@ public class RedisSessionDAO extends AbstractSessionDAO { Set<Session> sessions = new HashSet<Session>(); try { Set<byte[]> keys = redisManager.keys(this.keySerializer.serialize(this.keyPrefix + "*")); - if(keys != null && keys.size()>0){ - for(byte[] key:keys){ - Session s = (Session)valueSerializer.deserialize(redisManager.get(key)); + if (keys != null && keys.size() > 0) { + for (byte[] key:keys) { + Session s = (Session) valueSerializer.deserialize(redisManager.get(key)); sessions.add(s); } } @@ -97,7 +106,7 @@ public class RedisSessionDAO extends AbstractSessionDAO { @Override protected Serializable doCreate(Session session) { - if(session == null){ + if (session == null) { logger.error("session is null"); throw new UnknownSessionException("session is null"); } @@ -109,7 +118,7 @@ public class RedisSessionDAO extends AbstractSessionDAO { @Override protected Session doReadSession(Serializable sessionId) { - if(sessionId == null){ + if (sessionId == null) { logger.warn("session id is null"); return null; } @@ -121,7 +130,7 @@ public class RedisSessionDAO extends AbstractSessionDAO { logger.debug("read session from redis"); try { - s = (Session)valueSerializer.deserialize(redisManager.get(keySerializer.serialize(getRedisSessionKey(sessionId)))); + s = (Session) valueSerializer.deserialize(redisManager.get(keySerializer.serialize(getRedisSessionKey(sessionId)))); setSessionToThreadLocal(sessionId, s); } catch (SerializationException e) { logger.error("read session error. settionId=" + sessionId);
21
Add checkstyle
12
.java
java
mit
alexxiyang/shiro-redis
1747
<NME> LICENSE <BEF> ADDFILE <MSG> add MIT LICENSE <DFF> @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 xi yang + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file
21
add MIT LICENSE
0
LICENSE
mit
alexxiyang/shiro-redis
1748
<NME> RedisSessionDAO.java <BEF> package org.crazycake.shiro; import org.apache.shiro.session.Session; import org.apache.shiro.session.UnknownSessionException; import org.apache.shiro.session.mgt.eis.AbstractSessionDAO; import org.crazycake.shiro.common.SessionInMemory; import org.crazycake.shiro.exception.SerializationException; import org.crazycake.shiro.serializer.ObjectSerializer; import org.crazycake.shiro.serializer.RedisSerializer; import org.crazycake.shiro.serializer.StringSerializer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.Serializable; import java.util.*; /** * Used for setting/getting authentication information from Redis */ public class RedisSessionDAO extends AbstractSessionDAO { private static Logger logger = LoggerFactory.getLogger(RedisSessionDAO.class); private static final String DEFAULT_SESSION_KEY_PREFIX = "shiro:session:"; private String keyPrefix = DEFAULT_SESSION_KEY_PREFIX; /** * doReadSession be called about 10 times when login. * Save Session in ThreadLocal to resolve this problem. sessionInMemoryTimeout is expiration of Session in ThreadLocal. * The default value is 1000 milliseconds (1s). * Most of time, you don't need to change it. * * You can turn it off by setting sessionInMemoryEnabled to false */ private static final long DEFAULT_SESSION_IN_MEMORY_TIMEOUT = 1000L; private long sessionInMemoryTimeout = DEFAULT_SESSION_IN_MEMORY_TIMEOUT; private static final boolean DEFAULT_SESSION_IN_MEMORY_ENABLED = true; private boolean sessionInMemoryEnabled = DEFAULT_SESSION_IN_MEMORY_ENABLED; private static ThreadLocal sessionsInThread = new ThreadLocal(); /** * expire time in seconds. * NOTE: Please make sure expire is longer than session.getTimeout(), * otherwise you might need the issue that session in Redis got erased when the Session is still available * * DEFAULT_EXPIRE: use the timeout of session instead of setting it by yourself * NO_EXPIRE: never expire */ private static final int DEFAULT_EXPIRE = -2; private static final int NO_EXPIRE = -1; private int expire = DEFAULT_EXPIRE; private static final int MILLISECONDS_IN_A_SECOND = 1000; /** * redisManager used for communicate with Redis */ private IRedisManager redisManager; /** * Serializer of key */ private RedisSerializer keySerializer = new StringSerializer(); /** * Serializer of value */ private RedisSerializer valueSerializer = new ObjectSerializer(); /** * save/update session * @param session * @throws UnknownSessionException */ @Override public void update(Session session) throws UnknownSessionException { if (this.sessionInMemoryEnabled) { this.removeExpiredSessionInMemory(); } this.saveSession(session); if (this.sessionInMemoryEnabled) { this.setSessionToThreadLocal(session.getId(), session); } } private void saveSession(Session session) throws UnknownSessionException { if (session == null || session.getId() == null) { logger.error("session or session id is null"); throw new UnknownSessionException("session or session id is null"); } byte[] key; byte[] value; try { key = keySerializer.serialize(getRedisSessionKey(session.getId())); value = valueSerializer.serialize(session); } catch (SerializationException e) { logger.error("serialize session error. session id=" + session.getId()); throw new UnknownSessionException(e); } if (expire == DEFAULT_EXPIRE) { redisManager.set(key, value, (int) (session.getTimeout() / MILLISECONDS_IN_A_SECOND)); return; } if (expire != NO_EXPIRE && expire * MILLISECONDS_IN_A_SECOND < session.getTimeout()) { logger.warn("Redis session expire time: " + (expire * MILLISECONDS_IN_A_SECOND) + " is less than Session timeout: " + session.getTimeout() + " . It may cause some problems."); } redisManager.set(key, value, expire); } /** * delete session * @param session */ @Override public void delete(Session session) { if (this.sessionInMemoryEnabled) { this.removeExpiredSessionInMemory(); } if (session == null || session.getId() == null) { logger.error("session or session id is null"); return; } if (this.sessionInMemoryEnabled) { this.delSessionFromThreadLocal(session.getId()); } try { redisManager.del(keySerializer.serialize(getRedisSessionKey(session.getId()))); } catch (SerializationException e) { logger.error("delete session error. session id=" + session.getId()); } } /** * get all active sessions * @return */ @Override public Collection<Session> getActiveSessions() { if (this.sessionInMemoryEnabled) { this.removeExpiredSessionInMemory(); } Set<Session> sessions = new HashSet<Session>(); try { Set<byte[]> keys = redisManager.keys(keySerializer.serialize(this.keyPrefix + "*")); if (keys != null && keys.size() > 0) { for (byte[] key:keys) { Session s = (Session) valueSerializer.deserialize(redisManager.get(key)); sessions.add(s); } } } catch (SerializationException e) { logger.error("get active sessions error."); } return sessions; } @Override protected Serializable doCreate(Session session) { if (this.sessionInMemoryEnabled) { this.removeExpiredSessionInMemory(); } if (session == null) { logger.error("session is null"); throw new UnknownSessionException("session is null"); } Serializable sessionId = this.generateSessionId(session); this.assignSessionId(session, sessionId); this.saveSession(session); return sessionId; private void removeExpiredSessionInMemory(Map<Serializable, SessionInMemory> sessionMap) { Iterator<Serializable> it = sessionMap.keySet().iterator(); while(it.hasNext()) { Serializable sessionId = it.next(); SessionInMemory sessionInMemory = sessionMap.get(sessionId); if (sessionInMemory == null) { @Override protected Session doReadSession(Serializable sessionId) { if (this.sessionInMemoryEnabled) { this.removeExpiredSessionInMemory(); } if (sessionId == null) { logger.warn("session id is null"); return null; } if (this.sessionInMemoryEnabled) { Session session = getSessionFromThreadLocal(sessionId); if (session != null) { return session; } } Session session = null; try { String sessionRedisKey = getRedisSessionKey(sessionId); logger.debug("read session: " + sessionRedisKey + " from Redis"); session = (Session) valueSerializer.deserialize(redisManager.get(keySerializer.serialize(sessionRedisKey))); if (this.sessionInMemoryEnabled) { setSessionToThreadLocal(sessionId, session); } } catch (SerializationException e) { logger.error("read session error. sessionId: " + sessionId); } return session; } private void setSessionToThreadLocal(Serializable sessionId, Session session) { this.initSessionsInThread(); Map<Serializable, SessionInMemory> sessionMap = (Map<Serializable, SessionInMemory>) sessionsInThread.get(); sessionMap.put(sessionId, this.createSessionInMemory(session)); } private void delSessionFromThreadLocal(Serializable sessionId) { Map<Serializable, SessionInMemory> sessionMap = (Map<Serializable, SessionInMemory>) sessionsInThread.get(); if (sessionMap == null) { return; } sessionMap.remove(sessionId); } private SessionInMemory createSessionInMemory(Session session) { SessionInMemory sessionInMemory = new SessionInMemory(); sessionInMemory.setCreateTime(new Date()); sessionInMemory.setSession(session); return sessionInMemory; } private void initSessionsInThread() { Map<Serializable, SessionInMemory> sessionMap = (Map<Serializable, SessionInMemory>) sessionsInThread.get(); if (sessionMap == null) { sessionMap = new HashMap<Serializable, SessionInMemory>(); sessionsInThread.set(sessionMap); } } private void removeExpiredSessionInMemory() { Map<Serializable, SessionInMemory> sessionMap = (Map<Serializable, SessionInMemory>) sessionsInThread.get(); if (sessionMap == null) { return; } Iterator<Serializable> it = sessionMap.keySet().iterator(); while (it.hasNext()) { Serializable sessionId = it.next(); SessionInMemory sessionInMemory = sessionMap.get(sessionId); if (sessionInMemory == null) { it.remove(); continue; } long liveTime = getSessionInMemoryLiveTime(sessionInMemory); if (liveTime > sessionInMemoryTimeout) { it.remove(); } } if (sessionMap.size() == 0) { sessionsInThread.remove(); } } private Session getSessionFromThreadLocal(Serializable sessionId) { if (sessionsInThread.get() == null) { return null; } Map<Serializable, SessionInMemory> sessionMap = (Map<Serializable, SessionInMemory>) sessionsInThread.get(); SessionInMemory sessionInMemory = sessionMap.get(sessionId); if (sessionInMemory == null) { return null; } logger.debug("read session from memory"); return sessionInMemory.getSession(); } private long getSessionInMemoryLiveTime(SessionInMemory sessionInMemory) { Date now = new Date(); return now.getTime() - sessionInMemory.getCreateTime().getTime(); } private String getRedisSessionKey(Serializable sessionId) { return this.keyPrefix + sessionId; } public IRedisManager getRedisManager() { return redisManager; } public void setRedisManager(IRedisManager redisManager) { this.redisManager = redisManager; } public String getKeyPrefix() { return keyPrefix; } public void setKeyPrefix(String keyPrefix) { this.keyPrefix = keyPrefix; } public RedisSerializer getKeySerializer() { return keySerializer; } public void setKeySerializer(RedisSerializer keySerializer) { this.keySerializer = keySerializer; } public RedisSerializer getValueSerializer() { return valueSerializer; } public void setValueSerializer(RedisSerializer valueSerializer) { this.valueSerializer = valueSerializer; } public long getSessionInMemoryTimeout() { return sessionInMemoryTimeout; } public void setSessionInMemoryTimeout(long sessionInMemoryTimeout) { this.sessionInMemoryTimeout = sessionInMemoryTimeout; } public int getExpire() { return expire; } public void setExpire(int expire) { this.expire = expire; } public boolean getSessionInMemoryEnabled() { return sessionInMemoryEnabled; } public void setSessionInMemoryEnabled(boolean sessionInMemoryEnabled) { this.sessionInMemoryEnabled = sessionInMemoryEnabled; } public static ThreadLocal getSessionsInThread() { return sessionsInThread; } } <MSG> Remove `checkstyle` dependency. Make `jedisPool` as a configurable attribute <DFF> @@ -176,7 +176,7 @@ public class RedisSessionDAO extends AbstractSessionDAO { private void removeExpiredSessionInMemory(Map<Serializable, SessionInMemory> sessionMap) { Iterator<Serializable> it = sessionMap.keySet().iterator(); - while(it.hasNext()) { + while (it.hasNext()) { Serializable sessionId = it.next(); SessionInMemory sessionInMemory = sessionMap.get(sessionId); if (sessionInMemory == null) {
1
Remove `checkstyle` dependency. Make `jedisPool` as a configurable attribute
1
.java
java
mit
alexxiyang/shiro-redis
1749
<NME> RedisClusterManagerTest.java <BEF> package org.crazycake.shiro; import org.junit.Before; import org.junit.Test; import redis.clients.jedis.JedisCluster; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertThat; import static org.mockito.Matchers.any; import static org.mockito.Mockito.*; public class RedisClusterManagerTest { @BeforeEach public void setUp() { jedisCluster = mock(JedisCluster.class); redisClusterManager = new RedisClusterManager(); redisClusterManager.setJedisCluster(jedisCluster); } redisClusterManager.setJedisCluster(jedisCluster); } @Test public void get() { byte[] value = redisClusterManager.get(null); @Test public void set() { redisClusterManager.set(null, null, -1); verify(jedisCluster, times(0)).set(any(byte[].class), any(byte[].class)); redisClusterManager.set(new byte[0], new byte[0], -1); verify(jedisCluster, times(1)).set(any(byte[].class), any(byte[].class)); verify(jedisCluster, times(0)).expire(any(byte[].class), any(int.class)); redisClusterManager.set(new byte[0], new byte[0], 0); verify(jedisCluster, times(1)).expire(any(byte[].class), any(int.class)); redisClusterManager.set(new byte[0], new byte[0], 1); verify(jedisCluster, times(2)).expire(any(byte[].class), any(int.class)); } } <MSG> Release 3.2.3 <DFF> @@ -1,5 +1,6 @@ package org.crazycake.shiro; +import org.junit.After; import org.junit.Before; import org.junit.Test; import redis.clients.jedis.JedisCluster; @@ -9,6 +10,7 @@ import static org.hamcrest.CoreMatchers.nullValue; import static org.junit.Assert.assertThat; import static org.mockito.Matchers.any; import static org.mockito.Mockito.*; +import static fixture.TestFixture.*; public class RedisClusterManagerTest { @@ -22,6 +24,11 @@ public class RedisClusterManagerTest { redisClusterManager.setJedisCluster(jedisCluster); } + @After + public void tearDown() { + blastRedis(); + } + @Test public void get() { byte[] value = redisClusterManager.get(null);
7
Release 3.2.3
0
.java
java
mit
alexxiyang/shiro-redis
1750
<NME> README.md <BEF> # Dragon: A Computation Graph Virtual Machine Based Deep Learning Framework ![](http://dragon.seetatech.com/static/images/styles-dragon.png) ----- ## Deprecated. See [seetaresearch/Dragon](http://github.com/seetaresearch/Dragon). f = theano.function(outputs=y) # force the training phase even without gradients computation ``` <MSG> add license and citation <DFF> @@ -177,3 +177,16 @@ import dragon.vm.theano as theano f = theano.function(outputs=y) # force the training phase even without gradients computation ``` +## License and Citation + +Dragon is released under the [BSD 2-Clause license](https://github.com/neopenx/Dragon/blob/master/LICENSE). + +Please cite Dragon in your publications if it helps your research: + + @article{pan2017dragon, + Author = {Pan, Ting}, + Journal = {arXiv preprint arXiv:1707.08265}, + Title = {Dragon: A Computation Graph Virtual Machine Based Deep Learning Framework}, + Year = {2017} + } +
13
add license and citation
0
.md
md
bsd-2-clause
neopenx/Dragon
1751
<NME> README.md <BEF> # Dragon: A Computation Graph Virtual Machine Based Deep Learning Framework ![](http://dragon.seetatech.com/static/images/styles-dragon.png) ----- ## Deprecated. See [seetaresearch/Dragon](http://github.com/seetaresearch/Dragon). f = theano.function(outputs=y) # force the training phase even without gradients computation ``` <MSG> add license and citation <DFF> @@ -177,3 +177,16 @@ import dragon.vm.theano as theano f = theano.function(outputs=y) # force the training phase even without gradients computation ``` +## License and Citation + +Dragon is released under the [BSD 2-Clause license](https://github.com/neopenx/Dragon/blob/master/LICENSE). + +Please cite Dragon in your publications if it helps your research: + + @article{pan2017dragon, + Author = {Pan, Ting}, + Journal = {arXiv preprint arXiv:1707.08265}, + Title = {Dragon: A Computation Graph Virtual Machine Based Deep Learning Framework}, + Year = {2017} + } +
13
add license and citation
0
.md
md
bsd-2-clause
neopenx/Dragon
1752
<NME> README.md <BEF> shiro-redis ============= [![Build Status](https://travis-ci.org/alexxiyang/shiro-redis.svg?branch=master)](https://travis-ci.org/alexxiyang/shiro-redis) [![Maven Central](https://maven-badges.herokuapp.com/maven-central/org.crazycake/shiro-redis/badge.svg)](https://maven-badges.herokuapp.com/maven-central/org.crazycake/shiro-redis) shiro only provide the support of ehcache and concurrentHashMap. Here is an implement of redis cache can be used by shiro. Hope it will help you! # Download You use either of the following 2 ways to include `shiro-redis` into your project * use `git clone https://github.com/alexxiyang/shiro-redis.git` to clone project to your local workspace and build jar file by your self * add maven dependency ```xml <dependency> <groupId>org.crazycake</groupId> <artifactId>shiro-redis</artifactId> <version>3.2.3</version> </dependency> ``` > **Note:**\ > 3.3.0 is compiled in java11 by mistake. ## shiro-core/jedis Version Comparison Charts | shiro-redis | jedis | jedis | | :----------------:| :-------: | :-------: | | 3.2.3 | 1.3.2 | 2.9.0 | | 3.3.0(Unrelease) | 1.6.0 | 3.3.0 | # Before use Here is the first thing you need to know. Shiro-redis needs an id field to identify your authorization object in Redis. So please make sure your principal class has a field which you can get unique id of this object. Please setting this id field name by `cacheManager.principalIdFieldName = <your id field name of principal object>` # Before use Here is the first thing you need to know. Shiro-redis needs an id field to identify your authorization object in Redis. So please make sure your principal class has a field which you can get unique id of this object. Please setting this id field name by `cacheManager.principalIdFieldName = <your id field name of principal object>` For example: If you create `SimpleAuthenticationInfo` like this: ```java @Override protected AuthenticationInfo doGetAuthenticationInfo(AuthenticationToken token) throws AuthenticationException { UsernamePasswordToken usernamePasswordToken = (UsernamePasswordToken)token; UserInfo userInfo = new UserInfo(); userInfo.setUsername(usernamePasswordToken.getUsername()); return new SimpleAuthenticationInfo(userInfo, "123456", getName()); } ``` Then the `userInfo` object is your principal object. You need to make sure `UserInfo` has an unique field for Redis to identify it. Take `userId` as an example: ```java public class UserInfo implements Serializable{ private Integer userId private String username; public String getUsername() { return username; } public void setUsername(String username) { this.username = username; } public Integer getUserId() { return this.userId; } } ``` Put userId as the value of `cacheManager.principalIdFieldName`, like this: ```properties cacheManager.principalIdFieldName = userId ``` If you're using Spring, the configuration should be ```xml <property name="principalIdFieldName" value="userId" /> ``` Then `shiro-redis` will call `userInfo.getUserId()` to get the id for saving Redis object. # How to configure ? You can configure `shiro-redis` either in `shiro.ini` or in `spring-*.xml` ## shiro.ini Here is the configuration example for shiro.ini. ### Redis Standalone If you are running Redis in Standalone mode ```properties [main] #==================================== # shiro-redis configuration [start] #==================================== #=================================== # Redis Manager [start] #=================================== # Create redisManager redisManager = org.crazycake.shiro.RedisManager # Redis host. If you don't specify host the default value is 127.0.0.1:6379 redisManager.host = 127.0.0.1:6379 #=================================== # Redis Manager [end] #=================================== #========================================= # Redis session DAO [start] #========================================= # Create redisSessionDAO redisSessionDAO = org.crazycake.shiro.RedisSessionDAO # Use redisManager as cache manager redisSessionDAO.redisManager = $redisManager sessionManager = org.apache.shiro.web.session.mgt.DefaultWebSessionManager sessionManager.sessionDAO = $redisSessionDAO securityManager.sessionManager = $sessionManager #========================================= # Redis session DAO [end] #========================================= #========================================== # Redis cache manager [start] #========================================== # Create cacheManager cacheManager = org.crazycake.shiro.RedisCacheManager # Principal id field name. The field which you can get unique id to identify this principal. # For example, if you use UserInfo as Principal class, the id field maybe `id`, `userId`, `email`, etc. # Remember to add getter to this id field. For example, `getId()`, `getUserId()`, `getEmail()`, etc. # Default value is id, that means your principal object must has a method called `getId()` cacheManager.principalIdFieldName = id # Use redisManager as cache manager cacheManager.redisManager = $redisManager securityManager.cacheManager = $cacheManager #========================================== # Redis cache manager [end] #========================================== #================================= # shiro-redis configuration [end] #================================= ``` For complete configurable options list, check [Configurable Options](#configurable-options). Here is a [tutorial project](https://github.com/alexxiyang/shiro-redis-tutorial) for you to understand how to configure `shiro-redis` in `shiro.ini`. ### Redis Sentinel if you're using Redis Sentinel, please replace the `redisManager` configuration of the standalone version into the following: ```properties #=================================== # Redis Manager [start] #=================================== # Create redisManager redisManager = org.crazycake.shiro.RedisSentinelManager # Sentinel host. If you don't specify host the default value is 127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381 redisManager.host = 127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381 # Sentinel master name redisManager.masterName = mymaster #=================================== # Redis Manager [end] #=================================== ``` For complete configurable options list, check [Configurable Options](#configurable-options). ### Redis Cluster If you're using redis cluster, please replace the `redisManager` configuration of the standalone version into the following: ```properties #=================================== # Redis Manager [start] #=================================== # Create redisManager redisManager = org.crazycake.shiro.RedisClusterManager # Redis host and port list redisManager.host = 192.168.21.3:7000,192.168.21.3:7001,192.168.21.3:7002,192.168.21.3:7003,192.168.21.3:7004,192.168.21.3:7005 #=================================== # Redis Manager [end] #=================================== ``` For complete configurable options list, check [Configurable Options](#configurable-options). ## Spring If you are using Spring ### Redis Standalone If you are running Redis in Standalone mode ```xml <!-- shiro-redis configuration [start] --> <!-- Redis Manager [start] --> <bean id="redisManager" class="org.crazycake.shiro.RedisManager"> <property name="host" value="127.0.0.1:6379"/> </bean> <!-- Redis Manager [end] --> <!-- Redis session DAO [start] --> <bean id="redisSessionDAO" class="org.crazycake.shiro.RedisSessionDAO"> <property name="redisManager" ref="redisManager" /> </bean> <bean id="sessionManager" class="org.apache.shiro.web.session.mgt.DefaultWebSessionManager"> <property name="sessionDAO" ref="redisSessionDAO" /> </bean> <!-- Redis session DAO [end] --> <!-- Redis cache manager [start] --> <bean id="cacheManager" class="org.crazycake.shiro.RedisCacheManager"> <property name="redisManager" ref="redisManager" /> </bean> <!-- Redis cache manager [end] --> <bean id="securityManager" class="org.apache.shiro.web.mgt.DefaultWebSecurityManager"> <property name="sessionManager" ref="sessionManager" /> <property name="cacheManager" ref="cacheManager" /> <!-- other configurations --> <property name="realm" ref="exampleRealm"/> <property name="rememberMeManager.cipherKey" value="kPH+bIxk5D2deZiIxcaaaA==" /> </bean> <!-- shiro-redis configuration [end] --> ``` For complete configurable options list, check [Configurable Options](#configurable-options). Here is a [tutorial project](https://github.com/alexxiyang/shiro-redis-spring-tutorial) for you to understand how to configure `shiro-redis` in spring configuration file. ### Redis Sentinel If you use redis sentinel, please replace the `redisManager` configuration of the standalone version into the following: ```xml <!-- shiro-redis configuration [start] --> <!-- shiro redisManager --> <bean id="redisManager" class="org.crazycake.shiro.RedisSentinelManager"> <property name="host" value="127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381"/> <property name="masterName" value="mymaster"/> </bean> ``` For complete configurable options list, check [Configurable Options](#configurable-options). ### Redis Cluster If you use redis cluster, please replace the `redisManager` configuration of the standalone version into the following: ```xml <!-- shiro-redis configuration [start] --> <!-- shiro redisManager --> <bean id="redisManager" class="org.crazycake.shiro.RedisClusterManager"> <property name="host" value="192.168.21.3:7000,192.168.21.3:7001,192.168.21.3:7002,192.168.21.3:7003,192.168.21.3:7004,192.168.21.3:7005"/> </bean> ``` For complete configurable options list, check [Configurable Options](#configurable-options). ## Serializer Since redis only accept `byte[]`, there comes a serializer problem. Shiro-redis is using `StringSerializer` as key serializer and `ObjectSerializer` as value serializer. You can use your own custom serializer, as long as this custom serializer implements `org.crazycake.shiro.serializer.RedisSerializer` For example, we can change the charset of keySerializer like this ```properties # If you want change charset of keySerializer or use your own custom serializer, you need to define serializer first # # cacheManagerKeySerializer = org.crazycake.shiro.serializer.StringSerializer # Supported encodings refer to https://docs.oracle.com/javase/8/docs/technotes/guides/intl/encoding.doc.html # UTF-8, UTF-16, UTF-32, ISO-8859-1, GBK, Big5, etc # # cacheManagerKeySerializer.charset = UTF-8 # cacheManager.keySerializer = $cacheManagerKeySerializer ``` These 4 options that you can replace them with your cutom serializers: - cacheManager.keySerializer - cacheManager.valueSerializer - redisSessionDAO.keySerializer - redisSessionDAO.valueSerializer ## Configurable Options Here are all the available options you can use in `shiro-redis` configuration file. ### RedisManager | Title | Default | Description | | :------------------| :------------------- | :---------------------------| | host | `127.0.0.1:6379` | Redis host. If you don't specify host the default value is `127.0.0.1:6379`. If you run redis in sentinel mode or cluster mode, separate host names with comma, like `127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381` | | masterName | `mymaster` | **Only used for sentinel mode**<br>The master node of Redis sentinel mode | | timeout | `2000` | Redis connect timeout. Timeout for jedis try to connect to redis server(In milliseconds) | | soTimeout | `2000` | **Only used for sentinel mode or cluster mode**<br>The timeout for jedis try to read data from redis server | | maxAttempts | `3` | **Only used for cluster mode**<br>Max attempts to connect to server | | password | | Redis password | | database | `0` | Redis database. Default value is 0 | | jedisPoolConfig | `new redis.clients.jedis.JedisPoolConfig()` | JedisPoolConfig. You can create your own JedisPoolConfig instance and set attributes as you wish<br>Most of time, you don't need to set jedisPoolConfig<br>Here is an example.<br>`jedisPoolConfig = redis.clients.jedis.JedisPoolConfig`<br>`jedisPoolConfig.testWhileIdle = false`<br>`redisManager.jedisPoolConfig = jedisPoolConfig` | | count | `100` | Scan count. Shiro-redis use Scan to get keys, so you can define the number of elements returned at every iteration. | | jedisPool | `null` | **Only used for sentinel mode or single mode**<br>You can create your own JedisPool instance and set attributes as you wish | ### RedisSessionDAO | Title | Default | Description | | :------------------| :------------------- | :---------------------------| | redisManager | | RedisManager which you just configured above (Required) | | expire | `-2` | Redis cache key/value expire time. The expire time is in second.<br>Special values:<br>`-1`: no expire<br>`-2`: the same timeout with session<br>Default value: `-2`<br>**Note**: Make sure expire time is longer than session timeout. | | keyPrefix | `shiro:session:` | Custom your redis key prefix for session management<br>**Note**: Remember to add colon at the end of prefix. | | sessionInMemoryTimeout | `1000` | When we do signin, `doReadSession(sessionId)` will be called by shiro about 10 times. So shiro-redis save Session in ThreadLocal to remit this problem. sessionInMemoryTimeout is expiration of Session in ThreadLocal. <br>Most of time, you don't need to change it. | | sessionInMemoryEnabled | `true` | Whether or not enable temporary save session in ThreadLocal | | keySerializer | `org.crazycake.shiro.serializer.StringSerializer` | The key serializer of cache manager<br>You can change the implement of key serializer or the encoding of StringSerializer.<br>Supported encodings refer to [Supported Encodings](https://docs.oracle.com/javase/8/docs/technotes/guides/intl/encoding.doc.html). Such as `UTF-8`, `UTF-16`, `UTF-32`, `ISO-8859-1`, `GBK`, `Big5`, etc<br>For more detail, check [Serializer](#serializer) | | valueSerializer | `org.crazycake.shiro.serializer.ObjectSerializer` | The value serializer of cache manager<br>You can change the implement of value serializer<br>For more detail, check [Serializer](#serializer) | ### CacheManager | Title | Default | Description | | :--------------------| :------------------- | :---------------------------| | redisManager | | RedisManager which you just configured above (Required) | | principalIdFieldName | `id` | Principal id field name. The field which you can get unique id to identify this principal.<br>For example, if you use UserInfo as Principal class, the id field maybe `id`, `userId`, `email`, etc.<br>Remember to add getter to this id field. For example, `getId()`, `getUserId(`), `getEmail()`, etc.<br>Default value is `id`, that means your principal object must has a method called `getId()` | | expire | `1800` | Redis cache key/value expire time. <br>The expire time is in second. | | keyPrefix | `shiro:cache:` | Custom your redis key prefix for cache management<br>**Note**: Remember to add colon at the end of prefix. | | keySerializer | `org.crazycake.shiro.serializer.StringSerializer` | The key serializer of cache manager<br>You can change the implement of key serializer or the encoding of StringSerializer.<br>Supported encodings refer to [Supported Encodings](https://docs.oracle.com/javase/8/docs/technotes/guides/intl/encoding.doc.html). Such as `UTF-8`, `UTF-16`, `UTF-32`, `ISO-8859-1`, `GBK`, `Big5`, etc<br>For more detail, check [Serializer](#serializer) | | valueSerializer | `org.crazycake.shiro.serializer.ObjectSerializer` | The value serializer of cache manager<br>You can change the implement of value serializer<br>For more detail, check [Serializer](#serializer) | # Spring boot starter Using `Spring-Boot` integration is the easiest way to integrate `shiro-redis` into a Spring-base application. > Note: `shiro-redis-spring-boot-starter` version `3.2.1` is based on `shiro-spring-boot-web-starter` version `1.4.0-RC2` First include the `shiro-redis` Spring boot starter dependency in you application classpath ```xml <dependency> <groupId>org.crazycake</groupId> <artifactId>shiro-redis-spring-boot-starter</artifactId> <version>3.3.1</version> </dependency> ``` The next step depends on whether you've created your own `SessionManager` or `SessionsSecurityManager`. ## If you haven't created your own `SessionManager` or `SessionsSecurityManager` If you don't have your own `SessionManager` or `SessionsSecurityManager` in your configuration, `shiro-redis-spring-boot-starter` will create `RedisSessionDAO` and `RedisCacheManager` for you. Then inject them into `SessionManager` and `SessionsSecurityManager` automatically. So, You are all set. Enjoy it! ## If you have created your own `SessionManager` or `SessionsSecurityManager` If you have created your own `SessionManager` or `SessionsSecurityManager` like this: ```java @Bean public SessionsSecurityManager securityManager(List<Realm> realms) { DefaultWebSecurityManager securityManager = new DefaultWebSecurityManager(realms); // other stuff... return securityManager; } ``` Then inject `redisSessionDAO` and `redisCacheManager` which created by `shiro-redis-spring-boot-starter` already ```java @Autowired RedisSessionDAO redisSessionDAO; @Autowired RedisCacheManager redisCacheManager; ``` Inject them into your own `SessionManager` and `SessionsSecurityManager` ```java @Bean public SessionManager sessionManager() { DefaultWebSessionManager sessionManager = new DefaultWebSessionManager(); // inject redisSessionDAO sessionManager.setSessionDAO(redisSessionDAO); // other stuff... return sessionManager; } @Bean public SessionsSecurityManager securityManager(List<Realm> realms, SessionManager sessionManager) { DefaultWebSecurityManager securityManager = new DefaultWebSecurityManager(realms); //inject sessionManager securityManager.setSessionManager(sessionManager); // inject redisCacheManager securityManager.setCacheManager(redisCacheManager); // other stuff... return securityManager; } ``` For full example, see [shiro-redis-spring-boot-tutorial](https://github.com/alexxiyang/shiro-redis-spring-boot-tutorial) ### Configuration Properties Here are all available options you can use in Spring-boot starter configuration | Title | Default | Description | | :--------------------------------------------------| :------------------- | :---------------------------| | shiro-redis.enabled | `true` | Enables shiro-redis’s Spring module | | shiro-redis.redis-manager.deploy-mode | `standalone` | Redis deploy mode. Options: `standalone`, `sentinel`, 'cluster' | | shiro-redis.redis-manager.host | `127.0.0.1:6379` | Redis host. If you don't specify host the default value is `127.0.0.1:6379`. If you run redis in sentinel mode or cluster mode, separate host names with comma, like `127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381` | | shiro-redis.redis-manager.master-name | `mymaster` | **Only used for sentinel mode**<br>The master node of Redis sentinel mode | | shiro-redis.redis-manager.timeout | `2000` | Redis connect timeout. Timeout for jedis try to connect to redis server(In milliseconds) | | shiro-redis.redis-manager.so-timeout | `2000` | **Only used for sentinel mode or cluster mode**<br>The timeout for jedis try to read data from redis server | | shiro-redis.redis-manager.max-attempts | `3` | **Only used for cluster mode**<br>Max attempts to connect to server | | shiro-redis.redis-manager.password | | Redis password | | shiro-redis.redis-manager.database | `0` | Redis database. Default value is 0 | | shiro-redis.redis-manager.count | `100` | Scan count. Shiro-redis use Scan to get keys, so you can define the number of elements returned at every iteration. | | shiro-redis.session-dao.expire | `-2` | Redis cache key/value expire time. The expire time is in second.<br>Special values:<br>`-1`: no expire<br>`-2`: the same timeout with session<br>Default value: `-2`<br>**Note**: Make sure expire time is longer than session timeout. | | shiro-redis.session-dao.key-prefix | `shiro:session:` | Custom your redis key prefix for session management<br>**Note**: Remember to add colon at the end of prefix. | | shiro-redis.session-dao.session-in-memory-timeout | `1000` | When we do signin, `doReadSession(sessionId)` will be called by shiro about 10 times. So shiro-redis save Session in ThreadLocal to remit this problem. sessionInMemoryTimeout is expiration of Session in ThreadLocal. <br>Most of time, you don't need to change it. | | shiro-redis.session-dao.session-in-memory-enabled | `true` | Whether or not enable temporary save session in ThreadLocal | | shiro-redis.cache-manager.principal-id-field-name | `id` | Principal id field name. The field which you can get unique id to identify this principal.<br>For example, if you use UserInfo as Principal class, the id field maybe `id`, `userId`, `email`, etc.<br>Remember to add getter to this id field. For example, `getId()`, `getUserId(`), `getEmail()`, etc.<br>Default value is `id`, that means your principal object must has a method called `getId()` | | shiro-redis.cache-manager.expire | `1800` | Redis cache key/value expire time. <br>The expire time is in second. | | shiro-redis.cache-manager.key-prefix | `shiro:cache:` | Custom your redis key prefix for cache management<br>**Note**: Remember to add colon at the end of prefix. | ## Working with `spring-boot-devtools` If you are using `shiro-redis` with `spring-boot-devtools`. Please add this line to `resources/META-INF/spring-devtools.properties` (Create it if there is no this file): ```ini restart.include.shiro-redis=/shiro-[\\w-\\.]+jar ``` # If you found any bugs Please create the issue 可以用中文 <MSG> Update README.md <DFF> @@ -16,7 +16,7 @@ You use either of the following 2 ways to include `shiro-redis` into your projec <dependency> <groupId>org.crazycake</groupId> <artifactId>shiro-redis</artifactId> - <version>3.2.3</version> + <version>3.3.0</version> </dependency> ``` @@ -25,10 +25,10 @@ You use either of the following 2 ways to include `shiro-redis` into your projec ## shiro-core/jedis Version Comparison Charts -| shiro-redis | jedis | jedis | +| shiro-redis | shiro | jedis | | :----------------:| :-------: | :-------: | | 3.2.3 | 1.3.2 | 2.9.0 | -| 3.3.0(Unrelease) | 1.6.0 | 3.3.0 | +| 3.3.0 | 1.6.0 | 3.3.0 | # Before use Here is the first thing you need to know. Shiro-redis needs an id field to identify your authorization object in Redis. So please make sure your principal class has a field which you can get unique id of this object. Please setting this id field name by `cacheManager.principalIdFieldName = <your id field name of principal object>`
3
Update README.md
3
.md
md
mit
alexxiyang/shiro-redis
1753
<NME> RedisSentinelManager.java <BEF> package org.crazycake.shiro; import redis.clients.jedis.JedisPoolConfig; import redis.clients.jedis.JedisSentinelPool; import redis.clients.jedis.Protocol; import java.util.Collections; import java.util.HashSet; import java.util.Set; public class RedisSentinelManager extends WorkAloneRedisManager implements IRedisManager { private static final String DEFAULT_HOST = "127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381"; private String host = DEFAULT_HOST; private static final String DEFAULT_MASTER_NAME = "mymaster"; private String masterName = DEFAULT_MASTER_NAME; // timeout for jedis try to connect to redis server, not expire time! In milliseconds private int timeout = Protocol.DEFAULT_TIMEOUT; // timeout for jedis try to read data from redis server private int soTimeout = Protocol.DEFAULT_TIMEOUT; private String password; private int database = Protocol.DEFAULT_DATABASE; private JedisPoolConfig jedisPoolConfig = new JedisPoolConfig(); private void init() { synchronized (this) { synchronized (RedisSentinelManager.class) { if (jedisPool == null) { String[] sentinelHosts = host.split(",\\s*"); Set<String> sentinels = new HashSet<String>(); Collections.addAll(sentinels, sentinelHosts); jedisPool = new JedisSentinelPool(masterName, sentinels, getJedisPoolConfig(), timeout, soTimeout, password, database); } } @Override protected void checkAndInit() { if (jedisPool == null) { init(); } } public String getHost() { return host; } return host; } this.host = host; } public int getTimeout() { return timeout; } return timeout; } public void setTimeout(int timeout) { this.timeout = timeout; } public String getPassword() { return password; } public void setPassword(String password) { this.password = password; } public int getDatabase() { return database; } public void setDatabase(int database) { this.database = database; } public String getMasterName() { return masterName; } this.masterName = masterName; } public JedisPoolConfig getJedisPoolConfig() { return jedisPoolConfig; } public void setJedisPoolConfig(JedisPoolConfig jedisPoolConfig) { this.jedisPoolConfig = jedisPoolConfig; } public int getSoTimeout() { return soTimeout; } return soTimeout; public void setSoTimeout(int soTimeout) { this.soTimeout = soTimeout; } } public JedisSentinelPool getJedisPool() { return jedisPool; } public void setJedisPool(JedisSentinelPool jedisPool) { this.jedisPool = jedisPool; } } <MSG> Merge pull request #38 from xchendeveloper/master-fork Code refactoring <DFF> @@ -1,5 +1,6 @@ package org.crazycake.shiro; +import redis.clients.jedis.Jedis; import redis.clients.jedis.JedisPoolConfig; import redis.clients.jedis.JedisSentinelPool; import redis.clients.jedis.Protocol; @@ -26,7 +27,15 @@ public class RedisSentinelManager extends BaseRedisManager implements IRedisMana private int database = Protocol.DEFAULT_DATABASE; - private JedisPoolConfig jedisPoolConfig = new JedisPoolConfig(); + private JedisSentinelPool jedisPool; + + @Override + protected Jedis getJedis() { + if(jedisPool == null){ + init(); + } + return jedisPool.getResource(); + } private void init() { synchronized (this) { @@ -39,13 +48,6 @@ public class RedisSentinelManager extends BaseRedisManager implements IRedisMana } } - @Override - protected void checkAndInit() { - if (jedisPool == null) { - init(); - } - } - public String getHost() { return host; } @@ -54,8 +56,6 @@ public class RedisSentinelManager extends BaseRedisManager implements IRedisMana this.host = host; } - - public int getTimeout() { return timeout; } @@ -88,14 +88,6 @@ public class RedisSentinelManager extends BaseRedisManager implements IRedisMana this.masterName = masterName; } - public JedisPoolConfig getJedisPoolConfig() { - return jedisPoolConfig; - } - - public void setJedisPoolConfig(JedisPoolConfig jedisPoolConfig) { - this.jedisPoolConfig = jedisPoolConfig; - } - public int getSoTimeout() { return soTimeout; } @@ -103,4 +95,5 @@ public class RedisSentinelManager extends BaseRedisManager implements IRedisMana public void setSoTimeout(int soTimeout) { this.soTimeout = soTimeout; } + }
11
Merge pull request #38 from xchendeveloper/master-fork
18
.java
java
mit
alexxiyang/shiro-redis
1754
<NME> README.md <BEF> shiro-redis ============= [![Build Status](https://travis-ci.org/alexxiyang/shiro-redis.svg?branch=master)](https://travis-ci.org/alexxiyang/shiro-redis) [![Maven Central](https://maven-badges.herokuapp.com/maven-central/org.crazycake/shiro-redis/badge.svg)](https://maven-badges.herokuapp.com/maven-central/org.crazycake/shiro-redis) shiro only provide the support of ehcache and concurrentHashMap. Here is an implement of redis cache can be used by shiro. Hope it will help you! # Download You use either of the following 2 ways to include `shiro-redis` into your project * use `git clone https://github.com/alexxiyang/shiro-redis.git` to clone project to your local workspace and build jar file by your self * add maven dependency ```xml <dependency> <groupId>org.crazycake</groupId> <artifactId>shiro-redis</artifactId> <version>3.3.1</version> </dependency> ``` > **Note:**\ > 3.3.0 is compiled in java11 by mistake. > **注意**:\ > 请不要使用3.1.0以下版本 # Before use Here is the first thing you need to know. Shiro-redis needs an id field to identify your authorization object in Redis. So please make sure your principal class has a field which you can get unique id of this object. Please setting this id field name by `cacheManager.principalIdFieldName = <your id field name of principal object>` For example: If you create `SimpleAuthenticationInfo` like this: ```java @Override protected AuthenticationInfo doGetAuthenticationInfo(AuthenticationToken token) throws AuthenticationException { UsernamePasswordToken usernamePasswordToken = (UsernamePasswordToken)token; UserInfo userInfo = new UserInfo(); userInfo.setUsername(usernamePasswordToken.getUsername()); return new SimpleAuthenticationInfo(userInfo, "123456", getName()); } ``` Then the `userInfo` object is your principal object. You need to make sure `UserInfo` has an unique field for Redis to identify it. Take `userId` as an example: ```java public class UserInfo implements Serializable{ private Integer userId private String username; public String getUsername() { return username; } public void setUsername(String username) { this.username = username; } public Integer getUserId() { return this.userId; } } ``` Put userId as the value of `cacheManager.principalIdFieldName`, like this: ```properties cacheManager.principalIdFieldName = userId ``` If you're using Spring, the configuration should be ```xml <property name="principalIdFieldName" value="userId" /> ``` Then `shiro-redis` will call `userInfo.getUserId()` to get the id for saving Redis object. # How to configure ? You can configure `shiro-redis` either in `shiro.ini` or in `spring-*.xml` ## shiro.ini Here is the configuration example for shiro.ini. ### Redis Standalone If you are running Redis in Standalone mode ```properties [main] #==================================== # shiro-redis configuration [start] #==================================== #=================================== # Redis Manager [start] #=================================== # Create redisManager redisManager = org.crazycake.shiro.RedisManager # Redis host. If you don't specify host the default value is 127.0.0.1:6379 redisManager.host = 127.0.0.1:6379 #=================================== # Redis Manager [end] #=================================== #========================================= # Redis session DAO [start] #========================================= # Create redisSessionDAO redisSessionDAO = org.crazycake.shiro.RedisSessionDAO # Use redisManager as cache manager redisSessionDAO.redisManager = $redisManager sessionManager = org.apache.shiro.web.session.mgt.DefaultWebSessionManager sessionManager.sessionDAO = $redisSessionDAO securityManager.sessionManager = $sessionManager #========================================= # Redis session DAO [end] #========================================= #========================================== # Redis cache manager [start] #========================================== # Create cacheManager cacheManager = org.crazycake.shiro.RedisCacheManager # Principal id field name. The field which you can get unique id to identify this principal. # For example, if you use UserInfo as Principal class, the id field maybe `id`, `userId`, `email`, etc. # Remember to add getter to this id field. For example, `getId()`, `getUserId()`, `getEmail()`, etc. # Default value is id, that means your principal object must has a method called `getId()` cacheManager.principalIdFieldName = id # Use redisManager as cache manager cacheManager.redisManager = $redisManager securityManager.cacheManager = $cacheManager #========================================== # Redis cache manager [end] #========================================== #================================= # shiro-redis configuration [end] #================================= ``` For complete configurable options list, check [Configurable Options](#configurable-options). Here is a [tutorial project](https://github.com/alexxiyang/shiro-redis-tutorial) for you to understand how to configure `shiro-redis` in `shiro.ini`. ### Redis Sentinel if you're using Redis Sentinel, please replace the `redisManager` configuration of the standalone version into the following: ```properties #=================================== # Redis Manager [start] #=================================== # Create redisManager redisManager = org.crazycake.shiro.RedisSentinelManager # Sentinel host. If you don't specify host the default value is 127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381 redisManager.host = 127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381 # Sentinel master name redisManager.masterName = mymaster #=================================== # Redis Manager [end] #=================================== ``` For complete configurable options list, check [Configurable Options](#configurable-options). ### Redis Cluster If you're using redis cluster, please replace the `redisManager` configuration of the standalone version into the following: ```properties #=================================== # Redis Manager [start] #=================================== # Create redisManager redisManager = org.crazycake.shiro.RedisClusterManager # Redis host and port list redisManager.host = 192.168.21.3:7000,192.168.21.3:7001,192.168.21.3:7002,192.168.21.3:7003,192.168.21.3:7004,192.168.21.3:7005 #=================================== # Redis Manager [end] #=================================== ``` For complete configurable options list, check [Configurable Options](#configurable-options). ## Spring If you are using Spring ### Redis Standalone If you are running Redis in Standalone mode ```xml <!-- shiro-redis configuration [start] --> <!-- Redis Manager [start] --> <bean id="redisManager" class="org.crazycake.shiro.RedisManager"> <property name="host" value="127.0.0.1:6379"/> </bean> <!-- Redis Manager [end] --> <!-- Redis session DAO [start] --> <bean id="redisSessionDAO" class="org.crazycake.shiro.RedisSessionDAO"> <property name="redisManager" ref="redisManager" /> </bean> <bean id="sessionManager" class="org.apache.shiro.web.session.mgt.DefaultWebSessionManager"> <property name="sessionDAO" ref="redisSessionDAO" /> </bean> <!-- Redis session DAO [end] --> <!-- Redis cache manager [start] --> <bean id="cacheManager" class="org.crazycake.shiro.RedisCacheManager"> <property name="redisManager" ref="redisManager" /> </bean> <!-- Redis cache manager [end] --> <bean id="securityManager" class="org.apache.shiro.web.mgt.DefaultWebSecurityManager"> <property name="sessionManager" ref="sessionManager" /> <property name="cacheManager" ref="cacheManager" /> <!-- other configurations --> <property name="realm" ref="exampleRealm"/> <property name="rememberMeManager.cipherKey" value="kPH+bIxk5D2deZiIxcaaaA==" /> </bean> <!-- shiro-redis configuration [end] --> ``` For complete configurable options list, check [Configurable Options](#configurable-options). Here is a [tutorial project](https://github.com/alexxiyang/shiro-redis-spring-tutorial) for you to understand how to configure `shiro-redis` in spring configuration file. ### Redis Sentinel If you use redis sentinel, please replace the `redisManager` configuration of the standalone version into the following: ```xml <!-- shiro-redis configuration [start] --> <!-- shiro redisManager --> <bean id="redisManager" class="org.crazycake.shiro.RedisSentinelManager"> <property name="host" value="127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381"/> <property name="masterName" value="mymaster"/> </bean> ``` For complete configurable options list, check [Configurable Options](#configurable-options). ### Redis Cluster If you use redis cluster, please replace the `redisManager` configuration of the standalone version into the following: ```xml <!-- shiro-redis configuration [start] --> <!-- shiro redisManager --> <bean id="redisManager" class="org.crazycake.shiro.RedisClusterManager"> <property name="host" value="192.168.21.3:7000,192.168.21.3:7001,192.168.21.3:7002,192.168.21.3:7003,192.168.21.3:7004,192.168.21.3:7005"/> </bean> ``` For complete configurable options list, check [Configurable Options](#configurable-options). ## Serializer Since redis only accept `byte[]`, there comes a serializer problem. Shiro-redis is using `StringSerializer` as key serializer and `ObjectSerializer` as value serializer. You can use your own custom serializer, as long as this custom serializer implements `org.crazycake.shiro.serializer.RedisSerializer` For example, we can change the charset of keySerializer like this ```properties # If you want change charset of keySerializer or use your own custom serializer, you need to define serializer first # # cacheManagerKeySerializer = org.crazycake.shiro.serializer.StringSerializer # Supported encodings refer to https://docs.oracle.com/javase/8/docs/technotes/guides/intl/encoding.doc.html # UTF-8, UTF-16, UTF-32, ISO-8859-1, GBK, Big5, etc # # cacheManagerKeySerializer.charset = UTF-8 # cacheManager.keySerializer = $cacheManagerKeySerializer ``` These 4 options that you can replace them with your cutom serializers: - cacheManager.keySerializer - cacheManager.valueSerializer - redisSessionDAO.keySerializer - redisSessionDAO.valueSerializer ## Configurable Options Here are all the available options you can use in `shiro-redis` configuration file. ### RedisManager | Title | Default | Description | | :------------------| :------------------- | :---------------------------| | host | `127.0.0.1:6379` | Redis host. If you don't specify host the default value is `127.0.0.1:6379`. If you run redis in sentinel mode or cluster mode, separate host names with comma, like `127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381` | | masterName | `mymaster` | **Only used for sentinel mode**<br>The master node of Redis sentinel mode | | timeout | `2000` | Redis connect timeout. Timeout for jedis try to connect to redis server(In milliseconds) | | soTimeout | `2000` | **Only used for sentinel mode or cluster mode**<br>The timeout for jedis try to read data from redis server | | maxAttempts | `3` | **Only used for cluster mode**<br>Max attempts to connect to server | | password | | Redis password | | database | `0` | Redis database. Default value is 0 | | jedisPoolConfig | `new redis.clients.jedis.JedisPoolConfig()` | JedisPoolConfig. You can create your own JedisPoolConfig instance and set attributes as you wish<br>Most of time, you don't need to set jedisPoolConfig<br>Here is an example.<br>`jedisPoolConfig = redis.clients.jedis.JedisPoolConfig`<br>`jedisPoolConfig.testWhileIdle = false`<br>`redisManager.jedisPoolConfig = jedisPoolConfig` | | count | `100` | Scan count. Shiro-redis use Scan to get keys, so you can define the number of elements returned at every iteration. | | jedisPool | `null` | **Only used for sentinel mode or single mode**<br>You can create your own JedisPool instance and set attributes as you wish | ### RedisSessionDAO | Title | Default | Description | | :------------------| :------------------- | :---------------------------| | redisManager | | RedisManager which you just configured above (Required) | | expire | `-2` | Redis cache key/value expire time. The expire time is in second.<br>Special values:<br>`-1`: no expire<br>`-2`: the same timeout with session<br>Default value: `-2`<br>**Note**: Make sure expire time is longer than session timeout. | | keyPrefix | `shiro:session:` | Custom your redis key prefix for session management<br>**Note**: Remember to add colon at the end of prefix. | | sessionInMemoryTimeout | `1000` | When we do signin, `doReadSession(sessionId)` will be called by shiro about 10 times. So shiro-redis save Session in ThreadLocal to remit this problem. sessionInMemoryTimeout is expiration of Session in ThreadLocal. <br>Most of time, you don't need to change it. | | sessionInMemoryEnabled | `true` | Whether or not enable temporary save session in ThreadLocal | | keySerializer | `org.crazycake.shiro.serializer.StringSerializer` | The key serializer of cache manager<br>You can change the implement of key serializer or the encoding of StringSerializer.<br>Supported encodings refer to [Supported Encodings](https://docs.oracle.com/javase/8/docs/technotes/guides/intl/encoding.doc.html). Such as `UTF-8`, `UTF-16`, `UTF-32`, `ISO-8859-1`, `GBK`, `Big5`, etc<br>For more detail, check [Serializer](#serializer) | | valueSerializer | `org.crazycake.shiro.serializer.ObjectSerializer` | The value serializer of cache manager<br>You can change the implement of value serializer<br>For more detail, check [Serializer](#serializer) | ### CacheManager | Title | Default | Description | | :--------------------| :------------------- | :---------------------------| | redisManager | | RedisManager which you just configured above (Required) | | principalIdFieldName | `id` | Principal id field name. The field which you can get unique id to identify this principal.<br>For example, if you use UserInfo as Principal class, the id field maybe `id`, `userId`, `email`, etc.<br>Remember to add getter to this id field. For example, `getId()`, `getUserId(`), `getEmail()`, etc.<br>Default value is `id`, that means your principal object must has a method called `getId()` | | expire | `1800` | Redis cache key/value expire time. <br>The expire time is in second. | | keyPrefix | `shiro:cache:` | Custom your redis key prefix for cache management<br>**Note**: Remember to add colon at the end of prefix. | | keySerializer | `org.crazycake.shiro.serializer.StringSerializer` | The key serializer of cache manager<br>You can change the implement of key serializer or the encoding of StringSerializer.<br>Supported encodings refer to [Supported Encodings](https://docs.oracle.com/javase/8/docs/technotes/guides/intl/encoding.doc.html). Such as `UTF-8`, `UTF-16`, `UTF-32`, `ISO-8859-1`, `GBK`, `Big5`, etc<br>For more detail, check [Serializer](#serializer) | | valueSerializer | `org.crazycake.shiro.serializer.ObjectSerializer` | The value serializer of cache manager<br>You can change the implement of value serializer<br>For more detail, check [Serializer](#serializer) | # Spring boot starter Using `Spring-Boot` integration is the easiest way to integrate `shiro-redis` into a Spring-base application. > Note: `shiro-redis-spring-boot-starter` version `3.2.1` is based on `shiro-spring-boot-web-starter` version `1.4.0-RC2` First include the `shiro-redis` Spring boot starter dependency in you application classpath ```xml <dependency> <groupId>org.crazycake</groupId> <artifactId>shiro-redis-spring-boot-starter</artifactId> <version>3.3.1</version> </dependency> ``` The next step depends on whether you've created your own `SessionManager` or `SessionsSecurityManager`. ## If you haven't created your own `SessionManager` or `SessionsSecurityManager` If you don't have your own `SessionManager` or `SessionsSecurityManager` in your configuration, `shiro-redis-spring-boot-starter` will create `RedisSessionDAO` and `RedisCacheManager` for you. Then inject them into `SessionManager` and `SessionsSecurityManager` automatically. So, You are all set. Enjoy it! ## If you have created your own `SessionManager` or `SessionsSecurityManager` If you have created your own `SessionManager` or `SessionsSecurityManager` like this: ```java @Bean public SessionsSecurityManager securityManager(List<Realm> realms) { DefaultWebSecurityManager securityManager = new DefaultWebSecurityManager(realms); // other stuff... return securityManager; } ``` Then inject `redisSessionDAO` and `redisCacheManager` which created by `shiro-redis-spring-boot-starter` already ```java @Autowired RedisSessionDAO redisSessionDAO; @Autowired RedisCacheManager redisCacheManager; ``` Inject them into your own `SessionManager` and `SessionsSecurityManager` ```java @Bean public SessionManager sessionManager() { DefaultWebSessionManager sessionManager = new DefaultWebSessionManager(); // inject redisSessionDAO sessionManager.setSessionDAO(redisSessionDAO); // other stuff... return sessionManager; } @Bean public SessionsSecurityManager securityManager(List<Realm> realms, SessionManager sessionManager) { DefaultWebSecurityManager securityManager = new DefaultWebSecurityManager(realms); //inject sessionManager securityManager.setSessionManager(sessionManager); // inject redisCacheManager securityManager.setCacheManager(redisCacheManager); // other stuff... return securityManager; } ``` For full example, see [shiro-redis-spring-boot-tutorial](https://github.com/alexxiyang/shiro-redis-spring-boot-tutorial) ### Configuration Properties Here are all available options you can use in Spring-boot starter configuration | Title | Default | Description | | :--------------------------------------------------| :------------------- | :---------------------------| | shiro-redis.enabled | `true` | Enables shiro-redis’s Spring module | | shiro-redis.redis-manager.deploy-mode | `standalone` | Redis deploy mode. Options: `standalone`, `sentinel`, 'cluster' | | shiro-redis.redis-manager.host | `127.0.0.1:6379` | Redis host. If you don't specify host the default value is `127.0.0.1:6379`. If you run redis in sentinel mode or cluster mode, separate host names with comma, like `127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381` | | shiro-redis.redis-manager.master-name | `mymaster` | **Only used for sentinel mode**<br>The master node of Redis sentinel mode | | shiro-redis.redis-manager.timeout | `2000` | Redis connect timeout. Timeout for jedis try to connect to redis server(In milliseconds) | | shiro-redis.redis-manager.so-timeout | `2000` | **Only used for sentinel mode or cluster mode**<br>The timeout for jedis try to read data from redis server | | shiro-redis.redis-manager.max-attempts | `3` | **Only used for cluster mode**<br>Max attempts to connect to server | | shiro-redis.redis-manager.password | | Redis password | | shiro-redis.redis-manager.database | `0` | Redis database. Default value is 0 | | shiro-redis.redis-manager.count | `100` | Scan count. Shiro-redis use Scan to get keys, so you can define the number of elements returned at every iteration. | | shiro-redis.session-dao.expire | `-2` | Redis cache key/value expire time. The expire time is in second.<br>Special values:<br>`-1`: no expire<br>`-2`: the same timeout with session<br>Default value: `-2`<br>**Note**: Make sure expire time is longer than session timeout. | | shiro-redis.session-dao.key-prefix | `shiro:session:` | Custom your redis key prefix for session management<br>**Note**: Remember to add colon at the end of prefix. | | shiro-redis.session-dao.session-in-memory-timeout | `1000` | When we do signin, `doReadSession(sessionId)` will be called by shiro about 10 times. So shiro-redis save Session in ThreadLocal to remit this problem. sessionInMemoryTimeout is expiration of Session in ThreadLocal. <br>Most of time, you don't need to change it. | | shiro-redis.session-dao.session-in-memory-enabled | `true` | Whether or not enable temporary save session in ThreadLocal | | shiro-redis.cache-manager.principal-id-field-name | `id` | Principal id field name. The field which you can get unique id to identify this principal.<br>For example, if you use UserInfo as Principal class, the id field maybe `id`, `userId`, `email`, etc.<br>Remember to add getter to this id field. For example, `getId()`, `getUserId(`), `getEmail()`, etc.<br>Default value is `id`, that means your principal object must has a method called `getId()` | | shiro-redis.cache-manager.expire | `1800` | Redis cache key/value expire time. <br>The expire time is in second. | | shiro-redis.cache-manager.key-prefix | `shiro:cache:` | Custom your redis key prefix for cache management<br>**Note**: Remember to add colon at the end of prefix. | ## Working with `spring-boot-devtools` If you are using `shiro-redis` with `spring-boot-devtools`. Please add this line to `resources/META-INF/spring-devtools.properties` (Create it if there is no this file): ```ini restart.include.shiro-redis=/shiro-[\\w-\\.]+jar ``` # If you found any bugs Please create the issue 可以用中文 <MSG> Add Jedis Version Comparison Charts Add Jedis Version Comparison Charts <DFF> @@ -25,6 +25,13 @@ You use either of the following 2 ways to include `shiro-redis` into your projec > **注意**:\ > 请不要使用3.1.0以下版本 +## Jedis Version Comparison Charts + +| shiro-redis | jedis | +| :----------------:| :-------: | +| 3.2.3 | 2.9.0 | +| 3.3.0(Unrelease) | 3.3.0 | + # Before use Here is the first thing you need to know. Shiro-redis needs an id field to identify your authorization object in Redis. So please make sure your principal class has a field which you can get unique id of this object. Please setting this id field name by `cacheManager.principalIdFieldName = <your id field name of principal object>`
7
Add Jedis Version Comparison Charts
0
.md
md
mit
alexxiyang/shiro-redis
1755
<NME> test_driven_development.rst <BEF> ADDFILE <MSG> DOCS : General updates to the documentation <DFF> @@ -0,0 +1,15 @@ +Test Driven Development +======================= + +Test driven development is a development style where the developer +writes a test *before* implementing a feature. + +While it is commonly used to refer only to :doc:`unit_test_driven_development`, +this is not the only form of TDD, and not even a form of TDD that works +most of the time. + +Hitch aims to provide a environment where :doc:`acceptance_test_driven_development` +is not only easy to do, it becomes an obvious approach to writing software. + + +* See: `Test Driven Development wikipedia page <https://en.wikipedia.org/wiki/Test_driven_development>`_.
15
DOCS : General updates to the documentation
0
.rst
rst
agpl-3.0
hitchtest/hitch
1756
<NME> RedisCacheTest.java <BEF> package org.crazycake.shiro; import org.apache.shiro.subject.PrincipalCollection; import org.crazycake.shiro.exception.SerializationException; import org.crazycake.shiro.serializer.ObjectSerializer; import org.crazycake.shiro.serializer.StringSerializer; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import java.util.Collection; import java.util.Iterator; import java.util.List; import java.util.Set; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.CoreMatchers.*; import static org.mockito.Mockito.*; public class RedisCacheTest { private IRedisManager redisManager; private StringSerializer keySerializer = new StringSerializer(); private ObjectSerializer valueSerializer = new ObjectSerializer(); @BeforeEach public void setUp() { redisManager = mock(IRedisManager.class); } private RedisCache mountRedisCache() { return new RedisCache(redisManager, new StringSerializer(), new ObjectSerializer(), "employee:", 1, RedisCacheManager.DEFAULT_PRINCIPAL_ID_FIELD_NAME); } @Test public void testInitialize() { Assertions.assertThrows(IllegalArgumentException.class, () -> new RedisCache<String, String>(null, null, null, "abc:", 1, RedisCacheManager.DEFAULT_PRINCIPAL_ID_FIELD_NAME)); Assertions.assertThrows(IllegalArgumentException.class, () -> new RedisCache<String, String>(new RedisManager(), null, null, "abc:", 1, RedisCacheManager.DEFAULT_PRINCIPAL_ID_FIELD_NAME)); Assertions.assertThrows(IllegalArgumentException.class, () -> new RedisCache<String, String>(new RedisManager(), new StringSerializer(), null, "abc:", 1, RedisCacheManager.DEFAULT_PRINCIPAL_ID_FIELD_NAME)); } @Test public void testPut() throws SerializationException { RedisCache rc = mountRedisCache(); Object value = rc.put("foo", "bar"); assertThat(value, is("bar")); verify(redisManager).set(keySerializer.serialize("employee:foo"), valueSerializer.serialize("bar"), 1); PrincipalCollection principal = new EmployeePrincipal(3); rc.put(principal, "account information"); verify(redisManager).set(keySerializer.serialize("employee:3"), valueSerializer.serialize("account information"), 1); } } class Employee { private int id; public Employee(int id) { this.id = id; } public int getId() { return this.id; } } class EmployeePrincipal implements PrincipalCollection { private Employee primaryPrincipal; public EmployeePrincipal(int id) { this.primaryPrincipal = new Employee(id); } @Override public Employee getPrimaryPrincipal() { return this.primaryPrincipal; } @Override public <T> T oneByType(Class<T> aClass) { } @Test public void testSize() { when(redisManager.dbSize()).thenReturn(2L); assertThat(redisCache.size(), is(2)); } @Override public List asList() { return null; } @Override public Set asSet() { return null; } @Override public Collection fromRealm(String s) { return null; } @Override public Set<String> getRealmNames() { return null; } @Override public boolean isEmpty() { return false; } @Override public Iterator iterator() { return null; } } <MSG> - Change dbSize implements into using scan <DFF> @@ -82,8 +82,8 @@ public class RedisCacheTest { } @Test - public void testSize() { - when(redisManager.dbSize()).thenReturn(2L); + public void testSize() throws SerializationException { + when(redisManager.dbSize(keySerializer.serialize(testPrefix + "*"))).thenReturn(2L); assertThat(redisCache.size(), is(2)); }
2
- Change dbSize implements into using scan
2
.java
java
mit
alexxiyang/shiro-redis
1757
<NME> languagestrings.py <BEF> SPECIFY_PYTHON_TO_CREATE_VIRTUALENV_WITH = """\ Create hitch virtualenv using specific python version (e.g. /usr/bin/python3). Defaults to using python3 on the system path.""" SPECIFY_VIRTUALENV_TO_CREATE_HITCH_WITH = """\ Create hitch virtualenv using specific virtualenv (e.g. /usr/bin/virtualenv). Defaults to using virtualenv on the system path.""" YOU_MUST_HAVE_VIRTUALENV_INSTALLED = """\ You must have virtualenv installed to use hitch. Suggestions: #1 Install via your system's package manager: - On Ubuntu/Debian : sudo apt-get install python-virtualenv - On Fedora : sudo yum install python-virtualenv - On Arch : sudo pacman -Sy python-virtualenv - On Mac OS X : pip install --upgrade virtualenv To use Hitch, you must have python 3 installed on your system and available. If your python3 is not on the system path with the name python3, specify its exact location using --python. """ YOU_MUST_HAVE_VERSION_ABOVE_PYTHON33 = """\ Hitch must have python 3.3 or higher installed to run. Your app can run with earlier versions of python, but the tests can't. """ ERROR_INITIALIZING_HITCH = """\ hitch init --python=/path/to/python3/bin/python3 """ YOU_MUST_HAVE_VERSION_ABOVE_PYTHON33 = """\ Hitch must have python 3.3 or higher installed to run. Your app can run with earlier versions of python, but the testing environment cannot. Suggestions: #1 You may need to run a sytem upgrade or upgrade your OS. #2 If you have python 3.3+ installed but it is not accessible on the system path with the command 'python3', you can run: hitch init --virtualenv=/path/to/python3/bin/virtualenv OR hitch init --python=/path/to/python3/bin/python3 """ ERROR_INITIALIZING_HITCH = """\ \nError initializing hitch. Problem checklist:\n * Was there a problem with your internet? * Was there a python package being installed that couldn't compile?\n Try searching for any errors printed above or raising an issue at: http://github.com/hitchtest/hitch/issues/ """ HITCH_DIRECTORY_MOVED = """\ The hitch directory '{0}' was moved. "Run 'hitch clean' then run 'hitch init' in this directory: ==> {1} """ HITCH_NOT_INITIALIZED = """\ Hitch has not been initialized in this directory, or any of the directories beneath it:\n""" SOMETHING_CORRUPTED = """\ WARNING: Hitch directory was corrupted. Run 'hitch clean' and hitch init again.\n """ UPDATING_REQUIREMENTS = """\ Updating installed packages to bring them in alignment with the contents of hitchreqs.txt\n""" <MSG> FEATURE : Added more clarification to error strings. <DFF> @@ -22,11 +22,19 @@ YOU_MUST_HAVE_PYTHON3_INSTALLED = """\ To use Hitch, you must have python 3 installed on your system and available. If your python3 is not on the system path with the name python3, specify its exact location using --python. + +To install: + - On Ubuntu/Debian : sudo apt-get install python3 + - On Fedora : sudo yum install python3 + - On Arch : sudo pacman -Sy python3 + - On Mac OS X : brew install python3 """ YOU_MUST_HAVE_VERSION_ABOVE_PYTHON33 = """\ Hitch must have python 3.3 or higher installed to run. Your app can run with earlier versions of python, but the tests can't. + +You may need to run a sytem upgrade or upgrade your OS. """ ERROR_INITIALIZING_HITCH = """\
8
FEATURE : Added more clarification to error strings.
0
.py
py
agpl-3.0
hitchtest/hitch
1758
<NME> RedisSessionDAO.java <BEF> package org.crazycake.shiro; import org.apache.shiro.session.Session; import org.apache.shiro.session.UnknownSessionException; import org.apache.shiro.session.mgt.eis.AbstractSessionDAO; import org.crazycake.shiro.common.SessionInMemory; import org.crazycake.shiro.exception.SerializationException; import org.crazycake.shiro.serializer.ObjectSerializer; import org.crazycake.shiro.serializer.RedisSerializer; import org.crazycake.shiro.serializer.StringSerializer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.Serializable; import java.util.*; /** * Used for setting/getting authentication information from Redis */ public class RedisSessionDAO extends AbstractSessionDAO { private static Logger logger = LoggerFactory.getLogger(RedisSessionDAO.class); private static final String DEFAULT_SESSION_KEY_PREFIX = "shiro:session:"; private String keyPrefix = DEFAULT_SESSION_KEY_PREFIX; /** * doReadSession be called about 10 times when login. * Save Session in ThreadLocal to resolve this problem. sessionInMemoryTimeout is expiration of Session in ThreadLocal. * The default value is 1000 milliseconds (1s). * Most of time, you don't need to change it. * * You can turn it off by setting sessionInMemoryEnabled to false */ private static final long DEFAULT_SESSION_IN_MEMORY_TIMEOUT = 1000L; private long sessionInMemoryTimeout = DEFAULT_SESSION_IN_MEMORY_TIMEOUT; private static final boolean DEFAULT_SESSION_IN_MEMORY_ENABLED = true; private boolean sessionInMemoryEnabled = DEFAULT_SESSION_IN_MEMORY_ENABLED; private static ThreadLocal sessionsInThread = new ThreadLocal(); /** * expire time in seconds. * NOTE: Please make sure expire is longer than session.getTimeout(), * otherwise you might need the issue that session in Redis got erased when the Session is still available * * DEFAULT_EXPIRE: use the timeout of session instead of setting it by yourself * NO_EXPIRE: never expire */ private static final int DEFAULT_EXPIRE = -2; private static final int NO_EXPIRE = -1; private int expire = DEFAULT_EXPIRE; private static final int MILLISECONDS_IN_A_SECOND = 1000; /** * redisManager used for communicate with Redis */ private IRedisManager redisManager; /** * Serializer of key */ private RedisSerializer keySerializer = new StringSerializer(); /** * Serializer of value */ private RedisSerializer valueSerializer = new ObjectSerializer(); /** * save/update session * @param session * @throws UnknownSessionException */ @Override public void update(Session session) throws UnknownSessionException { if (this.sessionInMemoryEnabled) { this.removeExpiredSessionInMemory(); } this.saveSession(session); if (this.sessionInMemoryEnabled) { this.setSessionToThreadLocal(session.getId(), session); } } private void saveSession(Session session) throws UnknownSessionException { if (session == null || session.getId() == null) { logger.error("session or session id is null"); throw new UnknownSessionException("session or session id is null"); } byte[] key; byte[] value; try { key = keySerializer.serialize(getRedisSessionKey(session.getId())); value = valueSerializer.serialize(session); } catch (SerializationException e) { logger.error("serialize session error. session id=" + session.getId()); throw new UnknownSessionException(e); } if (expire == DEFAULT_EXPIRE) { redisManager.set(key, value, (int) (session.getTimeout() / MILLISECONDS_IN_A_SECOND)); return; } if (expire != NO_EXPIRE && expire * MILLISECONDS_IN_A_SECOND < session.getTimeout()) { logger.warn("Redis session expire time: " logger.debug("read session from redis"); try { s = (Session)valueSerializer.deserialize(redisManager.get(keySerializer.serialize(getRedisSessionKey(sessionId)))); // threadLocalSession.set(s); } catch (SerializationException e) { logger.error("read session error. settionId=" + sessionId); } /** * delete session * @param session */ @Override public void delete(Session session) { if (this.sessionInMemoryEnabled) { this.removeExpiredSessionInMemory(); } if (session == null || session.getId() == null) { logger.error("session or session id is null"); return; } if (this.sessionInMemoryEnabled) { this.delSessionFromThreadLocal(session.getId()); } try { redisManager.del(keySerializer.serialize(getRedisSessionKey(session.getId()))); } catch (SerializationException e) { logger.error("delete session error. session id=" + session.getId()); } } /** * get all active sessions * @return */ @Override public Collection<Session> getActiveSessions() { if (this.sessionInMemoryEnabled) { this.removeExpiredSessionInMemory(); } Set<Session> sessions = new HashSet<Session>(); try { Set<byte[]> keys = redisManager.keys(keySerializer.serialize(this.keyPrefix + "*")); if (keys != null && keys.size() > 0) { for (byte[] key:keys) { Session s = (Session) valueSerializer.deserialize(redisManager.get(key)); sessions.add(s); } } } catch (SerializationException e) { logger.error("get active sessions error."); } return sessions; } @Override protected Serializable doCreate(Session session) { if (this.sessionInMemoryEnabled) { this.removeExpiredSessionInMemory(); } if (session == null) { logger.error("session is null"); throw new UnknownSessionException("session is null"); } Serializable sessionId = this.generateSessionId(session); this.assignSessionId(session, sessionId); this.saveSession(session); return sessionId; } /** * I change * @param sessionId * @return */ @Override protected Session doReadSession(Serializable sessionId) { if (this.sessionInMemoryEnabled) { this.removeExpiredSessionInMemory(); } if (sessionId == null) { logger.warn("session id is null"); return null; } if (this.sessionInMemoryEnabled) { Session session = getSessionFromThreadLocal(sessionId); if (session != null) { return session; } } Session session = null; try { String sessionRedisKey = getRedisSessionKey(sessionId); logger.debug("read session: " + sessionRedisKey + " from Redis"); session = (Session) valueSerializer.deserialize(redisManager.get(keySerializer.serialize(sessionRedisKey))); if (this.sessionInMemoryEnabled) { setSessionToThreadLocal(sessionId, session); } } catch (SerializationException e) { logger.error("read session error. sessionId: " + sessionId); } return session; } private void setSessionToThreadLocal(Serializable sessionId, Session session) { this.initSessionsInThread(); Map<Serializable, SessionInMemory> sessionMap = (Map<Serializable, SessionInMemory>) sessionsInThread.get(); sessionMap.put(sessionId, this.createSessionInMemory(session)); } private void delSessionFromThreadLocal(Serializable sessionId) { Map<Serializable, SessionInMemory> sessionMap = (Map<Serializable, SessionInMemory>) sessionsInThread.get(); if (sessionMap == null) { return; } sessionMap.remove(sessionId); } private SessionInMemory createSessionInMemory(Session session) { SessionInMemory sessionInMemory = new SessionInMemory(); sessionInMemory.setCreateTime(new Date()); sessionInMemory.setSession(session); return sessionInMemory; } private void initSessionsInThread() { Map<Serializable, SessionInMemory> sessionMap = (Map<Serializable, SessionInMemory>) sessionsInThread.get(); if (sessionMap == null) { sessionMap = new HashMap<Serializable, SessionInMemory>(); sessionsInThread.set(sessionMap); } } private void removeExpiredSessionInMemory() { Map<Serializable, SessionInMemory> sessionMap = (Map<Serializable, SessionInMemory>) sessionsInThread.get(); if (sessionMap == null) { return; } Iterator<Serializable> it = sessionMap.keySet().iterator(); while (it.hasNext()) { Serializable sessionId = it.next(); SessionInMemory sessionInMemory = sessionMap.get(sessionId); if (sessionInMemory == null) { it.remove(); continue; } long liveTime = getSessionInMemoryLiveTime(sessionInMemory); if (liveTime > sessionInMemoryTimeout) { it.remove(); } } if (sessionMap.size() == 0) { sessionsInThread.remove(); } } private Session getSessionFromThreadLocal(Serializable sessionId) { if (sessionsInThread.get() == null) { return null; } Map<Serializable, SessionInMemory> sessionMap = (Map<Serializable, SessionInMemory>) sessionsInThread.get(); SessionInMemory sessionInMemory = sessionMap.get(sessionId); if (sessionInMemory == null) { return null; } logger.debug("read session from memory"); return sessionInMemory.getSession(); } private long getSessionInMemoryLiveTime(SessionInMemory sessionInMemory) { Date now = new Date(); return now.getTime() - sessionInMemory.getCreateTime().getTime(); } private String getRedisSessionKey(Serializable sessionId) { return this.keyPrefix + sessionId; } public IRedisManager getRedisManager() { return redisManager; } public void setRedisManager(IRedisManager redisManager) { this.redisManager = redisManager; } public String getKeyPrefix() { return keyPrefix; } public void setKeyPrefix(String keyPrefix) { this.keyPrefix = keyPrefix; } public RedisSerializer getKeySerializer() { return keySerializer; } public void setKeySerializer(RedisSerializer keySerializer) { this.keySerializer = keySerializer; } public RedisSerializer getValueSerializer() { return valueSerializer; } public void setValueSerializer(RedisSerializer valueSerializer) { this.valueSerializer = valueSerializer; } public long getSessionInMemoryTimeout() { return sessionInMemoryTimeout; } public void setSessionInMemoryTimeout(long sessionInMemoryTimeout) { this.sessionInMemoryTimeout = sessionInMemoryTimeout; } public int getExpire() { return expire; } public void setExpire(int expire) { this.expire = expire; } public boolean getSessionInMemoryEnabled() { return sessionInMemoryEnabled; } public void setSessionInMemoryEnabled(boolean sessionInMemoryEnabled) { this.sessionInMemoryEnabled = sessionInMemoryEnabled; } public static ThreadLocal getSessionsInThread() { return sessionsInThread; } } <MSG> Merge pull request #7 from alexxiyang/master update <DFF> @@ -108,7 +108,7 @@ public class RedisSessionDAO extends AbstractSessionDAO { logger.debug("read session from redis"); try { s = (Session)valueSerializer.deserialize(redisManager.get(keySerializer.serialize(getRedisSessionKey(sessionId)))); - // threadLocalSession.set(s); + threadLocalSession.set(s); } catch (SerializationException e) { logger.error("read session error. settionId=" + sessionId); }
1
Merge pull request #7 from alexxiyang/master
1
.java
java
mit
alexxiyang/shiro-redis
1759
<NME> RedisSessionDAOTest.java <BEF> package org.crazycake.shiro; import org.apache.shiro.session.Session; import org.apache.shiro.session.UnknownSessionException; import org.crazycake.shiro.exception.SerializationException; import org.crazycake.shiro.model.FakeSession; import org.crazycake.shiro.serializer.StringSerializer; import org.junit.After; import org.junit.Before; import org.junit.Test; import java.io.Serializable; import java.util.Collection; import java.util.Map; import static fixture.TestFixture.*; import static org.junit.Assert.fail; public class RedisSessionDAOTest { private RedisSessionDAO redisSessionDAO; private FakeSession session1; private FakeSession session2; private FakeSession emptySession; private String name1; private String prefix; private void blast() { blastRedis(); } private void scaffold() { prefix = scaffoldPrefix(); RedisManager redisManager = scaffoldStandaloneRedisManager(); redisSessionDAO = scaffoldRedisSessionDAO(redisManager, prefix); session1 = scaffoldSession(); session2 = scaffoldSession(); emptySession = scaffoldEmptySession(); name1 = scaffoldUsername(); } @Before public void setUp() { blast(); scaffold(); } @After public void tearDown() { blast(); } @Test public void testDoCreateNull() { try { redisSessionDAO.doCreate(null); fail(); } catch (UnknownSessionException e) { assertEquals(e.getMessage(), "session is null"); } } @Test public void testDoCreate() { redisSessionDAO.doCreate(session1); Session actualSession = redisSessionDAO.doReadSession(session1.getId()); assertSessionEquals(actualSession, session1); } @Test public void testDoCreateWithSessionTimeout() { doSetSessionDAOExpire(redisSessionDAO, -2); redisSessionDAO.doCreate(session2); assertEquals(getRedisTTL(prefix + session2.getId(), new StringSerializer()), 1800L); } @Test public void testUpdateNull() { try { redisSessionDAO.update(null); fail(); } catch (UnknownSessionException e) { assertEquals(e.getMessage(), "session or session id is null"); } } @Test public void testUpdateEmptySession() { try { redisSessionDAO.update(emptySession); fail(); } catch (UnknownSessionException e) { assertEquals(e.getMessage(), "session or session id is null"); } } @Test public void testUpdate() { redisSessionDAO.doCreate(session1); redisSessionDAO.doReadSession(session1.getId()); doChangeSessionName(session1, name1); redisSessionDAO.update(session1); FakeSession actualSession = (FakeSession)redisSessionDAO.doReadSession(session1.getId()); assertEquals(actualSession.getName(), name1); } @Test public void testUpdateWithoutSessionInMemory() { redisSessionDAO.setSessionInMemoryEnabled(false); redisSessionDAO.doCreate(session1); redisSessionDAO.doReadSession(session1.getId()); doChangeSessionName(session1, name1); redisSessionDAO.update(session1); FakeSession actualSession = (FakeSession)redisSessionDAO.doReadSession(session1.getId()); assertEquals(actualSession.getName(), name1); } @Test public void testDelete() { redisSessionDAO.doCreate(session1); redisSessionDAO.delete(session1); assertRedisEmpty(); } @Test public void testGetActiveSessions() { redisSessionDAO.doCreate(session1); redisSessionDAO.doCreate(session2); Collection<Session> activeSessions = redisSessionDAO.getActiveSessions(); assertEquals(activeSessions.size(), 2); } @Test public void testRemoveExpiredSessionInMemory() throws InterruptedException, SerializationException { redisSessionDAO.setSessionInMemoryTimeout(500L); redisSessionDAO.doCreate(session1); redisSessionDAO.doReadSession(session1.getId()); Thread.sleep(1000); redisSessionDAO.doCreate(session2); redisSessionDAO.doReadSession(session2.getId()); Map<Serializable, SessionInMemory> sessionMap = (Map<Serializable, SessionInMemory>) redisSessionDAO.getSessionsInThread().get(); assertEquals(sessionMap.size(), 1); } } <MSG> - Upgrade packages version - Add integration-test - Update unit tests - Correct double checked locking <DFF> @@ -1,143 +1,161 @@ package org.crazycake.shiro; +import org.apache.shiro.session.InvalidSessionException; import org.apache.shiro.session.Session; -import org.apache.shiro.session.UnknownSessionException; +import org.crazycake.shiro.common.IRedisManager; import org.crazycake.shiro.exception.SerializationException; -import org.crazycake.shiro.model.FakeSession; +import org.crazycake.shiro.serializer.ObjectSerializer; import org.crazycake.shiro.serializer.StringSerializer; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import java.io.Serializable; import java.util.Collection; -import java.util.Map; +import java.util.Date; +import java.util.HashSet; +import java.util.Set; -import static fixture.TestFixture.*; -import static org.junit.Assert.fail; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.CoreMatchers.*; public class RedisSessionDAOTest { + private IRedisManager redisManager; + private StringSerializer keySerializer = new StringSerializer(); + private ObjectSerializer valueSerializer = new ObjectSerializer(); - private RedisSessionDAO redisSessionDAO; - private FakeSession session1; - private FakeSession session2; - private FakeSession emptySession; - private String name1; - private String prefix; - private void blast() { - blastRedis(); + @BeforeEach + public void setUp() { + redisManager = mock(IRedisManager.class); } - private void scaffold() { - prefix = scaffoldPrefix(); - RedisManager redisManager = scaffoldStandaloneRedisManager(); - redisSessionDAO = scaffoldRedisSessionDAO(redisManager, prefix); - session1 = scaffoldSession(); - session2 = scaffoldSession(); - emptySession = scaffoldEmptySession(); - name1 = scaffoldUsername(); + private RedisSessionDAO mountRedisSessionDAO(Integer expire) { + RedisSessionDAO redisSessionDAO = new RedisSessionDAO(); + if (expire != null) { + redisSessionDAO.setExpire(expire); + } + redisSessionDAO.setKeyPrefix("student:"); + redisSessionDAO.setRedisManager(redisManager); + return redisSessionDAO; } - @Before - public void setUp() { - blast(); - scaffold(); + @Test + public void testUpdate() throws SerializationException { + RedisSessionDAO sessionDAO = mountRedisSessionDAO(null); + StudentSession session = new StudentSession(99, 2000); + sessionDAO.update(session); + verify(redisManager).set(keySerializer.serialize("student:99"), valueSerializer.serialize(session), 2); } - @After - public void tearDown() { - blast(); + @Test + public void testUpdateByCustomExpire() throws SerializationException { + RedisSessionDAO sessionDAO = mountRedisSessionDAO(3); + StudentSession session = new StudentSession(98, 2000); + sessionDAO.update(session); + verify(redisManager).set(keySerializer.serialize("student:98"), valueSerializer.serialize(session), 3); } @Test - public void testDoCreateNull() { - try { - redisSessionDAO.doCreate(null); - fail(); - } catch (UnknownSessionException e) { - assertEquals(e.getMessage(), "session is null"); - } + public void testUpdateByNoExpire() throws SerializationException { + RedisSessionDAO sessionDAO = mountRedisSessionDAO(-1); + StudentSession session = new StudentSession(97, 2000); + sessionDAO.update(session); + verify(redisManager).set(keySerializer.serialize("student:97"), valueSerializer.serialize(session), -1); } @Test - public void testDoCreate() { - redisSessionDAO.doCreate(session1); - Session actualSession = redisSessionDAO.doReadSession(session1.getId()); - assertSessionEquals(actualSession, session1); + public void testDelete() throws SerializationException { + RedisSessionDAO sessionDAO = mountRedisSessionDAO(null); + StudentSession session = new StudentSession(96, 1000); + sessionDAO.delete(session); + verify(redisManager).del(keySerializer.serialize("student:96")); } @Test - public void testDoCreateWithSessionTimeout() { - doSetSessionDAOExpire(redisSessionDAO, -2); - redisSessionDAO.doCreate(session2); - assertEquals(getRedisTTL(prefix + session2.getId(), new StringSerializer()), 1800L); + public void testGetActiveSessions() throws SerializationException { + Set<byte[]> mockKeys = new HashSet<byte[]>(); + mockKeys.add(keySerializer.serialize("student:1")); + mockKeys.add(keySerializer.serialize("student:2")); + when(redisManager.keys(keySerializer.serialize("student:*"))).thenReturn(mockKeys); + + StudentSession mockSession1 = new StudentSession(1, 2000); + StudentSession mockSession2 = new StudentSession(2, 2000); + when(redisManager.get(keySerializer.serialize("student:1"))).thenReturn(valueSerializer.serialize(mockSession1)); + when(redisManager.get(keySerializer.serialize("student:2"))).thenReturn(valueSerializer.serialize(mockSession2)); + + RedisSessionDAO sessionDAO = mountRedisSessionDAO(null); + assertThat(sessionDAO.getActiveSessions().size(), is(2)); } +} - @Test - public void testUpdateNull() { - try { - redisSessionDAO.update(null); - fail(); - } catch (UnknownSessionException e) { - assertEquals(e.getMessage(), "session or session id is null"); - } +class StudentSession implements Session, Serializable { + private Integer id; + private long timeout; + + public StudentSession(Integer id, long timeout) { + this.id = id; + this.timeout = timeout; } - @Test - public void testUpdateEmptySession() { - try { - redisSessionDAO.update(emptySession); - fail(); - } catch (UnknownSessionException e) { - assertEquals(e.getMessage(), "session or session id is null"); - } + @Override + public Serializable getId() { + return id; } - @Test - public void testUpdate() { - redisSessionDAO.doCreate(session1); - redisSessionDAO.doReadSession(session1.getId()); - doChangeSessionName(session1, name1); - redisSessionDAO.update(session1); - FakeSession actualSession = (FakeSession)redisSessionDAO.doReadSession(session1.getId()); - assertEquals(actualSession.getName(), name1); + @Override + public Date getStartTimestamp() { + return null; } - @Test - public void testUpdateWithoutSessionInMemory() { - redisSessionDAO.setSessionInMemoryEnabled(false); - redisSessionDAO.doCreate(session1); - redisSessionDAO.doReadSession(session1.getId()); - doChangeSessionName(session1, name1); - redisSessionDAO.update(session1); - FakeSession actualSession = (FakeSession)redisSessionDAO.doReadSession(session1.getId()); - assertEquals(actualSession.getName(), name1); + @Override + public Date getLastAccessTime() { + return null; } - @Test - public void testDelete() { - redisSessionDAO.doCreate(session1); - redisSessionDAO.delete(session1); - assertRedisEmpty(); + @Override + public long getTimeout() throws InvalidSessionException { + return timeout; } - @Test - public void testGetActiveSessions() { - redisSessionDAO.doCreate(session1); - redisSessionDAO.doCreate(session2); - Collection<Session> activeSessions = redisSessionDAO.getActiveSessions(); - assertEquals(activeSessions.size(), 2); + @Override + public void setTimeout(long l) throws InvalidSessionException { + } - @Test - public void testRemoveExpiredSessionInMemory() throws InterruptedException, SerializationException { - redisSessionDAO.setSessionInMemoryTimeout(500L); - redisSessionDAO.doCreate(session1); - redisSessionDAO.doReadSession(session1.getId()); - Thread.sleep(1000); - redisSessionDAO.doCreate(session2); - redisSessionDAO.doReadSession(session2.getId()); - Map<Serializable, SessionInMemory> sessionMap = (Map<Serializable, SessionInMemory>) redisSessionDAO.getSessionsInThread().get(); - assertEquals(sessionMap.size(), 1); + @Override + public String getHost() { + return null; + } + + @Override + public void touch() throws InvalidSessionException { + + } + + @Override + public void stop() throws InvalidSessionException { + + } + + @Override + public Collection<Object> getAttributeKeys() throws InvalidSessionException { + return null; + } + + @Override + public Object getAttribute(Object o) throws InvalidSessionException { + return null; + } + + @Override + public void setAttribute(Object o, Object o1) throws InvalidSessionException { + + } + + @Override + public Object removeAttribute(Object o) throws InvalidSessionException { + return null; } }
118
- Upgrade packages version - Add integration-test - Update unit tests - Correct double checked locking
100
.java
java
mit
alexxiyang/shiro-redis
1760
<NME> README.md <BEF> shiro-redis ============= [![Build Status](https://travis-ci.org/alexxiyang/shiro-redis.svg?branch=master)](https://travis-ci.org/alexxiyang/shiro-redis) [![Maven Central](https://maven-badges.herokuapp.com/maven-central/org.crazycake/shiro-redis/badge.svg)](https://maven-badges.herokuapp.com/maven-central/org.crazycake/shiro-redis) shiro only provide the support of ehcache and concurrentHashMap. Here is an implement of redis cache can be used by shiro. Hope it will help you! # Download You can choose these 2 ways to include shiro-redis into your project * use "git clone https://github.com/alexxiyang/shiro-redis.git" to clone project to your local workspace and build jar file by your self * add maven dependency ```xml <dependency> <groupId>org.crazycake</groupId> <artifactId>shiro-redis</artifactId> <version>3.3.1</version> </dependency> ``` > **Note:**\ > 3.3.0 is compiled in java11 by mistake. > Please use 3.3.1 which is compiled in java8 ## shiro-core/jedis Version Comparison Charts | shiro-redis | shiro | jedis | For example: If you create SimpleAuthenticationInfo like the following: ```java @Override protected AuthenticationInfo doGetAuthenticationInfo(AuthenticationToken token) throws AuthenticationException { For example: If you create `SimpleAuthenticationInfo` like this: } ``` Then the userInfo object is your principal object. You need to make sure `UserInfo` has an unique field to identify it in Redis. Take userId as an example: ```java public class UserInfo implements Serializable{ } ``` Then the `userInfo` object is your principal object. You need to make sure `UserInfo` has an unique field for Redis to identify it. Take `userId` as an example: ```java public class UserInfo implements Serializable{ private Integer userId private String username; public String getUsername() { return username; } public void setUsername(String username) { this.username = username; } public Integer getUserId() { return this.userId; } } ``` <property name="principalIdFieldName" value="userId" /> ``` Then shiro-redis will call `userInfo.getUserId()` to get the id for storing Redis object. # How to configure ? You can configure shiro-redis either in `shiro.ini` or in `spring-*.xml` ## shiro.ini Here is the configuration for shiro.ini. ### Redis Standalone ```properties [main] Here is the configuration example for shiro.ini. ### Redis Standalone If you are running Redis in Standalone mode ```properties [main] #==================================== # shiro-redis configuration [start] #==================================== #=================================== # Redis Manager [start] #=================================== # Create redisManager redisManager = org.crazycake.shiro.RedisManager # Redis host. If you don't specify host the default value is 127.0.0.1:6379 redisManager.host = 127.0.0.1:6379 #=================================== # Redis Manager [end] #=================================== #========================================= # Redis session DAO [start] #========================================= # Create redisSessionDAO redisSessionDAO = org.crazycake.shiro.RedisSessionDAO # Use redisManager as cache manager redisSessionDAO.redisManager = $redisManager sessionManager = org.apache.shiro.web.session.mgt.DefaultWebSessionManager sessionManager.sessionDAO = $redisSessionDAO securityManager.sessionManager = $sessionManager #========================================= # Redis session DAO [end] #========================================= #========================================== # For example, if you use UserInfo as Principal class, the id field maybe `id`, `userId`, `email`, etc. # Remember to add getter to this id field. For example, `getId()`, `getUserId()`, `getEmail()`, etc. # Default value is id, that means your principal object must has a method called `getId()` # cacheManager.principalIdFieldName = id # Use redisManager as cache manager # Principal id field name. The field which you can get unique id to identify this principal. # For example, if you use UserInfo as Principal class, the id field maybe `id`, `userId`, `email`, etc. # Remember to add getter to this id field. For example, `getId()`, `getUserId()`, `getEmail()`, etc. # Default value is id, that means your principal object must has a method called `getId()` cacheManager.principalIdFieldName = id # Use redisManager as cache manager cacheManager.redisManager = $redisManager securityManager.cacheManager = $cacheManager #========================================== # Redis cache manager [end] #========================================== Here is a [tutorial project](https://github.com/alexxiyang/shiro-redis-tutorial) for you to understand how to configure `shiro-redis` in `shiro.ini`. ### Redis Sentinel if you're using Redis Sentinel, please change the redisManager configuration into the following: ```properties #=================================== # Redis Manager [start] Here is a [tutorial project](https://github.com/alexxiyang/shiro-redis-tutorial) for you to understand how to configure `shiro-redis` in `shiro.ini`. ### Redis Sentinel if you're using Redis Sentinel, please replace the `redisManager` configuration of the standalone version into the following: ```properties #=================================== # Redis Manager [start] #=================================== # Create redisManager redisManager = org.crazycake.shiro.RedisSentinelManager # Sentinel host. If you don't specify host the default value is 127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381 redisManager.host = 127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381 # Sentinel master name For complete configurable options list, check [Configurable Options](#configurable-options). ### Redis Cluster If you're using redis cluster, here is an example of configuration : ```properties #=================================== For complete configurable options list, check [Configurable Options](#configurable-options). ### Redis Cluster If you're using redis cluster, please replace the `redisManager` configuration of the standalone version into the following: ```properties #=================================== # Redis Manager [start] #=================================== # Create redisManager redisManager = org.crazycake.shiro.RedisClusterManager # Redis host and port list For complete configurable options list, check [Configurable Options](#configurable-options). ## Spring ### Redis Standalone spring.xml: ```xml <!-- shiro-redis configuration [start] --> ### Redis Standalone If you are running Redis in Standalone mode ```xml <!-- shiro-redis configuration [start] --> <!-- Redis Manager [start] --> <bean id="redisManager" class="org.crazycake.shiro.RedisManager"> <property name="host" value="127.0.0.1:6379"/> </bean> <!-- Redis Manager [end] --> <!-- Redis session DAO [start] --> <bean id="redisSessionDAO" class="org.crazycake.shiro.RedisSessionDAO"> <property name="redisManager" ref="redisManager" /> </bean> <bean id="sessionManager" class="org.apache.shiro.web.session.mgt.DefaultWebSessionManager"> <property name="sessionDAO" ref="redisSessionDAO" /> </bean> <!-- Redis session DAO [end] --> <!-- Redis cache manager [start] --> <bean id="cacheManager" class="org.crazycake.shiro.RedisCacheManager"> <property name="redisManager" ref="redisManager" /> </bean> <!-- Redis cache manager [end] --> <bean id="securityManager" class="org.apache.shiro.web.mgt.DefaultWebSecurityManager"> <property name="sessionManager" ref="sessionManager" /> <property name="cacheManager" ref="cacheManager" /> <!-- other configurations --> <property name="realm" ref="exampleRealm"/> <property name="rememberMeManager.cipherKey" value="kPH+bIxk5D2deZiIxcaaaA==" /> Here is a [tutorial project](https://github.com/alexxiyang/shiro-redis-spring-tutorial) for you to understand how to configure `shiro-redis` in spring configuration file. ### Redis Sentinel If you use redis sentinel, here is an example of configuration : ```xml <!-- shiro-redis configuration [start] --> <!-- shiro redisManager --> Here is a [tutorial project](https://github.com/alexxiyang/shiro-redis-spring-tutorial) for you to understand how to configure `shiro-redis` in spring configuration file. ### Redis Sentinel If you use redis sentinel, please replace the `redisManager` configuration of the standalone version into the following: ```xml <!-- shiro-redis configuration [start] --> For complete configurable options list, check [Configurable Options](#configurable-options). ### Redis Cluster If you use redis cluster, here is an example of configuration : ```xml <!-- shiro-redis configuration [start] --> <!-- shiro redisManager --> For complete configurable options list, check [Configurable Options](#configurable-options). ### Redis Cluster If you use redis cluster, please replace the `redisManager` configuration of the standalone version into the following: ```xml For complete configurable options list, check [Configurable Options](#configurable-options). ## Serializer Since redis only accept `byte[]`, there comes to a serializer problem. Shiro-redis is using StringSerializer as key serializer and ObjectSerializer as value serializer. You can use your own custom serializer, as long as this custom serializer implemens `org.crazycake.shiro.serializer.RedisSerializer` For example, let's change the charset of keySerializer. ```properties # If you want change charset of keySerializer or use your own custom serializer, you need to define serializer first # Shiro-redis is using `StringSerializer` as key serializer and `ObjectSerializer` as value serializer. You can use your own custom serializer, as long as this custom serializer implements `org.crazycake.shiro.serializer.RedisSerializer` For example, we can change the charset of keySerializer like this ```properties # If you want change charset of keySerializer or use your own custom serializer, you need to define serializer first # # cacheManager.keySerializer = $cacheManagerKeySerializer ``` These 4 Serializers are replaceable: - cacheManager.keySerializer - cacheManager.valueSerializer - redisSessionDAO.keySerializer - redisSessionDAO.valueSerializer ## Configurable Options ### RedisManager - redisSessionDAO.valueSerializer ## Configurable Options Here are all the available options you can use in `shiro-redis` configuration file. ### RedisManager | Title | Default | Description | | :------------------| :------------------- | :---------------------------| | host | `127.0.0.1:6379` | Redis host. If you don't specify host the default value is `127.0.0.1:6379`. If you run redis in sentinel mode or cluster mode, separate host names with comma, like `127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381` | | masterName | `mymaster` | **Only used for sentinel mode**<br>The master node of Redis sentinel mode | | timeout | `2000` | Redis connect timeout. Timeout for jedis try to connect to redis server(In milliseconds) | | soTimeout | `2000` | **Only used for sentinel mode or cluster mode**<br>The timeout for jedis try to read data from redis server | | maxAttempts | `3` | **Only used for cluster mode**<br>Max attempts to connect to server | | password | | Redis password | | database | `0` | Redis database. Default value is 0 | | jedisPoolConfig | `new redis.clients.jedis.JedisPoolConfig()` | JedisPoolConfig. You can create your own JedisPoolConfig instance and set attributes as you wish<br>Most of time, you don't need to set jedisPoolConfig<br>Here is an example.<br>`jedisPoolConfig = redis.clients.jedis.JedisPoolConfig`<br>`jedisPoolConfig.testWhileIdle = false`<br>`redisManager.jedisPoolConfig = jedisPoolConfig` | | count | `100` | Scan count. Shiro-redis use Scan to get keys, so you can define the number of elements returned at every iteration. | | jedisPool | `null` | **Only used for sentinel mode or single mode**<br>You can create your own JedisPool instance and set attributes as you wish | ### RedisSessionDAO | Title | Default | Description | | :------------------| :------------------- | :---------------------------| | redisManager | | RedisManager which you just configured above (Required) | | expire | `-2` | Redis cache key/value expire time. The expire time is in second.<br>Special values:<br>`-1`: no expire<br>`-2`: the same timeout with session<br>Default value: `-2`<br>**Note**: Make sure expire time is longer than session timeout. | | keyPrefix | `shiro:session:` | Custom your redis key prefix for session management<br>**Note**: Remember to add colon at the end of prefix. | | sessionInMemoryTimeout | `1000` | When we do signin, `doReadSession(sessionId)` will be called by shiro about 10 times. So shiro-redis save Session in ThreadLocal to remit this problem. sessionInMemoryTimeout is expiration of Session in ThreadLocal. <br>Most of time, you don't need to change it. | | sessionInMemoryEnabled | `true` | Whether or not enable temporary save session in ThreadLocal | | keySerializer | `org.crazycake.shiro.serializer.StringSerializer` | The key serializer of cache manager<br>You can change the implement of key serializer or the encoding of StringSerializer.<br>Supported encodings refer to [Supported Encodings](https://docs.oracle.com/javase/8/docs/technotes/guides/intl/encoding.doc.html). Such as `UTF-8`, `UTF-16`, `UTF-32`, `ISO-8859-1`, `GBK`, `Big5`, etc<br>For more detail, check [Serializer](#serializer) | | valueSerializer | `org.crazycake.shiro.serializer.ObjectSerializer` | The value serializer of cache manager<br>You can change the implement of value serializer<br>For more detail, check [Serializer](#serializer) | ### CacheManager | Title | Default | Description | # Spring boot starter Shiro-redis’s Spring-Boot integration is the easiest way to integrate Shiro-redis into a Spring-base application. > Note: `shiro-redis-spring-boot-starter` version `3.2.1` is based on `shiro-spring-boot-web-starter` version `1.4.0-RC2` First include the Shiro-redis Spring boot starter dependency in you application classpath ```xml <dependency> > Note: `shiro-redis-spring-boot-starter` version `3.2.1` is based on `shiro-spring-boot-web-starter` version `1.4.0-RC2` First include the `shiro-redis` Spring boot starter dependency in you application classpath ``` The next step depends on whether you've created your own `SessionManager` or `SessionsSecurityManager`. Because `shiro-redis-spring-boot-starter` will create `RedisSessionDAO` and `RedisCacheManager` for you. Then inject them into `SessionManager` and `SessionsSecurityManager` automatically. But if you've created your own `SessionManager` or `SessionsSecurityManager` as below: ```java @Bean public SessionsSecurityManager securityManager(List<Realm> realms) { DefaultWebSecurityManager securityManager = new DefaultWebSecurityManager(realms); // other stuff return securityManager; } ``` You will have to inject them by yourself. for more deail, see below ## If you haven't created your own `SessionManager` or `SessionsSecurityManager` You are all set. Enjoy it! ## If you have created your own `SessionManager` or `SessionsSecurityManager` Inject `redisSessionDAO` and `redisCacheManager` which created by `shiro-redis-spring-boot-starter` already ```java @Autowired RedisSessionDAO redisSessionDAO; } ``` RedisCacheManager redisCacheManager; ``` Inject them into `SessionManager` and `SessionsSecurityManager` ```java @Bean RedisCacheManager redisCacheManager; ``` // inject redisSessionDAO sessionManager.setSessionDAO(redisSessionDAO); return sessionManager; } sessionManager.setSessionDAO(redisSessionDAO); // other stuff... return sessionManager; } // inject redisCacheManager securityManager.setCacheManager(redisCacheManager); return securityManager; } ``` securityManager.setCacheManager(redisCacheManager); For full example, see [shiro-redis-spring-boot-tutorial](https://github.com/alexxiyang/shiro-redis-spring-boot-tutorial) ### Configuration Properties | Title | Default | Description | | :--------------------------------------------------| :------------------- | :---------------------------| For full example, see [shiro-redis-spring-boot-tutorial](https://github.com/alexxiyang/shiro-redis-spring-boot-tutorial) ### Configuration Properties Here are all available options you can use in Spring-boot starter configuration | Title | Default | Description | | :--------------------------------------------------| :------------------- | :---------------------------| | shiro-redis.enabled | `true` | Enables shiro-redis’s Spring module | | shiro-redis.redis-manager.deploy-mode | `standalone` | Redis deploy mode. Options: `standalone`, `sentinel`, 'cluster' | | shiro-redis.redis-manager.host | `127.0.0.1:6379` | Redis host. If you don't specify host the default value is `127.0.0.1:6379`. If you run redis in sentinel mode or cluster mode, separate host names with comma, like `127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381` | | shiro-redis.redis-manager.master-name | `mymaster` | **Only used for sentinel mode**<br>The master node of Redis sentinel mode | | shiro-redis.redis-manager.timeout | `2000` | Redis connect timeout. Timeout for jedis try to connect to redis server(In milliseconds) | | shiro-redis.redis-manager.so-timeout | `2000` | **Only used for sentinel mode or cluster mode**<br>The timeout for jedis try to read data from redis server | | shiro-redis.redis-manager.max-attempts | `3` | **Only used for cluster mode**<br>Max attempts to connect to server | | shiro-redis.redis-manager.password | | Redis password | | shiro-redis.redis-manager.database | `0` | Redis database. Default value is 0 | | shiro-redis.redis-manager.count | `100` | Scan count. Shiro-redis use Scan to get keys, so you can define the number of elements returned at every iteration. | | shiro-redis.session-dao.expire | `-2` | Redis cache key/value expire time. The expire time is in second.<br>Special values:<br>`-1`: no expire<br>`-2`: the same timeout with session<br>Default value: `-2`<br>**Note**: Make sure expire time is longer than session timeout. | | shiro-redis.session-dao.key-prefix | `shiro:session:` | Custom your redis key prefix for session management<br>**Note**: Remember to add colon at the end of prefix. | | shiro-redis.session-dao.session-in-memory-timeout | `1000` | When we do signin, `doReadSession(sessionId)` will be called by shiro about 10 times. So shiro-redis save Session in ThreadLocal to remit this problem. sessionInMemoryTimeout is expiration of Session in ThreadLocal. <br>Most of time, you don't need to change it. | | shiro-redis.session-dao.session-in-memory-enabled | `true` | Whether or not enable temporary save session in ThreadLocal | | shiro-redis.cache-manager.principal-id-field-name | `id` | Principal id field name. The field which you can get unique id to identify this principal.<br>For example, if you use UserInfo as Principal class, the id field maybe `id`, `userId`, `email`, etc.<br>Remember to add getter to this id field. For example, `getId()`, `getUserId(`), `getEmail()`, etc.<br>Default value is `id`, that means your principal object must has a method called `getId()` | | shiro-redis.cache-manager.expire | `1800` | Redis cache key/value expire time. <br>The expire time is in second. | | shiro-redis.cache-manager.key-prefix | `shiro:cache:` | Custom your redis key prefix for cache management<br>**Note**: Remember to add colon at the end of prefix. | ## Working with `spring-boot-devtools` If you are using `shiro-redis` with `spring-boot-devtools`. Please add this line to `resources/META-INF/spring-devtools.properties` (Create it if there is no this file): ```ini restart.include.shiro-redis=/shiro-[\\w-\\.]+jar ``` # If you found any bugs Please create the issue 可以用中文 <MSG> Update README.md Update README.md <DFF> @@ -8,8 +8,8 @@ shiro only provide the support of ehcache and concurrentHashMap. Here is an impl # Download -You can choose these 2 ways to include shiro-redis into your project -* use "git clone https://github.com/alexxiyang/shiro-redis.git" to clone project to your local workspace and build jar file by your self +You use either of the following 2 ways to include `shiro-redis` into your project +* use `git clone https://github.com/alexxiyang/shiro-redis.git` to clone project to your local workspace and build jar file by your self * add maven dependency ```xml @@ -30,7 +30,7 @@ Here is the first thing you need to know. Shiro-redis needs an id field to ident For example: -If you create SimpleAuthenticationInfo like the following: +If you create `SimpleAuthenticationInfo` like this: ```java @Override protected AuthenticationInfo doGetAuthenticationInfo(AuthenticationToken token) throws AuthenticationException { @@ -41,7 +41,7 @@ protected AuthenticationInfo doGetAuthenticationInfo(AuthenticationToken token) } ``` -Then the userInfo object is your principal object. You need to make sure `UserInfo` has an unique field to identify it in Redis. Take userId as an example: +Then the `userInfo` object is your principal object. You need to make sure `UserInfo` has an unique field for Redis to identify it. Take `userId` as an example: ```java public class UserInfo implements Serializable{ @@ -73,16 +73,17 @@ If you're using Spring, the configuration should be <property name="principalIdFieldName" value="userId" /> ``` -Then shiro-redis will call `userInfo.getUserId()` to get the id for storing Redis object. +Then `shiro-redis` will call `userInfo.getUserId()` to get the id for saving Redis object. # How to configure ? -You can configure shiro-redis either in `shiro.ini` or in `spring-*.xml` +You can configure `shiro-redis` either in `shiro.ini` or in `spring-*.xml` ## shiro.ini -Here is the configuration for shiro.ini. +Here is the configuration example for shiro.ini. ### Redis Standalone +If you are running Redis in Standalone mode ```properties [main] @@ -135,7 +136,6 @@ cacheManager = org.crazycake.shiro.RedisCacheManager # For example, if you use UserInfo as Principal class, the id field maybe `id`, `userId`, `email`, etc. # Remember to add getter to this id field. For example, `getId()`, `getUserId()`, `getEmail()`, etc. # Default value is id, that means your principal object must has a method called `getId()` -# cacheManager.principalIdFieldName = id # Use redisManager as cache manager @@ -157,7 +157,7 @@ For complete configurable options list, check [Configurable Options](#configurab Here is a [tutorial project](https://github.com/alexxiyang/shiro-redis-tutorial) for you to understand how to configure `shiro-redis` in `shiro.ini`. ### Redis Sentinel -if you're using Redis Sentinel, please change the redisManager configuration into the following: +if you're using Redis Sentinel, please replace the `redisManager` configuration of the standalone version into the following: ```properties #=================================== # Redis Manager [start] @@ -180,7 +180,7 @@ redisManager.masterName = mymaster For complete configurable options list, check [Configurable Options](#configurable-options). ### Redis Cluster -If you're using redis cluster, here is an example of configuration : +If you're using redis cluster, please replace the `redisManager` configuration of the standalone version into the following: ```properties #=================================== @@ -201,9 +201,11 @@ redisManager.host = 192.168.21.3:7000,192.168.21.3:7001,192.168.21.3:7002,192.16 For complete configurable options list, check [Configurable Options](#configurable-options). ## Spring +If you are using Spring ### Redis Standalone -spring.xml: +If you are running Redis in Standalone mode + ```xml <!-- shiro-redis configuration [start] --> @@ -245,7 +247,7 @@ For complete configurable options list, check [Configurable Options](#configurab Here is a [tutorial project](https://github.com/alexxiyang/shiro-redis-spring-tutorial) for you to understand how to configure `shiro-redis` in spring configuration file. ### Redis Sentinel -If you use redis sentinel, here is an example of configuration : +If you use redis sentinel, please replace the `redisManager` configuration of the standalone version into the following: ```xml <!-- shiro-redis configuration [start] --> <!-- shiro redisManager --> @@ -258,7 +260,7 @@ If you use redis sentinel, here is an example of configuration : For complete configurable options list, check [Configurable Options](#configurable-options). ### Redis Cluster -If you use redis cluster, here is an example of configuration : +If you use redis cluster, please replace the `redisManager` configuration of the standalone version into the following: ```xml <!-- shiro-redis configuration [start] --> <!-- shiro redisManager --> @@ -270,11 +272,11 @@ If you use redis cluster, here is an example of configuration : For complete configurable options list, check [Configurable Options](#configurable-options). ## Serializer -Since redis only accept `byte[]`, there comes to a serializer problem. -Shiro-redis is using StringSerializer as key serializer and ObjectSerializer as value serializer. -You can use your own custom serializer, as long as this custom serializer implemens `org.crazycake.shiro.serializer.RedisSerializer` +Since redis only accept `byte[]`, there comes a serializer problem. +Shiro-redis is using `StringSerializer` as key serializer and `ObjectSerializer` as value serializer. +You can use your own custom serializer, as long as this custom serializer implements `org.crazycake.shiro.serializer.RedisSerializer` -For example, let's change the charset of keySerializer. +For example, we can change the charset of keySerializer like this ```properties # If you want change charset of keySerializer or use your own custom serializer, you need to define serializer first # @@ -288,13 +290,14 @@ For example, let's change the charset of keySerializer. # cacheManager.keySerializer = $cacheManagerKeySerializer ``` -These 4 Serializers are replaceable: +These 4 options that you can replace them with your cutom serializers: - cacheManager.keySerializer - cacheManager.valueSerializer - redisSessionDAO.keySerializer - redisSessionDAO.valueSerializer ## Configurable Options +Here are all the available options you can use in `shiro-redis` configuration file. ### RedisManager @@ -336,11 +339,11 @@ These 4 Serializers are replaceable: # Spring boot starter -Shiro-redis’s Spring-Boot integration is the easiest way to integrate Shiro-redis into a Spring-base application. +Using `Spring-Boot` integration is the easiest way to integrate `shiro-redis` into a Spring-base application. > Note: `shiro-redis-spring-boot-starter` version `3.2.1` is based on `shiro-spring-boot-web-starter` version `1.4.0-RC2` -First include the Shiro-redis Spring boot starter dependency in you application classpath +First include the `shiro-redis` Spring boot starter dependency in you application classpath ```xml <dependency> @@ -351,26 +354,24 @@ First include the Shiro-redis Spring boot starter dependency in you application ``` The next step depends on whether you've created your own `SessionManager` or `SessionsSecurityManager`. -Because `shiro-redis-spring-boot-starter` will create `RedisSessionDAO` and `RedisCacheManager` for you. Then inject them into `SessionManager` and `SessionsSecurityManager` automatically. +## If you haven't created your own `SessionManager` or `SessionsSecurityManager` +If you don't have your own `SessionManager` or `SessionsSecurityManager` in your configuration, `shiro-redis-spring-boot-starter` will create `RedisSessionDAO` and `RedisCacheManager` for you. Then inject them into `SessionManager` and `SessionsSecurityManager` automatically. +So, You are all set. Enjoy it! -But if you've created your own `SessionManager` or `SessionsSecurityManager` as below: +## If you have created your own `SessionManager` or `SessionsSecurityManager` +If you have created your own `SessionManager` or `SessionsSecurityManager` like this: ```java @Bean public SessionsSecurityManager securityManager(List<Realm> realms) { DefaultWebSecurityManager securityManager = new DefaultWebSecurityManager(realms); - // other stuff + + // other stuff... + return securityManager; } ``` -You will have to inject them by yourself. for more deail, see below - -## If you haven't created your own `SessionManager` or `SessionsSecurityManager` - -You are all set. Enjoy it! - -## If you have created your own `SessionManager` or `SessionsSecurityManager` -Inject `redisSessionDAO` and `redisCacheManager` which created by `shiro-redis-spring-boot-starter` already +Then inject `redisSessionDAO` and `redisCacheManager` which created by `shiro-redis-spring-boot-starter` already ```java @Autowired RedisSessionDAO redisSessionDAO; @@ -379,7 +380,7 @@ RedisSessionDAO redisSessionDAO; RedisCacheManager redisCacheManager; ``` -Inject them into `SessionManager` and `SessionsSecurityManager` +Inject them into your own `SessionManager` and `SessionsSecurityManager` ```java @Bean @@ -388,6 +389,9 @@ public SessionManager sessionManager() { // inject redisSessionDAO sessionManager.setSessionDAO(redisSessionDAO); + + // other stuff... + return sessionManager; } @@ -400,6 +404,9 @@ public SessionsSecurityManager securityManager(List<Realm> realms, SessionManage // inject redisCacheManager securityManager.setCacheManager(redisCacheManager); + + // other stuff... + return securityManager; } ``` @@ -407,6 +414,7 @@ public SessionsSecurityManager securityManager(List<Realm> realms, SessionManage For full example, see [shiro-redis-spring-boot-tutorial](https://github.com/alexxiyang/shiro-redis-spring-boot-tutorial) ### Configuration Properties +Here are all available options you can use in Spring-boot starter configuration | Title | Default | Description | | :--------------------------------------------------| :------------------- | :---------------------------|
40
Update README.md
32
.md
md
mit
alexxiyang/shiro-redis
1761
<NME> README.rst <BEF> Hitch ===== Hitch is a loosely-coupled, isolated by design testing framework built upon python's unittest that lets you write simple, easy to read and easy to debug tests for *any* software (not just web apps and not just python apps). * `Test readability <https://hitchtest.readthedocs.org/en/latest/glossary/test_readability.html>`_ * `Loose coupling <https://hitchtest.readthedocs.org/en/latest/glossary/loose_coupling.html>`_ * `Test realism <https://hitchtest.readthedocs.org/en/latest/glossary/test_realism.html>`_ * Tests that `fail fast <https://hitchtest.readthedocs.org/en/latest/glossary/fail_fast.html>`_ and `fail clearly <https://hitchtest.readthedocs.org/en/latest/glossary/fail_clearly.html>`_ Available plugins ----------------- Hitch comes with a variety of plugins to aid you to realistically testing various kinds of software, components and scenarios, including: * `Python <https://hitchtest.readthedocs.org/en/latest/plugins/hitchpython.html>`_ (includes Django and Celery service definitions) * `Postgresql <https://hitchtest.readthedocs.org/en/latest/plugins/hitchpostgres.html>`_ * `Redis <https://hitchtest.readthedocs.org/en/latest/plugins/hitchredis.html>`_ * `Web apps (using selenium) <https://hitchtest.readthedocs.org/en/latest/plugins/hitchselenium.html>`_ * Command line apps (using pexpect) * `Cron <https://hitchtest.readthedocs.org/en/latest/plugins/hitchcron.html>`_ * MySQL * RabbitMQ * Elastic Search `Plugin documentation <https://hitchtest.readthedocs.org/en/latest/plugins/>`_ Getting started --------------- See the `quickstart tutorial <https://hitchtest.readthedocs.org/en/latest/quickstart/index.html>`_ on how to get started testing an existing project. Also check out `cookiecutter-django <https://github.com/pydanny/cookiecutter-django>`_ if you want to start a new Django project with tests. Status ------ Hitch is currently in beta. It is regression tested on: * Operating Systems : Mac OS X Yosemite, Ubuntu, Debian, Fedora and Arch Linux. * Python versions : 3.5.0, 3.4.3, 3.4.0 and 3.3.0 `(what about python 2?) <https://hitchtest.readthedocs.org/en/latest/faq/what_about_python2.html>`_ It does not currently work on Windows. See `tested on <https://hitchtest.readthedocs.org/en/latest/misc/tested_on.html>`_ for more details on how the framework is tested (with itself, naturally). Contents of this project ------------------------ This project contains: * The code for the bootstrapper script * Documentation for the whole project (`hosted at readthedocs <https://hitchtest.readthedocs.org/en/latest/>`_) * Code for other components is at: https://github.com/hitchtest/ <MSG> Merge branch 'master' of github.com:hitchtest/hitch <DFF> @@ -1,6 +1,10 @@ Hitch ===== +.. image:: https://badges.gitter.im/Join%20Chat.svg + :alt: Join the chat at https://gitter.im/hitchtest/hitch + :target: https://gitter.im/hitchtest/hitch?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge + Hitch is a loosely-coupled, isolated by design testing framework built upon python's unittest that lets you write simple, easy to read and easy to debug tests for *any* software (not just web apps and not just python apps).
4
Merge branch 'master' of github.com:hitchtest/hitch
0
.rst
rst
agpl-3.0
hitchtest/hitch
1762
<NME> RedisClusterManager.java <BEF> ADDFILE <MSG> Merge branch 'master' of https://github.com/alexxiyang/shiro-redis <DFF> @@ -0,0 +1,211 @@ +package org.crazycake.shiro; + +import redis.clients.jedis.*; + +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +public class RedisClusterManager implements IRedisManager { + + protected String ip = "127.0.0.1"; + + protected String host = ip + ":" + Protocol.DEFAULT_PORT ; + + protected static final int DEFAULT_EXPIRE = 3600; + + // expire time in seconds + protected int expire = DEFAULT_EXPIRE; + + // timeout for jedis try to connect to redis server, not expire time! In milliseconds + protected int timeout = Protocol.DEFAULT_TIMEOUT; + + // timeout for jedis try to read data from redis server + protected int soTimeout = Protocol.DEFAULT_TIMEOUT; + + // requirepass + protected String password; + + // default select database + protected int database = Protocol.DEFAULT_DATABASE; + + //scan numbers each time + protected int count = 100; + + + // max attempts to connect to server + private int maxAttempts = 3; + + private volatile JedisCluster jedisCluster = null; + + private void init() { + synchronized (this) { + if (jedisCluster == null) { + jedisCluster = new JedisCluster(getHostAndPortSet(), timeout, soTimeout, maxAttempts, password, new JedisPoolConfig()); + } + } + } + + private Set<HostAndPort> getHostAndPortSet() { + String[] hostAndPortArr = host.split(","); + Set<HostAndPort> hostAndPorts = new HashSet<HostAndPort>(); + for (String hostAndPortStr : hostAndPortArr) { + String[] hostAndPort = hostAndPortStr.split(":"); + hostAndPorts.add(new HostAndPort(hostAndPort[0], Integer.parseInt(hostAndPort[1]))); + } + return hostAndPorts; + } + + + protected JedisCluster getJedisCluster() { + if (jedisCluster == null) { + init(); + } + return jedisCluster; + } + + public byte[] get(byte[] key) { + if (key == null) { + return null; + } + return getJedisCluster().get(key); + } + + public byte[] set(byte[] key, byte[] value) { + if (key == null) { + return null; + } + getJedisCluster().set(key, value); + if (this.expire != 0) { + getJedisCluster().expire(key, this.expire); + } + return value; + } + + public byte[] set(byte[] key, byte[] value, int expire) { + if (key == null) { + return null; + } + getJedisCluster().set(key, value); + if (this.expire != 0) { + getJedisCluster().expire(key, expire); + } + return value; + } + + public void del(byte[] key) { + if (key == null) { + return; + } + getJedisCluster().del(key); + } + + public Long dbSize() { + Long dbSize = 0L; + Map<String, JedisPool> clusterNodes = getJedisCluster().getClusterNodes(); + for (String k : clusterNodes.keySet()) { + JedisPool jp = clusterNodes.get(k); + Jedis connection = jp.getResource(); + try { + dbSize += connection.dbSize(); + } catch (Exception e) { + e.printStackTrace(); + } finally { + connection.close(); + } + } + return dbSize; + } + + public Set<byte[]> keys(byte[] pattern) { + Set<byte[]> keys = new HashSet<byte[]>(); + ScanParams params = new ScanParams(); + params.count(count); + params.match(pattern); + byte[] cursor = ScanParams.SCAN_POINTER_START_BINARY; + ScanResult<byte[]> scanResult; + do { + scanResult = getJedisCluster().scan(cursor, params); + keys.addAll(scanResult.getResult()); + cursor = scanResult.getCursorAsBytes(); + } while (scanResult.getStringCursor().compareTo(ScanParams.SCAN_POINTER_START) > 0); + + return keys; + } + + public int getMaxAttempts() { + return maxAttempts; + } + + public void setMaxAttempts(int maxAttempts) { + this.maxAttempts = maxAttempts; + } + + public String getIp() { + return ip; + } + + public void setIp(String ip) { + this.ip = ip; + } + + public String getHost() { + return host; + } + + public void setHost(String host) { + this.host = host; + } + + public int getExpire() { + return expire; + } + + public void setExpire(int expire) { + this.expire = expire; + } + + public int getTimeout() { + return timeout; + } + + public void setTimeout(int timeout) { + this.timeout = timeout; + } + + public int getSoTimeout() { + return soTimeout; + } + + public void setSoTimeout(int soTimeout) { + this.soTimeout = soTimeout; + } + + public String getPassword() { + return password; + } + + public void setPassword(String password) { + this.password = password; + } + + public int getDatabase() { + return database; + } + + public void setDatabase(int database) { + this.database = database; + } + + public int getCount() { + return count; + } + + public void setCount(int count) { + this.count = count; + } + + public void setJedisCluster(JedisCluster jedisCluster) { + this.jedisCluster = jedisCluster; + } +}
211
Merge branch 'master' of https://github.com/alexxiyang/shiro-redis
0
.java
java
mit
alexxiyang/shiro-redis
1763
<NME> commandline.py <BEF> """High level command line interface to hitch.""" from subprocess import call, PIPE, STDOUT, Popen from hitch.click import command, group, argument, option from os import path, makedirs, listdir, kill, remove from sys import stderr, stdout, exit, modules, argv from functools import partial, reduce from hitch import hitchdir, languagestrings import shutil import signal import copy class CalledProcessError(Exception): """Re-implemented CalledProcessError, since it is not available < python 2.7.""" pass def check_output(command, stdout=PIPE, stderr=PIPE): """Re-implemented subprocess.check_output since it is not available < python 2.7.""" return Popen(command, stdout=stdout, stderr=stderr).communicate()[0] def check_call(command, shell=False): """Re-implemented subprocess.check_call since it is not available < python 2.7.""" process = Popen(command, shell=shell) process.communicate() if process.returncode != 0: raise CalledProcessError return def stop_everything(sig, frame): """Exit hitch.""" exit(1) def installpackages(): """Install packages with hitchsystem.""" hitchsystem = path.abspath(path.join(".hitch", "virtualenv", "bin", "hitchsystem")) signal.signal(signal.SIGINT, signal.SIG_IGN) check_call([hitchsystem, "installpackages", ]) signal.signal(signal.SIGINT, stop_everything) def update_requirements(): """Check hitchreqs.txt match what's installed via pip freeze. If not, update.""" stdout.write(languagestrings.UPDATING_REQUIREMENTS) pip = path.join(hitchdir.get_hitch_directory_or_fail(), "virtualenv", "bin", "pip") hitchreqs_filename = path.join(hitchdir.get_hitch_directory_or_fail(), "..", "hitchreqs.txt") pip_freeze = check_output([pip, "freeze"]).decode('utf8').split('\n') hitchreqs_handle = "" with open(hitchreqs_filename, "r") as hitchreqs_handle: hitchreqs = hitchreqs_handle.read().split('\n') if not sorted(pip_freeze) == sorted(hitchreqs): call([pip, "install", "-r", "hitchreqs.txt"]) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) @group() def cli(): pass @command() @option( '-p', '--python', default=None, help=languagestrings.SPECIFY_PYTHON_TO_CREATE_VIRTUALENV_WITH ) @option( '-v', '--virtualenv', default=None, help=languagestrings.SPECIFY_VIRTUALENV_TO_CREATE_HITCH_WITH ) def init(python, virtualenv): """Initialize hitch in this directory.""" if virtualenv is None: if call(["which", "virtualenv"], stdout=PIPE, stderr=PIPE) != 0: stderr.write(languagestrings.YOU_MUST_HAVE_VIRTUALENV_INSTALLED) stderr.flush() exit(1) virtualenv = check_output(["which", "virtualenv"]).decode('utf8').replace("\n", "") else: if path.exists(virtualenv): if python is None: python = path.join(path.dirname(virtualenv), "python") else: stderr.write("{0} not found.\n".format(virtualenv)) if python is None: if call(["which", "python3"], stdout=PIPE, stderr=PIPE) != 0: stderr.write(languagestrings.YOU_MUST_HAVE_PYTHON3_INSTALLED) stderr.flush() exit(1) python3 = check_output(["which", "python3"]).decode('utf8').replace("\n", "") else: if path.exists(python): python3 = python else: stderr.write("{0} not found.\n".format(python)) exit(1) python_version = check_output([python3, "-V"], stderr=STDOUT).decode('utf8') replacements = ('Python ', ''), ('\n', '') str_version = reduce(lambda a, kv: a.replace(*kv), replacements, python_version) tuple_version = tuple([int(x) for x in str_version.split('.')[:2]]) if tuple_version < (3, 3): stderr.write(languagestrings.YOU_MUST_HAVE_VERSION_ABOVE_PYTHON33) exit(1) if hitchdir.hitch_exists(): hitchdir.check_hitch_directory_integrity() update_requirements() exit(0) makedirs(".hitch") # Store absolute directory in .hitch directory to guard against the directory being moved hitch_dir = path.abspath(".hitch") with open(path.join(hitch_dir, "absdir"), "w") as absdir_handle: absdir_handle.write(hitch_dir) pip = path.abspath(path.join(".hitch", "virtualenv", "bin", "pip")) try: check_call([ virtualenv, ".hitch/virtualenv", "--no-site-packages", "--distribute", "-p", python3 ]) check_call([pip, "install", "-U", "pip"]) check_call([pip, "install", "unixpackage", "hitchsystem"]) installpackages() if path.exists("hitchreqs.txt"): check_call([pip, "install", "-r", "hitchreqs.txt"]) else: check_call([pip, "install", "hitchtest"]) check_call([pip, "install", "hitchquickstart"]) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) signal.signal(signal.SIGINT, signal.SIG_IGN) check_call([path.abspath(path.join(".hitch", "virtualenv", "bin", "hitchquickstart")), ]) signal.signal(signal.SIGINT, stop_everything) installpackages() except CalledProcessError: stderr.write(languagestrings.ERROR_INITIALIZING_HITCH) hitchdir.remove_hitch_directory_if_exists() exit(1) def get_pip(): """Get the file path to the hitch pip.""" return path.join(hitchdir.get_hitch_directory_or_fail(), "virtualenv", "bin", "pip") @command(context_settings={'help_option_names':[],'ignore_unknown_options':True}, help="dd") @argument('arguments', nargs=-1) def runpackage(arguments): # Generic method to run any installed app in the virtualenv whose name starts with hitch* hitchdir.check_hitch_directory_integrity() binfile = path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin", "hitch{0}".format(argv[1])) command = [binfile, ] + argv[2:] # When receiving an exit signal, just forward it to process child. def forward_signal_to_child(pid, signum, frame): kill(pid, signum) process = Popen(command) signal.signal(signal.SIGINT, partial(forward_signal_to_child, process.pid)) signal.signal(signal.SIGTERM, partial(forward_signal_to_child, process.pid)) signal.signal(signal.SIGHUP, partial(forward_signal_to_child, process.pid)) signal.signal(signal.SIGQUIT, partial(forward_signal_to_child, process.pid)) return_code = process.wait() exit(return_code) @command() @argument('package', required=True) def uninstall(package): """Uninstall hitch package.""" hitchdir.check_hitch_directory_integrity() pip = get_pip() call([pip, "uninstall", package] ) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) update_requirements() @command() @argument('package', required=True) def install(package): """Install hitch package.""" hitchdir.check_hitch_directory_integrity() update_requirements() pip = get_pip() call([pip, "install", package, "-U", ]) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) installpackages() @command() def upgrade(): """Upgrade all installed hitch packages.""" hitchdir.check_hitch_directory_integrity() update_requirements() pip = get_pip() package_list = [ p for p in check_output([pip, "freeze"]).decode('utf8').split('\n') if p != "" and "==" in p ] version_fixed_package_list = [p.split("==")[0] for p in package_list] for package in version_fixed_package_list: call([pip, "install", package, "-U", ]) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) installpackages() @command() def freeze(): """List installed hitch packages.""" hitchdir.check_hitch_directory_integrity() pip = path.join(hitchdir.get_hitch_directory_or_fail(), "virtualenv", "bin", "pip") call([pip, "freeze", ]) @command() def clean(): """Remove the hitch directory entirely.""" if hitchdir.hitch_exists(): hitchdir.remove_hitch_directory_if_exists() else: stderr.write("No hitch directory found. Doing nothing.\n") stderr.flush() @command() @option( '-p', '--packages', default=None, help=( "Specify precise packages to remove - " "e.g. postgresql, postgresql-9.3.9, python, python2.6.8" ) ) def cleanpkg(packages): """Remove installed packages from the .hitchpkg directory.""" hitchpkg = path.join(path.expanduser("~"), ".hitchpkg") if path.exists(hitchpkg): if packages is None: shutil.rmtree(hitchpkg) else: for file_or_dir in listdir(hitchpkg): if file_or_dir.startswith(packages): if path.isdir(path.join(hitchpkg, file_or_dir)): shutil.rmtree(path.join(hitchpkg, file_or_dir)) else: remove(path.join(hitchpkg, file_or_dir)) def run(): """Run hitch bootstrap CLI""" signal.signal(signal.SIGINT, stop_everything) signal.signal(signal.SIGTERM, stop_everything) signal.signal(signal.SIGHUP, stop_everything) signal.signal(signal.SIGQUIT, stop_everything) if hitchdir.hitch_exists(): # Get packages from bin folder that are hitch related python_bin = path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin", "python") if path.exists(python_bin): packages = [ package.replace("hitch", "") for package in listdir( path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin") ) if package.startswith("hitch") and package != "hitch" ] # Add commands that start with "hitch" to the list of commands available (e.g. hitchtest, hitchsmtp) for package in packages: cmd = copy.deepcopy(runpackage) cmd.name = package try: description = check_output([ python_bin, '-c', 'import sys;sys.stdout.write(__import__("hitch{0}").commandline.cli.help)'.format( package ) ]).decode('utf8') except CalledProcessError: description = "" cmd.help = description cmd.short_help = description cli.add_command(cmd) cli.add_command(install) cli.add_command(uninstall) cli.add_command(upgrade) cli.add_command(freeze) else: stderr.write(languagestrings.SOMETHING_CORRUPTED) cli.add_command(clean) cli.add_command(cleanpkg) cli.add_command(init) cli.help = "Hitch test runner for:\n\n {0}.".format(hitchdir.get_hitch_directory()) else: cli.add_command(init) cli.add_command(clean) cli.add_command(cleanpkg) cli.help = "Hitch bootstrapper - '.hitch' directory not detected here." cli() if __name__ == '__main__': run() <MSG> BUG : Upgrade setuptools in the hitch virtualenv to prevent bugs caused by old versions. <DFF> @@ -129,7 +129,8 @@ def init(python, virtualenv): check_call([ virtualenv, ".hitch/virtualenv", "--no-site-packages", "--distribute", "-p", python3 ]) - check_call([pip, "install", "-U", "pip"]) + check_call([pip, "install", "--upgrade", "pip"]) + check_call([pip, "install", "--upgrade", "setuptools"]) check_call([pip, "install", "unixpackage", "hitchsystem"]) installpackages()
2
BUG : Upgrade setuptools in the hitch virtualenv to prevent bugs caused by old versions.
1
.py
py
agpl-3.0
hitchtest/hitch
1764
<NME> engine_api.rst <BEF> Hitch Engine API ================ The Hitch Engine is a python class which is tasked with executing your tests and responding to successes and failures. For a test like this, written in YAML: .. code-block:: yaml - name: Example scenario scenario: - Do something - Do something else The basic Hitch Engine, written in python, would need to look something like this: .. code-block:: python import hitchtest class ExecutionEngine(hitchtest.ExecutionEngine): # code that always runs at the end Step Translation ---------------- Test steps and their properties are fed to the engine directly as method calls and arguments. All step names and properties are first changed into underscore_case. For example, putting this as a test step: .. code-block:: yaml - Do something Would be equivalent to calling this in your engine: .. code-block:: python self.do_something() This, on the other hand (note the semicolon): .. code-block:: yaml - Do something else: value 1 Would be translated into: .. code-block:: python self.do_something_else("value 1") You can include as many arguments as you like in steps like so: .. code-block:: yaml - Do complicated thing: Variable 1: Value 1 Variable 2: 2 If the equivalent were written in python it would look like this: .. code-block:: python self.do_complicated_thing(variable_1="Value 1", variable_2="2") Your steps can also contain arguments that contain lists: .. code-block:: yaml - Do another complicated thing: Variable 1: value 1 Variable 2: - List item 1 - List item 2 The python equivalent of that would look like this: .. code-block:: python self.do_another_complicated_thing(variable_1="value 1", variable_2=["list item 1", "list item 2",]) They can contain dicts (or associative arrays) as well: .. code-block:: yaml - A 3rd complicated thing: Variable 1: value 1 Variable 2: Dict item 1: val 1 Dict item 2: val 2 Which in python would be equivalent to this: .. code-block:: python self.a_3rd_complicated_thing(variable_1="value 1", variable_2={'Dict item 1': 'val 1', 'Dict item 2': 'val 2'}) Careful with semicolons and braces like { and } ----------------------------------------------- Since the tests are written in YAML with optional Jinja2, braces and semicolons have special meanings and must be escaped if you want to use them. Preconditions ------------- self.preconditions is a dictionary representation of the YAML snippet in the test being run. What goes in this snippet is up to you. Anything that is valid YAML is allowed. Example: .. code-block:: yaml preconditions: db_fixtures: - fixture1.sql python_version: 2.7.3 This will mean your preconditions variable will be:: In [1]: self.preconditions Out[1]: {'db_fixtures': ['fixture1.sql'], 'python_version': '2.7.3'} You can access any properties you set here using python's get method (which you can also use to program in a sensible default):: In [1]: self.preconditions.get('db_fixtures', []) Out[1]: ['fixture1.sql'] If no preconditions are set, self.preconditions will be an empty dict:: In [1]: self.preconditions Out[1]: {} Note that while preconditions can contain lists, you can't set preconditions to be a list. Tags ---- Tests can also have tags, which let you single out individual tests to run or to run groups of tests together. Example: .. code-block:: yaml - name: Test with tags tags: - registration - email - firefox scenario: - Step 1 - Step 2 You can use these tags to run related sets of tests together like so:: $ hitch test . --tags registration Or, if you want to be more specific, you can list the tags, separated by a comma:: $ hitch test . --tags registration,email,firefox Description ----------- You can also include comments in the description property. This where you can put comments in your tests to help explain to people what your test is doing and why. It is ignored by the engine. .. code-block:: yaml - name: Test with long description description: | This test has a long history behind it. First there was a feature, then ther was another bug BUG-431, which it was tweaked to accomodate. It registers, recieves an email and checks the email arrived. scenario: - Step 1 - Step 2: with parameter - Step 3: var 1: 1 var 2: 2 var 3: 3 - Last step Stacktrace ---------- self.stacktrace is an object representation of the stack trace that occurs after a failure occurs in your test. It is set to None if no error has occurred while running the test. You can use it to pretty-print a representation of the last error that occurred:: In [1]: print(self.stacktrace.to_template()) [ prints colorized, pretty printed version of the stacktrace ] You can also use it to *dive into* the specific engine code where the exception occurred, so that you can check the contents of variables at that point or even re-run the code:: In [1]: self.stacktrace[0].ipython() Entering /home/user/django-remindme/django-remindme-tests/engine.py at line 122 In [1]: on Out[1]: 'register' Settings -------- Test settings are also available in the test engine, e.g.:: In [1]: self.settings Out[1]: {'engine_folder': '/home/user/django-remindme/django-remindme-tests', 'pause_on_failure': True, 'python_version': '2.7.3', 'xvfb': False, 'quiet': False} To read more about setting settings see :doc:`settings`. <MSG> DOCS : Tidied up engine API docs. <DFF> @@ -24,11 +24,21 @@ The basic Hitch Engine looks something like this: # code that always runs at the end +For a test like this: + +.. code-block:: yaml + + - name: Example scenario + scenario: + - Do something + - Do something else + + Step Translation ---------------- -Test steps and their properties are fed to the engine directly as method calls -and arguments. All step names and properties are first changed into underscore_case. +Test steps and their arguments are fed to the engine directly as method calls +and arguments. All step names and arguments are first changed into underscore_case. For example, putting this as a test step:
12
DOCS : Tidied up engine API docs.
2
.rst
rst
agpl-3.0
hitchtest/hitch
1765
<NME> why_is_hitch_so_fast.rst <BEF> Why is hitch so fast? ===================== There are two main reasons why Hitch is generally faster than other testing frameworks: parallelization and built in epoll/kqueue triggers. Automatic Parallelization ------------------------- When hitch services are started they are started in parallel by default. If you have seven services (like the example project), hitch will try to start all of the services that do not have a "needs" property set. As soon as services are ready that are prerequisites of other services, those will be started. This means two things: even very complex service architectures can be started extremely quickly and also that your test speed will increase substantially the more CPU power, RAM and CPU cores you have. As an example, the django-remindme-tests project runs the following services: * Postgresql (including running initdb to create all necessary database files and creating a user and database after service start-up) * Django (including installing fixtures and running migrations) * Mock Cron server * Mock SMTP server * Celery * Redis * Selenium (running and connecting to firefox). When tested on a laptop with an SSD, and an i5 processor with 4 CPU cores, just starting firefox takes 4.5 seconds. *All* of the above, when parallelized, takes between 5.1 and 5.8 seconds. Epoll/Kqueue Triggers --------------------- A common feature of other testing frameworks is the use of 'sleeps' and polling to determine if an event has occurred. This can not only contribute to test indeterminacy, it slows down your integration tests. A feature of hitch that contributes to its speed is the in-built use of epoll/kqueue triggers. These are kernel features in Linux, FreeBSD and Mac OS X that allow 'watches' to be put on files. When a file is changed, the test is automatically notified without the need for polling. This is used in the following situations: * To ascertain service readiness - the instant that Postgresql logs the line "database system is ready to accept connections", for example, Hitch will move straight on to creating users and databases. * Mock service interactions - the instant that the mock SMTP server receives an email, it logs out a snippet of JSON. The watcher on the mock SMTP logs receives the epoll trigger during that split second and the test can continue. <MSG> DOCS : Fixed line endings on latest FAQ. <DFF> @@ -20,8 +20,7 @@ substantially the more CPU power, RAM and CPU cores you have. As an example, the django-remindme-tests project runs the following services: -* Postgresql (including running initdb to create all necessary database -files and creating a user and database after service start-up) +* Postgresql (including running initdb to create all necessary database files and creating a user and database after service start-up) * Django (including installing fixtures and running migrations) * Mock Cron server * Mock SMTP server @@ -48,12 +47,7 @@ test is automatically notified without the need for polling. This is used in the following situations: -* To ascertain service readiness - the instant that Postgresql logs the -line "database system is ready to accept connections", for example, Hitch -will move straight on to creating users and databases. +* To ascertain service readiness - the instant that Postgresql logs the line "database system is ready to accept connections", for example, Hitch will move straight on to creating users and databases. -* Mock service interactions - the instant that the mock SMTP server -receives an email, it logs out a snippet of JSON. The watcher on the mock -SMTP logs receives the epoll trigger during that split second and the test -can continue. +* Mock service interactions - the instant that the mock SMTP server receives an email, it logs out a snippet of JSON. The watcher on the mock SMTP logs receives the epoll trigger during that split second and the test can continue.
3
DOCS : Fixed line endings on latest FAQ.
9
.rst
rst
agpl-3.0
hitchtest/hitch
1766
<NME> what_does_the_init_script_do.rst <BEF> What does the init script do? ============================= The init script is a one step method of setting up a hitch environment and running all the tests in a directory. It is intended to be a low friction way of getting a development and testing environment up and running. If you'd prefer instead to perform the steps manually, you can use this as a guide. 1. Installs python, pip, virtualenv, python-dev, automake and libtool (requires sudo) ------------------------------------------------------------------------------------- On Ubuntu/Debian:: $ sudo apt-get install -y python python3 python-dev python-setuptools python-virtualenv python3-dev automake libtool On Fedora/Red Hat/CentOS:: $ yum -y install python python-devel python-setuptools python-virtualenv python-pip python3 python3-devel automake libtool gcc-c++ On Arch:: $ pacman -Sy python python-setuptools python-virtualenv python automake libtool On Mac OS X:: $ brew install python python3 libtool automake cmake $ pip install --upgrade pip setuptools virtualenv 2. Install or upgrades the hitch bootstrap script (may require sudo) -------------------------------------------------------------------- On the Mac it will run:: $ pip install --upgrade hitch Or on Linux:: $ sudo pip install --upgrade hitch This is a small python script with zero dependencies. See also: * :doc:`/faq/what_does_the_hitch_bootstrap_script_do` 3. Runs "hitch clean" and "hitch init" in the current directory (does not require sudo) --------------------------------------------------------------------------------------- If no hitch environment is already installed then this command does nothing. If a .hitch directory *is* found, it will remove it:: $ hitch clean This creates a .hitch directory in the current directory, where all of the packages required to run tests will be installed in a python virtualenv:: $ hitch init * :doc:`/faq/what_does_hitch_init_do` 4. Run "hitch test ." if tests are found (may require sudo) ----------------------------------------------------------- If there are tests in the directory where the init script is run, hitch will run all of them. During the course of running the tests, the test may attempt to use sudo to install necessary packages. It will always print the exact command it is trying to run (e.g. sudo apt-get install xvfb). If the packages are already installed, hitch will not attempt to install them. See also: * :doc:`why_does_my_test_require_me_to_sudo_and_install_packages` During the course of running the tests it will also attempt to download and compile certain pieces of software (e.g. postgres). The software will be installed in the "~/.hitchpkg" directory. Doing this does not require root and it will not interfere at all with other software you may have installed. See also: * :doc:`why_is_my_test_downloading_and_compiling_software` * :doc:`why_does_the_first_test_run_take_so_long` All software installed there can be easily removed by deleting the "~/.hitchpkg" directory or running the command "hitch cleanpkg". If a test does not detect its presence it will download and compile it again. See also: * :doc:`how_do_i_uninstall_hitch_completely` <MSG> DOCS : Updated the FAQ about the init script. <DFF> @@ -1,15 +1,31 @@ What does the init script do? ============================= +.. note:: + + This script tries to respect your existing environment as much as possible and + avoids the use of sudo except where necessary to install packages via your + system's package manager. + The init script is a one step method of setting up a hitch environment and running -all the tests in a directory. It is intended to be a low friction way of getting a -development and testing environment up and running. +all the tests in a directory. It is intended to be a low friction way of: + +* Getting a CI or test driven development environment up and running. +* Rebuilding an environment from scratch that may have been broken. + +If you'd prefer instead to perform the steps manually, you can use this document +as a guide. + +Note that the first three steps take about 5 minutes and the last step can take +roughly 15 minutes (or longer, sometimes). -If you'd prefer instead to perform the steps manually, you can use this as a guide. +1. Installs python, pip, virtualenv, python-dev, automake and libtool (may require sudo) +---------------------------------------------------------------------------------------- -1. Installs python, pip, virtualenv, python-dev, automake and libtool (requires sudo) -------------------------------------------------------------------------------------- +Takes approximately: 1 minute + +These packages are required for hitch to initialize. On Ubuntu/Debian:: @@ -17,11 +33,11 @@ On Ubuntu/Debian:: On Fedora/Red Hat/CentOS:: - $ yum -y install python python-devel python-setuptools python-virtualenv python-pip python3 python3-devel automake libtool gcc-c++ + $ sudo yum -y install python python-devel python-setuptools python-virtualenv python-pip python3 python3-devel automake libtool gcc-c++ On Arch:: - $ pacman -Sy python python-setuptools python-virtualenv python automake libtool + $ sudo pacman -Sy python python-setuptools python-virtualenv python automake libtool On Mac OS X:: @@ -33,67 +49,71 @@ On Mac OS X:: 2. Install or upgrades the hitch bootstrap script (may require sudo) -------------------------------------------------------------------- -On the Mac it will run:: +Takes approximately: 5 seconds + +This is a small python script with no dependencies that bootstraps your testing +environment and lets you trigger test runs. It installs a single command ('hitch') +on your system's path. + +On the Mac the init script will run:: $ pip install --upgrade hitch -Or on Linux:: +On Linux:: $ sudo pip install --upgrade hitch -This is a small python script with zero dependencies. - See also: * :doc:`/faq/what_does_the_hitch_bootstrap_script_do` -3. Runs "hitch clean" and "hitch init" in the current directory (does not require sudo) ---------------------------------------------------------------------------------------- +3. Runs "hitch clean", "hitch cleanpkg" and "hitch init" in the current directory (may require sudo) +---------------------------------------------------------------------------------------------------- -If no hitch environment is already installed then this command does nothing. If a .hitch +Takes approximately: 2 minutes + +If no ".hitch" directory is already installed then this command does nothing. If a .hitch directory *is* found, it will remove it:: $ hitch clean -This creates a .hitch directory in the current directory, where all of the -packages required to run tests will be installed in a python virtualenv:: +If no "~/.hitchpkg" directory is found, this will also do nothing. If you already used hitch +before you may have packages downloaded into this directory, in which case it will destroy it +so it can be rebuilt:: - $ hitch init + $ hitch cleanpkg +This builds a .hitch directory in the current directory and installs any more required +system packages via unixpackage. This asks to install system packages specified in +hitch plugins and packages specified in the system.packages file:: -* :doc:`/faq/what_does_hitch_init_do` - + $ hitch init -4. Run "hitch test ." if tests are found (may require sudo) ------------------------------------------------------------ -If there are tests in the directory where the init script is run, hitch will run all -of them. +* :doc:`/faq/what_does_hitch_init_do` -During the course of running the tests, the test may attempt to use sudo to install -necessary packages. It will always print the exact command it is trying to run -(e.g. sudo apt-get install xvfb). -If the packages are already installed, hitch will not attempt to install them. +4. Run "hitch test ." to run all tests (does not require sudo) +-------------------------------------------------------------- -See also: +Takes approximately: 15 minutes (subsequent test runs will be quicker) -* :doc:`why_does_my_test_require_me_to_sudo_and_install_packages` +If there are tests in the directory where the init script is run, it will run all +of them. -During the course of running the tests it will also attempt to download and compile +During the course of running the tests it will attempt to download and compile certain pieces of software (e.g. postgres). The software will be installed in the -"~/.hitchpkg" directory. Doing this does not require root and it will not interfere -at all with other software you may have installed. +"~/.hitchpkg" directory. This does not require sudo and it will not interfere +with software you may already have installed. See also: * :doc:`why_is_my_test_downloading_and_compiling_software` * :doc:`why_does_the_first_test_run_take_so_long` -All software installed there can be easily removed by deleting the "~/.hitchpkg" -directory or running the command "hitch cleanpkg". If a test does not detect its -presence it will download and compile it again. +All software installed there can easily be removed by deleting the "~/.hitchpkg" +directory or running the command "hitch cleanpkg". See also:
55
DOCS : Updated the FAQ about the init script.
35
.rst
rst
agpl-3.0
hitchtest/hitch
1767
<NME> LICENSE <BEF> ADDFILE <MSG> Merge branch 'HEAD' of https://github.com/alexxiyang/shiro-redis.git <DFF> @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 xi yang + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file
21
Merge branch 'HEAD' of https://github.com/alexxiyang/shiro-redis.git
0
LICENSE
mit
alexxiyang/shiro-redis
1768
<NME> RedisSessionDAO.java <BEF> package org.crazycake.shiro; import org.apache.shiro.session.Session; import org.apache.shiro.session.UnknownSessionException; import org.apache.shiro.session.mgt.eis.AbstractSessionDAO; import org.crazycake.shiro.common.SessionInMemory; import org.crazycake.shiro.exception.SerializationException; import org.crazycake.shiro.serializer.ObjectSerializer; import org.crazycake.shiro.serializer.RedisSerializer; import org.crazycake.shiro.serializer.StringSerializer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.Serializable; import java.util.*; /** * Used for setting/getting authentication information from Redis */ public class RedisSessionDAO extends AbstractSessionDAO { private static Logger logger = LoggerFactory.getLogger(RedisSessionDAO.class); private static final String DEFAULT_SESSION_KEY_PREFIX = "shiro:session:"; private String keyPrefix = DEFAULT_SESSION_KEY_PREFIX; /** * doReadSession be called about 10 times when login. * Save Session in ThreadLocal to resolve this problem. sessionInMemoryTimeout is expiration of Session in ThreadLocal. * The default value is 1000 milliseconds (1s). * Most of time, you don't need to change it. * * You can turn it off by setting sessionInMemoryEnabled to false */ private static final long DEFAULT_SESSION_IN_MEMORY_TIMEOUT = 1000L; private long sessionInMemoryTimeout = DEFAULT_SESSION_IN_MEMORY_TIMEOUT; private static final boolean DEFAULT_SESSION_IN_MEMORY_ENABLED = true; private boolean sessionInMemoryEnabled = DEFAULT_SESSION_IN_MEMORY_ENABLED; private static ThreadLocal sessionsInThread = new ThreadLocal(); /** * expire time in seconds. * NOTE: Please make sure expire is longer than session.getTimeout(), * otherwise you might need the issue that session in Redis got erased when the Session is still available * * DEFAULT_EXPIRE: use the timeout of session instead of setting it by yourself * NO_EXPIRE: never expire */ private static final int DEFAULT_EXPIRE = -2; private static final int NO_EXPIRE = -1; private int expire = DEFAULT_EXPIRE; private static final int MILLISECONDS_IN_A_SECOND = 1000; /** * redisManager used for communicate with Redis */ private IRedisManager redisManager; /** * Serializer of key */ private RedisSerializer keySerializer = new StringSerializer(); /** * Serializer of value */ private RedisSerializer valueSerializer = new ObjectSerializer(); /** * save/update session * @param session * @throws UnknownSessionException */ @Override public void update(Session session) throws UnknownSessionException { if (this.sessionInMemoryEnabled) { this.removeExpiredSessionInMemory(); } this.saveSession(session); if (this.sessionInMemoryEnabled) { this.setSessionToThreadLocal(session.getId(), session); } } private void saveSession(Session session) throws UnknownSessionException { if (session == null || session.getId() == null) { logger.error("session or session id is null"); throw new UnknownSessionException("session or session id is null"); } byte[] key; byte[] value; try { key = keySerializer.serialize(getRedisSessionKey(session.getId())); value = valueSerializer.serialize(session); } catch (SerializationException e) { logger.error("serialize session error. session id=" + session.getId()); throw new UnknownSessionException(e); } if (expire == DEFAULT_EXPIRE) { redisManager.set(key, value, (int) (session.getTimeout() / MILLISECONDS_IN_A_SECOND)); return; } if (expire != NO_EXPIRE && expire * MILLISECONDS_IN_A_SECOND < session.getTimeout()) { logger.warn("Redis session expire time: " + (expire * MILLISECONDS_IN_A_SECOND) + " is less than Session timeout: " + session.getTimeout() + " . It may cause some problems."); } redisManager.set(key, value, expire); } /** * delete session * @param session */ @Override public void delete(Session session) { if (this.sessionInMemoryEnabled) { this.removeExpiredSessionInMemory(); } if (session == null || session.getId() == null) { logger.error("session or session id is null"); return; } if (this.sessionInMemoryEnabled) { this.delSessionFromThreadLocal(session.getId()); } try { redisManager.del(keySerializer.serialize(getRedisSessionKey(session.getId()))); } catch (SerializationException e) { logger.error("delete session error. session id=" + session.getId()); } } /** * get all active sessions * @return */ @Override public Collection<Session> getActiveSessions() { if (this.sessionInMemoryEnabled) { this.removeExpiredSessionInMemory(); } Set<Session> sessions = new HashSet<Session>(); try { Set<byte[]> keys = redisManager.keys(keySerializer.serialize(this.keyPrefix + "*")); if (keys != null && keys.size() > 0) { for (byte[] key:keys) { Session s = (Session) valueSerializer.deserialize(redisManager.get(key)); sessions.add(s); } } } catch (SerializationException e) { logger.error("get active sessions error."); } return sessions; } @Override protected Serializable doCreate(Session session) { if (this.sessionInMemoryEnabled) { this.removeExpiredSessionInMemory(); } if (session == null) { logger.error("session is null"); throw new UnknownSessionException("session is null"); } Serializable sessionId = this.generateSessionId(session); this.assignSessionId(session, sessionId); this.saveSession(session); return sessionId; } /** * I change * @param sessionId * @return */ @Override protected Session doReadSession(Serializable sessionId) { if (this.sessionInMemoryEnabled) { this.removeExpiredSessionInMemory(); } setSessionToThreadLocal(sessionId, session); } } catch (SerializationException e) { logger.error("read session error. settionId=" + sessionId); } return session; } return session; } } Session session = null; try { String sessionRedisKey = getRedisSessionKey(sessionId); logger.debug("read session: " + sessionRedisKey + " from Redis"); session = (Session) valueSerializer.deserialize(redisManager.get(keySerializer.serialize(sessionRedisKey))); if (this.sessionInMemoryEnabled) { setSessionToThreadLocal(sessionId, session); } } catch (SerializationException e) { logger.error("read session error. sessionId: " + sessionId); } return session; } private void setSessionToThreadLocal(Serializable sessionId, Session session) { this.initSessionsInThread(); Map<Serializable, SessionInMemory> sessionMap = (Map<Serializable, SessionInMemory>) sessionsInThread.get(); sessionMap.put(sessionId, this.createSessionInMemory(session)); } private void delSessionFromThreadLocal(Serializable sessionId) { Map<Serializable, SessionInMemory> sessionMap = (Map<Serializable, SessionInMemory>) sessionsInThread.get(); if (sessionMap == null) { return; } sessionMap.remove(sessionId); } private SessionInMemory createSessionInMemory(Session session) { SessionInMemory sessionInMemory = new SessionInMemory(); sessionInMemory.setCreateTime(new Date()); sessionInMemory.setSession(session); return sessionInMemory; } private void initSessionsInThread() { Map<Serializable, SessionInMemory> sessionMap = (Map<Serializable, SessionInMemory>) sessionsInThread.get(); if (sessionMap == null) { sessionMap = new HashMap<Serializable, SessionInMemory>(); sessionsInThread.set(sessionMap); } } private void removeExpiredSessionInMemory() { Map<Serializable, SessionInMemory> sessionMap = (Map<Serializable, SessionInMemory>) sessionsInThread.get(); if (sessionMap == null) { return; } Iterator<Serializable> it = sessionMap.keySet().iterator(); while (it.hasNext()) { Serializable sessionId = it.next(); SessionInMemory sessionInMemory = sessionMap.get(sessionId); if (sessionInMemory == null) { it.remove(); continue; } long liveTime = getSessionInMemoryLiveTime(sessionInMemory); if (liveTime > sessionInMemoryTimeout) { it.remove(); } } if (sessionMap.size() == 0) { sessionsInThread.remove(); } } private Session getSessionFromThreadLocal(Serializable sessionId) { if (sessionsInThread.get() == null) { return null; } Map<Serializable, SessionInMemory> sessionMap = (Map<Serializable, SessionInMemory>) sessionsInThread.get(); SessionInMemory sessionInMemory = sessionMap.get(sessionId); if (sessionInMemory == null) { return null; } logger.debug("read session from memory"); return sessionInMemory.getSession(); } private long getSessionInMemoryLiveTime(SessionInMemory sessionInMemory) { Date now = new Date(); return now.getTime() - sessionInMemory.getCreateTime().getTime(); } private String getRedisSessionKey(Serializable sessionId) { return this.keyPrefix + sessionId; } public IRedisManager getRedisManager() { return redisManager; } public void setRedisManager(IRedisManager redisManager) { this.redisManager = redisManager; } public String getKeyPrefix() { return keyPrefix; } public void setKeyPrefix(String keyPrefix) { this.keyPrefix = keyPrefix; } public RedisSerializer getKeySerializer() { return keySerializer; } public void setKeySerializer(RedisSerializer keySerializer) { this.keySerializer = keySerializer; } public RedisSerializer getValueSerializer() { return valueSerializer; } public void setValueSerializer(RedisSerializer valueSerializer) { this.valueSerializer = valueSerializer; } public long getSessionInMemoryTimeout() { return sessionInMemoryTimeout; } public void setSessionInMemoryTimeout(long sessionInMemoryTimeout) { this.sessionInMemoryTimeout = sessionInMemoryTimeout; } public int getExpire() { return expire; } public void setExpire(int expire) { this.expire = expire; } public boolean getSessionInMemoryEnabled() { return sessionInMemoryEnabled; } public void setSessionInMemoryEnabled(boolean sessionInMemoryEnabled) { this.sessionInMemoryEnabled = sessionInMemoryEnabled; } public static ThreadLocal getSessionsInThread() { return sessionsInThread; } } <MSG> Merge pull request #107 from qixiaobo/fix-typo 修改拼写错误 <DFF> @@ -188,7 +188,7 @@ public class RedisSessionDAO extends AbstractSessionDAO { setSessionToThreadLocal(sessionId, session); } } catch (SerializationException e) { - logger.error("read session error. settionId=" + sessionId); + logger.error("read session error. sessionId=" + sessionId); } return session; }
1
Merge pull request #107 from qixiaobo/fix-typo
1
.java
java
mit
alexxiyang/shiro-redis
1769
<NME> README.md <BEF> shiro-redis ============= ## Introduction shiro only provide the support of ehcache and concurrentHashMap. Here is an implement of redis cache can be used by shiro. Hope it will help you! ## Documentation * directly download jar file Download shiro-redis.jar in bin folder and add it into your classpath. * add maven dependency ------------------------------------ <dependency> <groupId>org.crazycake</groupId> <artifactId>shiro-redis</artifactId> <version>2.4.2-RELEASE</version> </dependency> ------------------------------------ Edit shiro.ini ```properties <MSG> Update README.md update readme <DFF> @@ -10,14 +10,14 @@ You can chose these 2 ways to include shiro-redis into your project * directly download jar file Download shiro-redis.jar in bin folder and add it into your classpath. * add maven dependency - ------------------------------------- +```xml <dependency> <groupId>org.crazycake</groupId> <artifactId>shiro-redis</artifactId> <version>2.4.2-RELEASE</version> </dependency> ------------------------------------- +```xml + Edit shiro.ini ```properties
3
Update README.md
3
.md
md
mit
alexxiyang/shiro-redis
1770
<NME> README.md <BEF> shiro-redis ============= view [Documentation](http://alexxiyang.github.io/shiro-redis/) Official documentation [is located here](http://alexxiyang.github.io/shiro-redis/). <MSG> Use github Page <DFF> @@ -1,4 +1,9 @@ shiro-redis ============= +[![Build Status](https://travis-ci.org/alexxiyang/shiro-redis.svg?branch=master)](https://travis-ci.org/alexxiyang/shiro-redis) + +shiro only provide the support of ehcache and concurrentHashMap. Here is an implement of redis cache can be used by shiro. Hope it will help you! + + view [Documentation](http://alexxiyang.github.io/shiro-redis/)
5
Use github Page
0
.md
md
mit
alexxiyang/shiro-redis
1771
<NME> index.rst <BEF> Hitch Quickstart ================ This is a basic introduction to getting your first test up and running. Prerequisites ------------- To begin, the minimum you need to have python3 and virtualenv installed on your system. On Ubuntu:: $ sudo apt-get install python3 python-virtualenv On a Mac:: $ brew install python3 $ pip install -U setuptools pip virtualenv Install ------- The first thing that you need to install after this is the hitch bootstrap script:: $ pip install hitch or:: $ sudo pip install hitch See :doc:`faq/why_install_hitch_on_the_system_path`. Create your test directory -------------------------- First create a directory inside your project to put your tests in. For example:: $ mkdir tests $ cd tests Inside this, directory, run the following command to initialize hitch. $ hitch init This will create a file called hitchreqs.txt, which contains a list of pypi requirements to use in the hitch virtualenv. These are the packages required to run your *testing* code - there is no need to add packages here which your application needs to run. It will run in its own segregated virtualenv. The .hitch directory contains all the necessary generated files required to run your tests, including the testing virtualenv. This directory should be gitignored. If you delete it, you can regenerate it just by running hitch init again. Create your first test and engine --------------------------------- To run your first test, you need an engine. An engine file simply contains a class with a lot of methods that your tests can invoke. Create an engine called 'engine.py' like so:: import hitchtest from os import path PROJECT_DIRECTORY = path.abspath(path.join(path.dirname(__file__), '..')) class YourProjectTestExecutionEngine(hitchtest.ExecutionEngine): def set_up(self): pass def pause(self): hitchtest.ipython_embed() def tear_down(self): pass And a test called 'stub.test', written in YAML, like so:: - name: Stub engine: engine.py:YourProjectTestExecutionEngine scenario: - Pause You can run this test by running the command inside your tests directory:: $ hitch test stub.test And voila, you should see an IPython prompt. It runs "set_up", followed by "pause" (as specified in the scenario), which enters IPython and finally runs "tear_down". You can exit the IPython prompt by typing ctrl-D. Now that you have the skeleton of a test, you can continue building the other necessary parts of your testing infrastructure. Contents: Install prerequisites --------------------- You should have a reasonably up to date Ubuntu, Debian, Arch, Fedora or Mac. On Ubuntu/Debian:: $ sudo apt-get install python3 python-pip python-virtualenv $ sudo pip install --upgrade hitch On Mac OS X:: $ brew install python python3 $ pip install --upgrade hitch virtualenv On Arch:: $ sudo pacman -Sy python python-virtualenv $ sudo pip install --upgrade hitch On Fedora/RHEL/CentOS:: $ sudo yum install python3 python-virtualenv python-pip python3 $ sudo pip install --upgrade hitch .. note:: The 'hitch' package (the bootstrapper) is a small python package with no dependencies. Create your test directory -------------------------- Create a directory inside the root of your project to put your tests in. For example:: ~/yourproject$ mkdir tests ~/yourproject$ cd tests ~/yourproject/tests$ If you already have a tests directory you can call it something else. Create the hitch environment ---------------------------- To initialize a hitch environment, run hitch init in your tests directory:: ~/yourproject/tests$ hitch init This will: * Install any necessary system packages required to run hitch. * Create a .hitch directory, create a python 3 virtualenv in it and install all the necessary packages to run hitch tests there. * Ask you some basic questions about the project which you are testing. * Create a skeleton hitch project template for you to use based upon the answers. The skeleton template will include all of the following: * :doc:`/glossary/hitchreqs.txt` * :doc:`/glossary/engine.py` * tdd.settings (:doc:`/glossary/hitch_settings`) * ci.settings * all.settings * :doc:`/glossary/stub.test` * README.rst You might want to take a look around these files. They all try to be self-explanatory. Running your first test ----------------------- You can now run the stub test. Try running it in test driven development mode:: $ hitch test stub.test --settings tdd.settings The first time you run this command it *may take a while* (up to 25 minutes depending upon what you answered). .. note:: :doc:`/faq/why_does_the_first_test_run_take_so_long` This might be a good time to take a break. While you're at it, subscribe to the `hitch subreddit <https://reddit.com/r/hitchtest>`_ and `twitter feed <https://twitter.com/testhitch>`_ for updates and news. Back? ----- .. note:: If the stub test failed, please `raise an issue <https://github.com/hitchtest/hitch/issues/new>`_. Once the test run is done setting up, if there were no problems, you should see this:: Python 3.4.3 (default, Jul 28 2015, 18:20:59) Type "copyright", "credits" or "license" for more information. IPython 4.0.0 -- An enhanced Interactive Python. ? -> Introduction and overview of IPython's features. %quickref -> Quick reference. help -> Python's own help system. object? -> Details about 'object', use 'object??' for extra details. SUCCESS In [1]: This is the interactive prompt that appears during the pause step. This is an :doc:`/glossary/ipython` prompt that can be used to interact with your app, inspect logs and try out test steps. The components you selected during the set up should also be running. For example, if you chose postgres, the latest version of postgres will have been installed in the ~/.hitchpkg directory and it will be running and accessible. To exit, simply hit ctrl-D. This will shut everything down and then quit. You're now ready to start writing new tests. Happy testing! .. note:: Was there anything that went wrong or was confusing? Please tell us! Help with :doc:`/misc/clarifying_documentation`. Further reading --------------- * :doc:`/howto/web_applications` * :doc:`/howto/command_line_applications` Advanced topics --------------- * :doc:`/howto/test_driven_development` * :doc:`/howto/parameterize_test_cases` * :doc:`/howto/external_apis` * :doc:`/howto/continuous_integration` Plugin Documentation -------------------- .. toctree:: :glob: :maxdepth: 1 /plugins/* .. note:: Need tutorials for any other topics? `Please raise a ticket <https://github.com/hitchtest/hitch/issues/new>`_. <MSG> DOCS : Overhaul of docs. <DFF> @@ -1,107 +1,5 @@ -Hitch Quickstart -================ - -This is a basic introduction to getting your first test up and running. - -Prerequisites -------------- - -To begin, the minimum you need to have python3 and virtualenv installed on your system. - -On Ubuntu:: - - $ sudo apt-get install python3 python-virtualenv - -On a Mac:: - - $ brew install python3 - - $ pip install -U setuptools pip virtualenv - -Install -------- - -The first thing that you need to install after this is the hitch bootstrap -script:: - - $ pip install hitch - -or:: - - $ sudo pip install hitch - - -See :doc:`faq/why_install_hitch_on_the_system_path`. - - -Create your test directory --------------------------- - -First create a directory inside your project to put your tests in. For example:: - - $ mkdir tests - $ cd tests - -Inside this, directory, run the following command to initialize hitch. - - $ hitch init - -This will create a file called hitchreqs.txt, which contains a list of -pypi requirements to use in the hitch virtualenv. These are the packages -required to run your *testing* code - there is no need to add packages -here which your application needs to run. It will run in its own segregated -virtualenv. - -The .hitch directory contains all the necessary generated files -required to run your tests, including the testing virtualenv. - -This directory should be gitignored. If you delete it, you can regenerate -it just by running hitch init again. - -Create your first test and engine ---------------------------------- - -To run your first test, you need an engine. An engine file simply contains -a class with a lot of methods that your tests can invoke. - -Create an engine called 'engine.py' like so:: - - import hitchtest - from os import path - - PROJECT_DIRECTORY = path.abspath(path.join(path.dirname(__file__), '..')) - - class YourProjectTestExecutionEngine(hitchtest.ExecutionEngine): - def set_up(self): - pass - - def pause(self): - hitchtest.ipython_embed() - - def tear_down(self): - pass - -And a test called 'stub.test', written in YAML, like so:: - - - name: Stub - engine: engine.py:YourProjectTestExecutionEngine - scenario: - - Pause - -You can run this test by running the command inside your tests directory:: - - $ hitch test stub.test - -And voila, you should see an IPython prompt. - -It runs "set_up", followed by "pause" (as specified in the scenario), which -enters IPython and finally runs "tear_down". - -You can exit the IPython prompt by typing ctrl-D. - -Now that you have the skeleton of a test, you can continue building the -other necessary parts of your testing infrastructure. - +Quickstart +========== Contents:
2
DOCS : Overhaul of docs.
104
.rst
rst
agpl-3.0
hitchtest/hitch
1772
<NME> hitchvagrant.rst <BEF> ADDFILE <MSG> DOCS : Updates to docs <DFF> @@ -0,0 +1,40 @@ +Vagrant +======= + +.. note:: + + This documentation applies to the latest version of hitchvagrant. + + +Installation +------------ + +If it is not already installed, install the hitch vagrant package:: + + $ hitch install hitchvagrant + + +Setup +----- + +To use, define the service after initializing the :doc:`/api/service_bundle`: + +Like so: + +.. code-block:: python + + import hitchvagrant + + # Service definition in engine's setUp: + self.services['MyVM'] = hitchvagrant.VagrantService( + directory="vagrantubuntu/", # Directory containing Vagrantfile (optional) + ) + + +Interaction +----------- + +Once it is running, you can run ssh commands against the machine:: + + In [1]: self.services['MyVM'].ssh("pwd").run() + /vagrant
40
DOCS : Updates to docs
0
.rst
rst
agpl-3.0
hitchtest/hitch
1773
<NME> RedisSessionDAO.java <BEF> package org.crazycake.shiro; import java.io.Serializable; import java.util.Collection; import java.util.HashSet; import java.util.Iterator; import java.util.Set; import org.apache.shiro.session.Session; import org.crazycake.shiro.serializer.RedisSerializer; import org.crazycake.shiro.serializer.StringSerializer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.Serializable; import java.util.*; /** * shiro-redis的session对象前缀 */ private final String SHIRO_REDIS_SESSION_PRE = "shiro_redis_session:"; private RedisManager redisManager; @Override public void update(Session session) throws UnknownSessionException { this.saveSession(session); * The default value is 1000 milliseconds (1s). * Most of time, you don't need to change it. * * You can turn it off by setting sessionInMemoryEnabled to false */ private static final long DEFAULT_SESSION_IN_MEMORY_TIMEOUT = 1000L; private long sessionInMemoryTimeout = DEFAULT_SESSION_IN_MEMORY_TIMEOUT; private static final boolean DEFAULT_SESSION_IN_MEMORY_ENABLED = true; private boolean sessionInMemoryEnabled = DEFAULT_SESSION_IN_MEMORY_ENABLED; private static ThreadLocal sessionsInThread = new ThreadLocal(); /** * expire time in seconds. * NOTE: Please make sure expire is longer than session.getTimeout(), * otherwise you might need the issue that session in Redis got erased when the Session is still available * * DEFAULT_EXPIRE: use the timeout of session instead of setting it by yourself * NO_EXPIRE: never expire */ private static final int DEFAULT_EXPIRE = -2; private static final int NO_EXPIRE = -1; private int expire = DEFAULT_EXPIRE; private static final int MILLISECONDS_IN_A_SECOND = 1000; /** * redisManager used for communicate with Redis */ public Collection<Session> getActiveSessions() { Set<Session> sessions = new HashSet<Session>(); Set<byte[]> keys = redisManager.keys(this.SHIRO_REDIS_SESSION_PRE + "*"); if(keys != null && keys.size()>0){ for(byte[] key:keys){ Session s = (Session)SerializeUtils.deserialize(redisManager.get(key)); /** * Serializer of value */ private RedisSerializer valueSerializer = new ObjectSerializer(); /** * save/update session * @param session * @throws UnknownSessionException */ @Override public void update(Session session) throws UnknownSessionException { if (this.sessionInMemoryEnabled) { this.removeExpiredSessionInMemory(); } this.saveSession(session); if (this.sessionInMemoryEnabled) { this.setSessionToThreadLocal(session.getId(), session); } } private void saveSession(Session session) throws UnknownSessionException { if (session == null || session.getId() == null) { logger.error("session or session id is null"); throw new UnknownSessionException("session or session id is null"); } byte[] key; byte[] value; try { * @return */ private byte[] getByteKey(Serializable sessionId){ String preKey = this.SHIRO_REDIS_SESSION_PRE + sessionId; return preKey.getBytes(); } redisManager.set(key, value, (int) (session.getTimeout() / MILLISECONDS_IN_A_SECOND)); return; } if (expire != NO_EXPIRE && expire * MILLISECONDS_IN_A_SECOND < session.getTimeout()) { logger.warn("Redis session expire time: " + (expire * MILLISECONDS_IN_A_SECOND) + " is less than Session timeout: " + session.getTimeout() + " . It may cause some problems."); */ this.redisManager.init(); } } } } /** * get all active sessions * @return */ @Override public Collection<Session> getActiveSessions() { if (this.sessionInMemoryEnabled) { this.removeExpiredSessionInMemory(); } Set<Session> sessions = new HashSet<Session>(); try { Set<byte[]> keys = redisManager.keys(keySerializer.serialize(this.keyPrefix + "*")); if (keys != null && keys.size() > 0) { for (byte[] key:keys) { Session s = (Session) valueSerializer.deserialize(redisManager.get(key)); sessions.add(s); } } } catch (SerializationException e) { logger.error("get active sessions error."); } return sessions; } @Override protected Serializable doCreate(Session session) { if (this.sessionInMemoryEnabled) { this.removeExpiredSessionInMemory(); } if (session == null) { logger.error("session is null"); throw new UnknownSessionException("session is null"); } Serializable sessionId = this.generateSessionId(session); this.assignSessionId(session, sessionId); this.saveSession(session); return sessionId; } /** * I change * @param sessionId * @return */ @Override protected Session doReadSession(Serializable sessionId) { if (this.sessionInMemoryEnabled) { this.removeExpiredSessionInMemory(); } if (sessionId == null) { logger.warn("session id is null"); return null; } if (this.sessionInMemoryEnabled) { Session session = getSessionFromThreadLocal(sessionId); if (session != null) { return session; } } Session session = null; try { String sessionRedisKey = getRedisSessionKey(sessionId); logger.debug("read session: " + sessionRedisKey + " from Redis"); session = (Session) valueSerializer.deserialize(redisManager.get(keySerializer.serialize(sessionRedisKey))); if (this.sessionInMemoryEnabled) { setSessionToThreadLocal(sessionId, session); } } catch (SerializationException e) { logger.error("read session error. sessionId: " + sessionId); } return session; } private void setSessionToThreadLocal(Serializable sessionId, Session session) { this.initSessionsInThread(); Map<Serializable, SessionInMemory> sessionMap = (Map<Serializable, SessionInMemory>) sessionsInThread.get(); sessionMap.put(sessionId, this.createSessionInMemory(session)); } private void delSessionFromThreadLocal(Serializable sessionId) { Map<Serializable, SessionInMemory> sessionMap = (Map<Serializable, SessionInMemory>) sessionsInThread.get(); if (sessionMap == null) { return; } sessionMap.remove(sessionId); } private SessionInMemory createSessionInMemory(Session session) { SessionInMemory sessionInMemory = new SessionInMemory(); sessionInMemory.setCreateTime(new Date()); sessionInMemory.setSession(session); return sessionInMemory; } private void initSessionsInThread() { Map<Serializable, SessionInMemory> sessionMap = (Map<Serializable, SessionInMemory>) sessionsInThread.get(); if (sessionMap == null) { sessionMap = new HashMap<Serializable, SessionInMemory>(); sessionsInThread.set(sessionMap); } } private void removeExpiredSessionInMemory() { Map<Serializable, SessionInMemory> sessionMap = (Map<Serializable, SessionInMemory>) sessionsInThread.get(); if (sessionMap == null) { return; } Iterator<Serializable> it = sessionMap.keySet().iterator(); while (it.hasNext()) { Serializable sessionId = it.next(); SessionInMemory sessionInMemory = sessionMap.get(sessionId); if (sessionInMemory == null) { it.remove(); continue; } long liveTime = getSessionInMemoryLiveTime(sessionInMemory); if (liveTime > sessionInMemoryTimeout) { it.remove(); } } if (sessionMap.size() == 0) { sessionsInThread.remove(); } } private Session getSessionFromThreadLocal(Serializable sessionId) { if (sessionsInThread.get() == null) { return null; } Map<Serializable, SessionInMemory> sessionMap = (Map<Serializable, SessionInMemory>) sessionsInThread.get(); SessionInMemory sessionInMemory = sessionMap.get(sessionId); if (sessionInMemory == null) { return null; } logger.debug("read session from memory"); return sessionInMemory.getSession(); } private long getSessionInMemoryLiveTime(SessionInMemory sessionInMemory) { Date now = new Date(); return now.getTime() - sessionInMemory.getCreateTime().getTime(); } private String getRedisSessionKey(Serializable sessionId) { return this.keyPrefix + sessionId; } public IRedisManager getRedisManager() { return redisManager; } public void setRedisManager(IRedisManager redisManager) { this.redisManager = redisManager; } public String getKeyPrefix() { return keyPrefix; } public void setKeyPrefix(String keyPrefix) { this.keyPrefix = keyPrefix; } public RedisSerializer getKeySerializer() { return keySerializer; } public void setKeySerializer(RedisSerializer keySerializer) { this.keySerializer = keySerializer; } public RedisSerializer getValueSerializer() { return valueSerializer; } public void setValueSerializer(RedisSerializer valueSerializer) { this.valueSerializer = valueSerializer; } public long getSessionInMemoryTimeout() { return sessionInMemoryTimeout; } public void setSessionInMemoryTimeout(long sessionInMemoryTimeout) { this.sessionInMemoryTimeout = sessionInMemoryTimeout; } public int getExpire() { return expire; } public void setExpire(int expire) { this.expire = expire; } public boolean getSessionInMemoryEnabled() { return sessionInMemoryEnabled; } public void setSessionInMemoryEnabled(boolean sessionInMemoryEnabled) { this.sessionInMemoryEnabled = sessionInMemoryEnabled; } public static ThreadLocal getSessionsInThread() { return sessionsInThread; } } <MSG> Added support for explicit declaration of Redis key prefixes for both session and cache keys. <DFF> @@ -3,7 +3,6 @@ package org.crazycake.shiro; import java.io.Serializable; import java.util.Collection; import java.util.HashSet; -import java.util.Iterator; import java.util.Set; import org.apache.shiro.session.Session; @@ -18,10 +17,13 @@ public class RedisSessionDAO extends AbstractSessionDAO { /** * shiro-redis的session对象前缀 */ - private final String SHIRO_REDIS_SESSION_PRE = "shiro_redis_session:"; - private RedisManager redisManager; + /** + * The Redis key prefix for the sessions + */ + private String keyPrefix = "shiro_redis_session:"; + @Override public void update(Session session) throws UnknownSessionException { this.saveSession(session); @@ -58,7 +60,7 @@ public class RedisSessionDAO extends AbstractSessionDAO { public Collection<Session> getActiveSessions() { Set<Session> sessions = new HashSet<Session>(); - Set<byte[]> keys = redisManager.keys(this.SHIRO_REDIS_SESSION_PRE + "*"); + Set<byte[]> keys = redisManager.keys(this.keyPrefix + "*"); if(keys != null && keys.size()>0){ for(byte[] key:keys){ Session s = (Session)SerializeUtils.deserialize(redisManager.get(key)); @@ -94,7 +96,7 @@ public class RedisSessionDAO extends AbstractSessionDAO { * @return */ private byte[] getByteKey(Serializable sessionId){ - String preKey = this.SHIRO_REDIS_SESSION_PRE + sessionId; + String preKey = this.keyPrefix + sessionId; return preKey.getBytes(); } @@ -110,5 +112,24 @@ public class RedisSessionDAO extends AbstractSessionDAO { */ this.redisManager.init(); } + + /** + * Returns the Redis session keys + * prefix. + * @return The prefix + */ + public String getKeyPrefix() { + return keyPrefix; + } + + /** + * Sets the Redis sessions key + * prefix. + * @param keyPrefix The prefix + */ + public void setKeyPrefix(String keyPrefix) { + this.keyPrefix = keyPrefix; + } + }
26
Added support for explicit declaration of Redis key prefixes for both session and cache keys.
5
.java
java
mit
alexxiyang/shiro-redis
1774
<NME> README.md <BEF> shiro-redis ============= ## Introduction How to use it? =========== edit in shiro.ini ```properties <MSG> Update README.md <DFF> @@ -6,6 +6,8 @@ shiro only provide the support of ehcache and concurrentHashMap. Here is an impl How to use it? =========== +copy /bin/shiro-redis.jar to your classpath, such as 'webapp/WEB-INF/lib' + edit in shiro.ini ```properties
2
Update README.md
0
.md
md
mit
alexxiyang/shiro-redis
1775
<NME> SerializeUtils.java <BEF> ADDFILE <MSG> modify jedis usage to JedisPool <DFF> @@ -0,0 +1,85 @@ +package org.crazycake.shiro; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.io.Serializable; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class SerializeUtils { + + private static Logger logger = LoggerFactory.getLogger(SerializeUtils.class); + + /** + * 反序列化 + * @param bytes + * @return + */ + public static Object deserialize(byte[] bytes) { + + Object result = null; + + if (isEmpty(bytes)) { + return null; + } + + try { + ByteArrayInputStream byteStream = new ByteArrayInputStream(bytes); + try { + ObjectInputStream objectInputStream = new ObjectInputStream(byteStream); + try { + result = objectInputStream.readObject(); + } + catch (ClassNotFoundException ex) { + throw new Exception("Failed to deserialize object type", ex); + } + } + catch (Throwable ex) { + throw new Exception("Failed to deserialize", ex); + } + } catch (Exception e) { + logger.error("Failed to deserialize",e); + } + return result; + } + + public static boolean isEmpty(byte[] data) { + return (data == null || data.length == 0); + } + + /** + * 序列化 + * @param object + * @return + */ + public static byte[] serialize(Object object) { + + byte[] result = null; + + if (object == null) { + return new byte[0]; + } + try { + ByteArrayOutputStream byteStream = new ByteArrayOutputStream(128); + try { + if (!(object instanceof Serializable)) { + throw new IllegalArgumentException(SerializeUtils.class.getSimpleName() + " requires a Serializable payload " + + "but received an object of type [" + object.getClass().getName() + "]"); + } + ObjectOutputStream objectOutputStream = new ObjectOutputStream(byteStream); + objectOutputStream.writeObject(object); + objectOutputStream.flush(); + result = byteStream.toByteArray(); + } + catch (Throwable ex) { + throw new Exception("Failed to serialize", ex); + } + } catch (Exception ex) { + logger.error("Failed to serialize",ex); + } + return result; + } +}
85
modify jedis usage to JedisPool
0
.java
java
mit
alexxiyang/shiro-redis
1776
<NME> setup.py <BEF> # -*- coding: utf-8 -* from setuptools.command.install import install from setuptools import find_packages from setuptools import setup from sys import version_info, stderr, exit import codecs import sys import os if sys.platform == "win32" or sys.platform == "cygwin": stderr.write("Hitch will not work on Windows. Sorry.\n") return codecs.open(os.path.join(os.path.abspath(os.path.dirname(__file__)), *parts), 'r').read() setup(name="hitch", version="0.4.3", description="Loosely coupled testing framework", long_description=read('README.rst'), classifiers=[ if version_info[0] == 3: if version_info[1] < 3: stderr.write("The hitch bootstrapper will not run on python 3.0.x, 3.1.x or 3.2.x.\n") exit(1) def read(*parts): # intentionally *not* adding an encoding option to open # see here: https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690 return codecs.open(os.path.join(os.path.abspath(os.path.dirname(__file__)), *parts), 'r').read() setup(name="hitch", version="0.5.7", description="Bootstrapper for hitchtest - the loosely coupled integration testing framework", long_description=read('README.rst'), classifiers=[ 'Development Status :: 3 - Alpha', 'Intended Audience :: Developers', 'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)', 'Topic :: Software Development :: Quality Assurance', 'Topic :: Software Development :: Testing', 'Topic :: Software Development :: Libraries', 'Operating System :: Unix', 'Environment :: Console', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4', ], keywords='hitch testing framework bdd tdd declarative tests bootstrap virtualenv', author='Colm O\'Connor', author_email='[email protected]', url='https://hitchtest.readthedocs.org/', license='AGPL', install_requires=[], packages=find_packages(exclude=["docs", ]), package_data={}, entry_points=dict(console_scripts=['hitch=hitch:commandline.run',]), zip_safe=False, include_package_data=True, ) <MSG> RELEASE : Bumped version. <DFF> @@ -13,7 +13,7 @@ def read(*parts): return codecs.open(os.path.join(os.path.abspath(os.path.dirname(__file__)), *parts), 'r').read() setup(name="hitch", - version="0.4.3", + version="0.4.4", description="Loosely coupled testing framework", long_description=read('README.rst'), classifiers=[
1
RELEASE : Bumped version.
1
.py
py
agpl-3.0
hitchtest/hitch
1777
<NME> voc_layers.py <BEF> ADDFILE <MSG> add examples/Seg-FCN <DFF> @@ -0,0 +1,239 @@ +# -------------------------------------------------------- +# Seg-FCN for Dragon +# Copyright (c) 2017 SeetaTech +# Source Code by Evan Shelhamer +# Re-Written by Ting Pan +# -------------------------------------------------------- + +import dragon.vm.caffe as caffe +import dragon.core.workspace as ws + +import numpy as np +from PIL import Image + +import random + +class VOCSegDataLayer(caffe.Layer): + """ + Load (input image, label image) pairs from PASCAL VOC + one-at-a-time while reshaping the net to preserve dimensions. + + Use this to feed data to a fully convolutional network. + """ + + def setup(self, bottom, top): + """ + Setup data layer according to parameters: + + - voc_dir: path to PASCAL VOC year dir + - split: train / val / test + - mean: tuple of mean values to subtract + - randomize: load in random order (default: True) + - seed: seed for randomization (default: None / current time) + + for PASCAL VOC semantic segmentation. + + example + + params = dict(voc_dir="/path/to/PASCAL/VOC2011", + mean=(104.00698793, 116.66876762, 122.67891434), + split="val") + """ + # config + params = eval(self.param_str) + self.voc_dir = params['voc_dir'] + self.split = params['split'] + self.mean = np.array(params['mean']) + self.random = params.get('randomize', True) + self.seed = params.get('seed', None) + + # two tops: data and label + if len(top) != 2: + raise Exception("Need to define two tops: data and label.") + # data layers have no bottoms + if len(bottom) != 0: + raise Exception("Do not define a bottom.") + + # load indices for images and labels + split_f = '../data/{}.txt'.format(self.split) + self.indices = open(split_f, 'r').read().splitlines() + self.idx = 0 + + # make eval deterministic + if 'train' not in self.split: + self.random = False + + # randomization: seed and pick + if self.random: + random.seed(self.seed) + self.idx = random.randint(0, len(self.indices)-1) + + + def reshape(self, bottom, top): + # load image + label image pair + self.data = self.load_image(self.indices[self.idx]) + self.label = self.load_label(self.indices[self.idx]) + # reshape tops to fit (leading 1 is for batch dimension) + self.data = self.data.reshape(1, *self.data.shape) + self.label = self.label.reshape(1, *self.label.shape) + + + def forward(self, bottom, top): + # assign output + ws.FeedTensor(top[0], self.data) + ws.FeedTensor(top[1], self.label) + + # pick next input + if self.random: + self.idx = random.randint(0, len(self.indices)-1) + else: + self.idx += 1 + if self.idx == len(self.indices): + self.idx = 0 + + + def backward(self, top, propagate_down, bottom): + pass + + + def load_image(self, idx): + """ + Load input image and preprocess for Caffe: + - cast to float + - switch channels RGB -> BGR + - subtract mean + - transpose to channel x height x width order + """ + im = Image.open('{}/JPEGImages/{}.jpg'.format(self.voc_dir, idx)) + in_ = np.array(im, dtype=np.float32) + in_ = in_[:,:,::-1] + in_ -= self.mean + in_ = in_.transpose((2,0,1)) + return in_ + + + def load_label(self, idx): + """ + Load label image as 1 x height x width integer array of label indices. + The leading singleton dimension is required by the loss. + """ + im = Image.open('{}/SegmentationClass/{}.png'.format(self.voc_dir, idx)) + label = np.array(im, dtype=np.float32) + label = label[np.newaxis, ...] + return label + + +class SBDDSegDataLayer(caffe.Layer): + """ + Load (input image, label image) pairs from the SBDD extended labeling + of PASCAL VOC for semantic segmentation + one-at-a-time while reshaping the net to preserve dimensions. + + Use this to feed data to a fully convolutional network. + """ + + def setup(self, bottom, top): + """ + Setup data layer according to parameters: + + - sbdd_dir: path to SBDD `dataset` dir + - split: train / seg11valid + - mean: tuple of mean values to subtract + - randomize: load in random order (default: True) + - seed: seed for randomization (default: None / current time) + + for SBDD semantic segmentation. + + N.B.segv11alid is the set of segval11 that does not intersect with SBDD. + Find it here: https://gist.github.com/shelhamer/edb330760338892d511e. + + example + + params = dict(sbdd_dir="/path/to/SBDD/dataset", + mean=(104.00698793, 116.66876762, 122.67891434), + split="valid") + """ + # config + params = eval(self.param_str) + self.sbdd_dir = params['sbdd_dir'] + self.split = params['split'] + self.mean = np.array(params['mean']) + self.random = params.get('randomize', True) + self.seed = params.get('seed', None) + + # two tops: data and label + if len(top) != 2: + raise Exception("Need to define two tops: data and label.") + # data layers have no bottoms + if len(bottom) != 0: + raise Exception("Do not define a bottom.") + + # load indices for images and labels + split_f = '{}/{}.txt'.format(self.sbdd_dir, + self.split) + self.indices = open(split_f, 'r').read().splitlines() + self.idx = 0 + + # make eval deterministic + if 'train' not in self.split: + self.random = False + + # randomization: seed and pick + if self.random: + random.seed(self.seed) + self.idx = random.randint(0, len(self.indices)-1) + + + def reshape(self, bottom, top): + # load image + label image pair + self.data = self.load_image(self.indices[self.idx]) + self.label = self.load_label(self.indices[self.idx]) + # reshape tops to fit (leading 1 is for batch dimension) + self.data = self.data.reshape(1, *self.data.shape) + self.label = self.label.reshape(1, *self.label.shape) + + + def forward(self, bottom, top): + # assign output + ws.FeedTensor(top[0], self.data) + ws.FeedTensor(top[1], self.label) + + # pick next input + if self.random: + self.idx = random.randint(0, len(self.indices)-1) + else: + self.idx += 1 + if self.idx == len(self.indices): + self.idx = 0 + + + def backward(self, top, propagate_down, bottom): + pass + + + def load_image(self, idx): + """ + Load input image and preprocess for Caffe: + - cast to float + - switch channels RGB -> BGR + - subtract mean + - transpose to channel x height x width order + """ + im = Image.open('{}/img/{}.jpg'.format(self.sbdd_dir, idx)) + in_ = np.array(im, dtype=np.float32) + in_ = in_[:,:,::-1] + in_ -= self.mean + in_ = in_.transpose((2,0,1)) + return in_ + + + def load_label(self, idx): + """ + Load label image as 1 x height x width integer array of label indices. + The leading singleton dimension is required by the loss. + """ + import scipy.io + mat = scipy.io.loadmat('{}/cls/{}.mat'.format(self.sbdd_dir, idx)) + label = mat['GTcls'][0]['Segmentation'][0].astype(np.float32) + label = label[np.newaxis, ...] + return label
239
add examples/Seg-FCN
0
.py
py
bsd-2-clause
neopenx/Dragon
1778
<NME> voc_layers.py <BEF> ADDFILE <MSG> add examples/Seg-FCN <DFF> @@ -0,0 +1,239 @@ +# -------------------------------------------------------- +# Seg-FCN for Dragon +# Copyright (c) 2017 SeetaTech +# Source Code by Evan Shelhamer +# Re-Written by Ting Pan +# -------------------------------------------------------- + +import dragon.vm.caffe as caffe +import dragon.core.workspace as ws + +import numpy as np +from PIL import Image + +import random + +class VOCSegDataLayer(caffe.Layer): + """ + Load (input image, label image) pairs from PASCAL VOC + one-at-a-time while reshaping the net to preserve dimensions. + + Use this to feed data to a fully convolutional network. + """ + + def setup(self, bottom, top): + """ + Setup data layer according to parameters: + + - voc_dir: path to PASCAL VOC year dir + - split: train / val / test + - mean: tuple of mean values to subtract + - randomize: load in random order (default: True) + - seed: seed for randomization (default: None / current time) + + for PASCAL VOC semantic segmentation. + + example + + params = dict(voc_dir="/path/to/PASCAL/VOC2011", + mean=(104.00698793, 116.66876762, 122.67891434), + split="val") + """ + # config + params = eval(self.param_str) + self.voc_dir = params['voc_dir'] + self.split = params['split'] + self.mean = np.array(params['mean']) + self.random = params.get('randomize', True) + self.seed = params.get('seed', None) + + # two tops: data and label + if len(top) != 2: + raise Exception("Need to define two tops: data and label.") + # data layers have no bottoms + if len(bottom) != 0: + raise Exception("Do not define a bottom.") + + # load indices for images and labels + split_f = '../data/{}.txt'.format(self.split) + self.indices = open(split_f, 'r').read().splitlines() + self.idx = 0 + + # make eval deterministic + if 'train' not in self.split: + self.random = False + + # randomization: seed and pick + if self.random: + random.seed(self.seed) + self.idx = random.randint(0, len(self.indices)-1) + + + def reshape(self, bottom, top): + # load image + label image pair + self.data = self.load_image(self.indices[self.idx]) + self.label = self.load_label(self.indices[self.idx]) + # reshape tops to fit (leading 1 is for batch dimension) + self.data = self.data.reshape(1, *self.data.shape) + self.label = self.label.reshape(1, *self.label.shape) + + + def forward(self, bottom, top): + # assign output + ws.FeedTensor(top[0], self.data) + ws.FeedTensor(top[1], self.label) + + # pick next input + if self.random: + self.idx = random.randint(0, len(self.indices)-1) + else: + self.idx += 1 + if self.idx == len(self.indices): + self.idx = 0 + + + def backward(self, top, propagate_down, bottom): + pass + + + def load_image(self, idx): + """ + Load input image and preprocess for Caffe: + - cast to float + - switch channels RGB -> BGR + - subtract mean + - transpose to channel x height x width order + """ + im = Image.open('{}/JPEGImages/{}.jpg'.format(self.voc_dir, idx)) + in_ = np.array(im, dtype=np.float32) + in_ = in_[:,:,::-1] + in_ -= self.mean + in_ = in_.transpose((2,0,1)) + return in_ + + + def load_label(self, idx): + """ + Load label image as 1 x height x width integer array of label indices. + The leading singleton dimension is required by the loss. + """ + im = Image.open('{}/SegmentationClass/{}.png'.format(self.voc_dir, idx)) + label = np.array(im, dtype=np.float32) + label = label[np.newaxis, ...] + return label + + +class SBDDSegDataLayer(caffe.Layer): + """ + Load (input image, label image) pairs from the SBDD extended labeling + of PASCAL VOC for semantic segmentation + one-at-a-time while reshaping the net to preserve dimensions. + + Use this to feed data to a fully convolutional network. + """ + + def setup(self, bottom, top): + """ + Setup data layer according to parameters: + + - sbdd_dir: path to SBDD `dataset` dir + - split: train / seg11valid + - mean: tuple of mean values to subtract + - randomize: load in random order (default: True) + - seed: seed for randomization (default: None / current time) + + for SBDD semantic segmentation. + + N.B.segv11alid is the set of segval11 that does not intersect with SBDD. + Find it here: https://gist.github.com/shelhamer/edb330760338892d511e. + + example + + params = dict(sbdd_dir="/path/to/SBDD/dataset", + mean=(104.00698793, 116.66876762, 122.67891434), + split="valid") + """ + # config + params = eval(self.param_str) + self.sbdd_dir = params['sbdd_dir'] + self.split = params['split'] + self.mean = np.array(params['mean']) + self.random = params.get('randomize', True) + self.seed = params.get('seed', None) + + # two tops: data and label + if len(top) != 2: + raise Exception("Need to define two tops: data and label.") + # data layers have no bottoms + if len(bottom) != 0: + raise Exception("Do not define a bottom.") + + # load indices for images and labels + split_f = '{}/{}.txt'.format(self.sbdd_dir, + self.split) + self.indices = open(split_f, 'r').read().splitlines() + self.idx = 0 + + # make eval deterministic + if 'train' not in self.split: + self.random = False + + # randomization: seed and pick + if self.random: + random.seed(self.seed) + self.idx = random.randint(0, len(self.indices)-1) + + + def reshape(self, bottom, top): + # load image + label image pair + self.data = self.load_image(self.indices[self.idx]) + self.label = self.load_label(self.indices[self.idx]) + # reshape tops to fit (leading 1 is for batch dimension) + self.data = self.data.reshape(1, *self.data.shape) + self.label = self.label.reshape(1, *self.label.shape) + + + def forward(self, bottom, top): + # assign output + ws.FeedTensor(top[0], self.data) + ws.FeedTensor(top[1], self.label) + + # pick next input + if self.random: + self.idx = random.randint(0, len(self.indices)-1) + else: + self.idx += 1 + if self.idx == len(self.indices): + self.idx = 0 + + + def backward(self, top, propagate_down, bottom): + pass + + + def load_image(self, idx): + """ + Load input image and preprocess for Caffe: + - cast to float + - switch channels RGB -> BGR + - subtract mean + - transpose to channel x height x width order + """ + im = Image.open('{}/img/{}.jpg'.format(self.sbdd_dir, idx)) + in_ = np.array(im, dtype=np.float32) + in_ = in_[:,:,::-1] + in_ -= self.mean + in_ = in_.transpose((2,0,1)) + return in_ + + + def load_label(self, idx): + """ + Load label image as 1 x height x width integer array of label indices. + The leading singleton dimension is required by the loss. + """ + import scipy.io + mat = scipy.io.loadmat('{}/cls/{}.mat'.format(self.sbdd_dir, idx)) + label = mat['GTcls'][0]['Segmentation'][0].astype(np.float32) + label = label[np.newaxis, ...] + return label
239
add examples/Seg-FCN
0
.py
py
bsd-2-clause
neopenx/Dragon
1779
<NME> README.md <BEF> shiro-redis ============= ## Introduction shiro only provide the support of ehcache and concurrentHashMap. Here is an implement of redis cache can be used by shiro. Hope it will help you! ## Documentation Official documentation [is located here](http://alexxiyang.github.io/shiro-redis/). # Create cacheManager cacheManager = org.crazycake.shiro.RedisCacheManager # Principal id field name. The field which you can get unique id to identify this principal. For example, if you use UserInfo as Principal class, the id field maybe userId, userName, email, etc. Remember to add getter to this id field. For example, getUserId(), getUserName(), getEmail(), etc. # Default value is authCacheKey or id, that means your principal object has a method called "getAuthCacheKey()" or "getId()" # # cacheManager.principalIdFieldName = id <MSG> Update README.md Update README.md <DFF> @@ -166,7 +166,9 @@ securityManager.sessionManager = $sessionManager # Create cacheManager cacheManager = org.crazycake.shiro.RedisCacheManager -# Principal id field name. The field which you can get unique id to identify this principal. For example, if you use UserInfo as Principal class, the id field maybe userId, userName, email, etc. Remember to add getter to this id field. For example, getUserId(), getUserName(), getEmail(), etc. +# Principal id field name. The field which you can get unique id to identify this principal. +# For example, if you use UserInfo as Principal class, the id field maybe userId, userName, email, etc. +# Remember to add getter to this id field. For example, getUserId(), getUserName(), getEmail(), etc. # Default value is authCacheKey or id, that means your principal object has a method called "getAuthCacheKey()" or "getId()" # # cacheManager.principalIdFieldName = id
3
Update README.md
1
.md
md
mit
alexxiyang/shiro-redis
1780
<NME> fused_batch_norm.cc <BEF> #include "operators/norm/batch_norm_op.h" #include "core/workspace.h" #include "utils/math_functions.h" #include "utils/filler.h" namespace dragon { template <class Context> template <typename T> void FusedBatchNormOp<Context>::TrainingRunWithType() { INIT_MULTIPLIER(multiplier, NS); INIT_MULTIPLIER(num_multiplier, N); INIT_MULTIPLIER(spatial_multiplier, S); TENSOR_FILL(input(1), vector<TIndex>(1, C)); // history_mean TENSOR_FILL(input(2), vector<TIndex>(1, C)); // history_var TENSOR_FILL(input(3), vector<TIndex>(1, C)); // scale TENSOR_FILL(input(4), vector<TIndex>(1, C)); // bias auto* hMean_data = input(1).template mutable_data<T, Context>(); auto* hVar_data = input(2).template mutable_data<T, Context>(); auto* Sdata = input(3).template data<T, Context>(); auto* Bdata = input(4).template data<T, Context>(); auto* tMean_data = mean->template mutable_data<T, Context>(); auto* tVar_data = var->template mutable_data<T, Context>(); auto* Xdata = input(0).template data<T, Context>(); auto* Ydata = output(0)->template mutable_data<T, Context>(); auto* NSMul_data = multiplier->template data<T, Context>(); auto* SMul_data = spatial_multiplier->template data<T, Context>(); auto* NMul_data = num_multiplier->template data<T, Context>(); auto* NC_data = num_by_chans.template mutable_data<T, Context>(); auto* Std_data = stddev->template mutable_data<T, Context>(); ctx().template Copy<T, Context, Context>(output(0)->count(), Ydata, Xdata); // compute mean if (data_format == "NCHW") { math::Gemv<T, Context>(CblasNoTrans, NC, S, 1.0 / NS, Xdata, SMul_data, 0, NC_data); math::Gemv<T, Context>(CblasTrans, N, C, 1.0, NC_data, NMul_data, 0, tMean_data); } else if (data_format == "NHWC") { math::Gemv<T, Context>(CblasTrans, NS, C, 1.0 / NS, Xdata, NSMul_data, 0, tMean_data); } // subtract mean if (data_format == "NCHW") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1, 1.0, NMul_data, tMean_data, 0.0, NC_data); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1, -1.0, NC_data, SMul_data, 1.0, Ydata); } else if (data_format == "NHWC") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1, -1.0, NSMul_data, tMean_data, 1.0, Ydata); } // compute variance // note that we use VAR(X) = E((X - EX) ^ 2) math::Square<T, Context>(output(0)->count(), Ydata, Std_data); if (data_format == "NCHW") { math::Gemv<T, Context>(CblasNoTrans, NC, S, 1.0 / NS, Std_data, SMul_data, 0.0, NC_data); math::Gemv<T, Context>(CblasTrans, N, C, 1.0, NC_data, NMul_data, 0.0, tVar_data); } else if (data_format == "NHWC") { math::Gemv<T, Context>(CblasTrans, NS, C, 1.0 / NS, Std_data, NSMul_data, 0.0, tVar_data); } // compute moving average if (!is_recomputing) { // History(X) = (1 - momentum) * Cur(X) + momentum * History(X) math::Axpby<T, Context>(mean->count(), 1.0 - momentum, tMean_data, momentum, hMean_data); math::Axpby<T, Context>(var->count(), 1.0 - momentum, tVar_data, momentum, hVar_data); } // compute stddev math::AddScalar<T, Context>(var->count(), eps, tVar_data); math::Sqrt<T, Context>(var->count(), tVar_data, tVar_data); // divide by stddev if (data_format == "NCHW") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1, 1.0, NMul_data, tVar_data, 0.0, NC_data); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1, 1.0, NC_data, SMul_data, 0.0, Std_data); } else if (data_format == "NHWC") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1, 1.0, NSMul_data, tVar_data, 0.0, Std_data); } math::Div<T, Context>(output(0)->count(), Ydata, Std_data, Ydata); // store x_norm for backward auto* XNorm_data = x_norm->template mutable_data<T, Context>(); ctx().template Copy<T, Context, Context>(output(0)->count(), XNorm_data, Ydata); // scale if (data_format == "NCHW") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1, 1.0, NMul_data, Sdata, 0.0, NC_data); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1, 1.0, NC_data, SMul_data, 0.0, Std_data); } else if (data_format == "NHWC") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1, 1.0, NSMul_data, Sdata, 0.0, Std_data); } math::Mul<T, Context>(output(0)->count(), Ydata, Std_data, Ydata); // shift if (data_format == "NCHW") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1, 1.0, NMul_data, Bdata, 0.0, NC_data); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1, 1.0, NC_data, SMul_data, 1.0, Ydata); } else if (data_format == "NHWC") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1, 1.0, NSMul_data, Bdata, 1.0, Ydata); } ws()->ReleaseBuffer(stddev); } template <class Context> template <typename T> void FusedBatchNormOp<Context>::InferenceRunWithType() { INIT_MULTIPLIER(multiplier, NS); INIT_MULTIPLIER(num_multiplier, N); INIT_MULTIPLIER(spatial_multiplier, S); TENSOR_FILL(input(1), vector<TIndex>(1, C)); // history_mean TENSOR_FILL(input(2), vector<TIndex>(1, C)); // history_var TENSOR_FILL(input(3), vector<TIndex>(1, C)); // scale TENSOR_FILL(input(4), vector<TIndex>(1, C)); // bias auto* hMean_data = input(1).template mutable_data<T, Context>(); auto* hVar_data = input(2).template mutable_data<T, Context>(); auto* Sdata = input(3).template data<T, Context>(); auto* Bdata = input(4).template data<T, Context>(); auto* tMean_data = mean->template mutable_data<T, Context>(); auto* tVar_data = var->template mutable_data<T, Context>(); auto* Xdata = input(0).template data<T, Context>(); auto* Ydata = output(0)->template mutable_data<T, Context>(); auto* NSMul_data = multiplier->template data<T, Context>(); auto* SMul_data = spatial_multiplier->template data<T, Context>(); auto* NMul_data = num_multiplier->template data<T, Context>(); auto* NC_data = num_by_chans.template mutable_data<T, Context>(); auto* Std_data = stddev->template mutable_data<T, Context>(); ctx().template Copy<T, Context, Context>(input(0).count(), Ydata, Xdata); ctx().template Copy<T, Context, Context>(mean->count(), tMean_data, hMean_data); ctx().template Copy<T, Context, Context>(var->count(), tVar_data, hVar_data); // subtract mean if (data_format == "NCHW") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1, 1.0, NMul_data, tMean_data, 0.0, NC_data); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1, -1.0, NC_data, SMul_data, 1.0, Ydata); } else if (data_format == "NHWC") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1, -1.0, NSMul_data, tMean_data, 1.0, Ydata); } // compute stddev math::AddScalar<T, Context>(var->count(), eps, tVar_data); math::Sqrt<T, Context>(var->count(), tVar_data, tVar_data); // divide by stddev if (data_format == "NCHW") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1, 1.0, NMul_data, tVar_data, 0.0, NC_data); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1, 1.0, NC_data, SMul_data, 0.0, Std_data); } else if (data_format == "NHWC") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1, 1.0, NSMul_data, tVar_data, 0.0, Std_data); } math::Div<T, Context>(output(0)->count(), Ydata, Std_data, Ydata); // scale if (data_format == "NCHW") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1, 1.0, NMul_data, Sdata, 0.0, NC_data); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1, 1.0, NC_data, SMul_data, 0.0, Std_data); } else if (data_format == "NHWC") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1, 1.0, NSMul_data, Sdata, 0.0, Std_data); } math::Mul<T, Context>(output(0)->count(), Ydata, Std_data, Ydata); // shift if (data_format == "NCHW") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1, 1.0, NMul_data, Bdata, 0.0, NC_data); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1, 1.0, NC_data, SMul_data, 1.0, Ydata); } else if (data_format == "NHWC") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1, 1.0, NSMul_data, Bdata, 1.0, Ydata); } ws()->ReleaseBuffer(stddev); } template <class Context> void FusedBatchNormOp<Context>::Setup() { // determine the mode if (use_stats == -1) use_global_stats = phase() == "TEST" ? true : false; else use_global_stats = use_stats == 1 ? true : false; is_recomputing = ws()->GetTensor("/opt/mirror_stage/recompute_flag") ->template data<bool, CPUContext>()[0]; // determine the data format TIndex channel_axis = axis; data_format = "NCHW"; if (channel_axis == -1) channel_axis += (int)input(0).ndim(); if (channel_axis + 1 == (int)input(0).ndim()) data_format = "NHWC"; N = input(0).dim(0); C = input(0).dim(channel_axis); NC = N * C; S = input(0).count() / NC; NS = N * S; // make resource mean = ws()->CreateTensor("/mnt/" + anchor() + "/bn_mean"); var = ws()->CreateTensor("/mnt/" + anchor() + "/bn_var"); x_norm = ws()->CreateTensor("/mnt/" + anchor() + "/bn_x_norm"); stddev = ws()->GetBuffer(); stddev->ReshapeLike(input(0)); // reshape mean->Reshape(vector<TIndex>(1, C)); var->Reshape(vector<TIndex>(1, C)); num_by_chans.Reshape(vector<TIndex>(1, NC)); x_norm->ReshapeLike(input(0)); output(0)->ReshapeLike(input(0)); } template <class Context> void FusedBatchNormOp<Context>::RunOnDevice() { Setup(); if (input(0).template IsType<float>()) { if (use_global_stats) InferenceRunWithType<float>(); else TrainingRunWithType<float>(); } #ifdef WITH_CUDA_FP16 else if (input(0).template IsType<float16>()) { if (use_global_stats) InferenceRunWithType<float16>(); else TrainingRunWithType<float16>(); } #endif else LOG(FATAL) << "Unsupported input types."; } DEPLOY_CPU(FusedBatchNorm); #ifdef WITH_CUDA DEPLOY_CUDA(FusedBatchNorm); #endif OPERATOR_SCHEMA(FusedBatchNorm).NumInputs(5).NumOutputs(1); template <class Context> template <typename T> void FusedBatchNormGradientOp<Context>::TrainingRunWithType() { INIT_MULTIPLIER(multiplier, NS); INIT_MULTIPLIER(num_multiplier, N); INIT_MULTIPLIER(spatial_multiplier, S); auto* dYdata = input(-1).template data<T, Context>(); auto* dXdata = output(0)->template mutable_data<T, Context>(); auto* Sdata = input(3).template data<T, Context>(); auto* Std_data = stddev->template mutable_data<T, Context>(); auto* tMean_data = mean->template mutable_data<T, Context>(); auto* tVar_data = var->template mutable_data<T, Context>(); auto* NSMul_data = multiplier->template data<T, Context>(); auto* SMul_data = spatial_multiplier->template data<T, Context>(); auto* NMul_data = num_multiplier->template data<T, Context>(); auto* NC_data = num_by_chans.template mutable_data<T, Context>(); auto* XNorm_data = x_norm->template data<T, Context>(); // gradient w.r.t. scale if (output(1)->name() != "ignore") { auto* dSdata = output(1)->template mutable_data<T, Context>(); math::Mul<T, Context>(stddev->count(), XNorm_data, dYdata, Std_data); if (data_format == "NCHW") { math::Gemv<T, Context>(CblasNoTrans, NC, S, 1.0, Std_data, SMul_data, 0.0, NC_data); math::Gemv<T, Context>(CblasTrans, N, C, 1.0, NC_data, NMul_data, 1.0, dSdata); } else if (data_format == "NHWC") { math::Gemv<T, Context>(CblasTrans, NS, C, 1.0, Std_data, NSMul_data, 1.0, dSdata); } } // gradient w.r.t. bias if (output(2)->name() != "ignore") { auto* dBdata = output(2)->template mutable_data<T, Context>(); if (data_format == "NCHW") { math::Gemv<T, Context>(CblasNoTrans, NC, S, 1.0, dYdata, SMul_data, 0.0, NC_data); math::Gemv<T, Context>(CblasTrans, N, C, 1.0, NC_data, NMul_data, 1.0, dBdata); } else if (data_format == "NHWC") { math::Gemv<T, Context>(CblasTrans, NS, C, 1.0, dYdata, NSMul_data, 1.0, dBdata); } } // gradient w.r.t. x if (output(0)->name() != "ignore") { // scale * dY if (data_format == "NCHW") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1, 1.0, NMul_data, Sdata, 0.0, NC_data); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1, 1.0, NC_data, SMul_data, 0.0, Std_data); } else if (data_format == "NHWC") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1, 1.0, NSMul_data, Sdata, 0.0, Std_data); } math::Mul<T, Context>(stddev->count(), Std_data, dYdata, Std_data); // sum of x_hat * (dl / dx_hat) math::Mul<T, Context>(stddev->count(), XNorm_data, Std_data, dXdata); if (data_format == "NCHW") { math::Gemv<T, Context>(CblasNoTrans, NC, S, 1.0, dXdata, SMul_data, 0.0, NC_data); math::Gemv<T, Context>(CblasTrans, N, C, 1.0, NC_data, NMul_data, 0.0, tMean_data); } else if (data_format == "NHWC") { math::Gemv<T, Context>(CblasTrans, NS, C, 1.0, dXdata, NSMul_data, 0.0, tMean_data); } // x_hat times the sum if (data_format == "NCHW") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1, 1.0, NMul_data, tMean_data, 0.0, NC_data); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1, 1.0, NC_data, SMul_data, 0.0, dXdata); } else if (data_format == "NHWC") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1, 1.0, NSMul_data, tMean_data, 0.0, dXdata); } math::Mul<T, Context>(stddev->count(), XNorm_data, dXdata, dXdata); // subtract the average of x_hat times the sum if (data_format == "NCHW") { math::Gemv<T, Context>(CblasNoTrans, NC, S, 1.0, Std_data, SMul_data, 0.0, NC_data); math::Gemv<T, Context>(CblasTrans, N, C, 1.0, NC_data, NMul_data, 0.0, tMean_data); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1, 1.0, NMul_data, tMean_data, 0.0, NC_data); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1, 1.0, NC_data, SMul_data, 1.0, dXdata); } else if (data_format == "NHWC") { math::Gemv<T, Context>(CblasTrans, NS, C, 1.0, Std_data, NSMul_data, 0.0, tMean_data); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1, 1.0, NSMul_data, tMean_data, 1.0, dXdata); } math::Axpby<T, Context>(stddev->count(), 1.0, Std_data, -1.0 / NS, dXdata); // multiply with the inverse std if (data_format == "NCHW") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1, 1.0, NMul_data, tVar_data, 0.0, NC_data); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1, 1.0, NC_data, SMul_data, 0.0, Std_data); } else if (data_format == "NHWC") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1, 1.0, NSMul_data, tVar_data, 0.0, Std_data); } // divide by stddev math::Div<T, Context>(output(0)->count(), dXdata, Std_data, dXdata); } ws()->ReleaseBuffer(stddev); } template <class Context> template <typename T> void FusedBatchNormGradientOp<Context>::InferenceRunWithType() { if (output(0)->name() != "ignore") { INIT_MULTIPLIER(multiplier, NS); INIT_MULTIPLIER(num_multiplier, N); INIT_MULTIPLIER(spatial_multiplier, S); auto* dYdata = input(-1).template data<T, Context>(); auto* dXdata = output(0)->template mutable_data<T, Context>(); auto* Std_data = stddev->template mutable_data<T, Context>(); auto* Sdata = input(3).template data<T, Context>(); auto* hVar_data = input(2).template data<T, Context>(); auto* tVar_data = var->template mutable_data<T, Context>(); auto* NSMul_data = multiplier->template data<T, Context>(); auto* SMul_data = spatial_multiplier->template data<T, Context>(); auto* NMul_data = num_multiplier->template data<T, Context>(); auto* NC_data = num_by_chans.template mutable_data<T, Context>(); // divide scale by stddev math::Div<T, Context>(var->count(), Sdata, tVar_data, tVar_data); // compute dE/dY \cot (scale / std(X)) if (data_format == "NCHW") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1, 1.0, NMul_data, tVar_data, 0.0, NC_data); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1, 1.0, NC_data, SMul_data, 0.0, Std_data); } else if (data_format == "NHWC") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1, 1.0, NSMul_data, tVar_data, 0.0, Std_data); } math::Mul<T, Context>(output(0)->count(), dYdata, Std_data, dXdata); } ws()->ReleaseBuffer(stddev); } template <class Context> void FusedBatchNormGradientOp<Context>::Setup() { // determine the mode if (use_stats == -1) use_global_stats = phase() == "TEST" ? true : false; else use_global_stats = use_stats == 1 ? true : false; // determine the data format TIndex channel_axis = axis; data_format = "NCHW"; if (channel_axis == -1) channel_axis += (int)input(0).ndim(); if (channel_axis + 1 == (int)input(0).ndim()) data_format = "NHWC"; N = input(0).dim(0); C = input(0).dim(channel_axis); NC = N * C; S = input(0).count() / NC; NS = N * S; // make resource mean = ws()->GetTensor("/mnt/" + anchor() + "/bn_mean"); var = ws()->GetTensor("/mnt/" + anchor() + "/bn_var"); x_norm = ws()->GetTensor("/mnt/" + anchor() + "/bn_x_norm"); stddev = ws()->GetBuffer(); stddev->ReshapeLike(input(0)); // reshape num_by_chans.Reshape(vector<TIndex>(1, NC)); output(0)->ReshapeLike(input(0)); } template <class Context> void FusedBatchNormGradientOp<Context>::RunOnDevice() { Setup(); if (input(0).template IsType<float>()) { if (use_global_stats) InferenceRunWithType<float>(); else TrainingRunWithType<float>(); } #ifdef WITH_CUDA_FP16 else if (input(0).template IsType<float16>()) { if (use_global_stats) InferenceRunWithType<float16>(); else TrainingRunWithType<float16>(); } #endif else LOG(FATAL) << "Unsupported input types."; } template <class Context> void FusedBatchNormGradientOp<Context>::ShareGradient() { if (use_global_stats) { if (output(0)->name() != "ignore") { Tensor* dX = ws()->GetBuffer("Grad"); ws()->CreateAvatar(output(0), dX); } } else { if (output(0)->name() != "ignore" || output(1)->name() != "ignore" || output(2)->name() != "ignore") { Tensor* dX = ws()->GetBuffer("Grad"); ws()->CreateAvatar(output(0), dX); } } } DEPLOY_CPU(FusedBatchNormGradient); #ifdef WITH_CUDA DEPLOY_CUDA(FusedBatchNormGradient); #endif OPERATOR_SCHEMA(FusedBatchNormGradient).NumInputs(5).NumOutputs(3); class GetFusedBatchNormGradient final : public GradientMakerBase { public: GRADIENT_MAKER_CTOR(GetFusedBatchNormGradient); vector<OperatorDef> MakeDefs() override { return SingleDef(def.type() + "Gradient", "", vector<string> {I(0), I(1), I(2), I(3), GO(0)}, vector<string> {GI(0), GI(3), GI(4)}); } }; REGISTER_GRADIENT(FusedBatchNorm, GetFusedBatchNormGradient); } // namespace dragon <MSG> Add Contrib ops <DFF> @@ -429,21 +429,43 @@ void FusedBatchNormGradientOp<Context>::TrainingRunWithType() { template <class Context> template <typename T> void FusedBatchNormGradientOp<Context>::InferenceRunWithType() { + INIT_MULTIPLIER(multiplier, NS); + INIT_MULTIPLIER(num_multiplier, N); + INIT_MULTIPLIER(spatial_multiplier, S); + + auto* dYdata = input(-1).template data<T, Context>(); + auto* Sdata = input(3).template data<T, Context>(); + auto* tVar_data = var->template mutable_data<T, Context>(); + auto* NSMul_data = multiplier->template data<T, Context>(); + auto* SMul_data = spatial_multiplier->template data<T, Context>(); + auto* NMul_data = num_multiplier->template data<T, Context>(); + auto* NC_data = num_by_chans.template mutable_data<T, Context>(); + + // gradient w.r.t. scale + if (output(1)->name() != "ignore") + LOG(FATAL) << "The gamma should be fixed if using global stats."; + + // gradient w.r.t. bias + if (output(2)->name() != "ignore") { + auto* dBdata = output(2)->template mutable_data<T, Context>(); + if (data_format == "NCHW") { + math::Gemv<T, Context>(CblasNoTrans, NC, S, + 1.0, dYdata, SMul_data, + 0.0, NC_data); + math::Gemv<T, Context>(CblasTrans, N, C, + 1.0, NC_data, NMul_data, + 1.0, dBdata); + } else if (data_format == "NHWC") { + math::Gemv<T, Context>(CblasTrans, NS, C, + 1.0, dYdata, NSMul_data, + 1.0, dBdata); + } + } + + // gradient w.r.t. x if (output(0)->name() != "ignore") { - INIT_MULTIPLIER(multiplier, NS); - INIT_MULTIPLIER(num_multiplier, N); - INIT_MULTIPLIER(spatial_multiplier, S); - - auto* dYdata = input(-1).template data<T, Context>(); auto* dXdata = output(0)->template mutable_data<T, Context>(); auto* Std_data = stddev->template mutable_data<T, Context>(); - auto* Sdata = input(3).template data<T, Context>(); - auto* hVar_data = input(2).template data<T, Context>(); - auto* tVar_data = var->template mutable_data<T, Context>(); - auto* NSMul_data = multiplier->template data<T, Context>(); - auto* SMul_data = spatial_multiplier->template data<T, Context>(); - auto* NMul_data = num_multiplier->template data<T, Context>(); - auto* NC_data = num_by_chans.template mutable_data<T, Context>(); // divide scale by stddev math::Div<T, Context>(var->count(), Sdata, tVar_data, tVar_data); @@ -492,7 +514,9 @@ void FusedBatchNormGradientOp<Context>::Setup() { // reshape num_by_chans.Reshape(vector<TIndex>(1, NC)); - output(0)->ReshapeLike(input(0)); + output(0)->ReshapeLike(input(0)); // dX + output(1)->ReshapeLike(input(3)); // dScale + output(2)->ReshapeLike(input(3)); // dBias } template <class Context>
37
Add Contrib ops
13
.cc
cc
bsd-2-clause
neopenx/Dragon
1781
<NME> fused_batch_norm.cc <BEF> #include "operators/norm/batch_norm_op.h" #include "core/workspace.h" #include "utils/math_functions.h" #include "utils/filler.h" namespace dragon { template <class Context> template <typename T> void FusedBatchNormOp<Context>::TrainingRunWithType() { INIT_MULTIPLIER(multiplier, NS); INIT_MULTIPLIER(num_multiplier, N); INIT_MULTIPLIER(spatial_multiplier, S); TENSOR_FILL(input(1), vector<TIndex>(1, C)); // history_mean TENSOR_FILL(input(2), vector<TIndex>(1, C)); // history_var TENSOR_FILL(input(3), vector<TIndex>(1, C)); // scale TENSOR_FILL(input(4), vector<TIndex>(1, C)); // bias auto* hMean_data = input(1).template mutable_data<T, Context>(); auto* hVar_data = input(2).template mutable_data<T, Context>(); auto* Sdata = input(3).template data<T, Context>(); auto* Bdata = input(4).template data<T, Context>(); auto* tMean_data = mean->template mutable_data<T, Context>(); auto* tVar_data = var->template mutable_data<T, Context>(); auto* Xdata = input(0).template data<T, Context>(); auto* Ydata = output(0)->template mutable_data<T, Context>(); auto* NSMul_data = multiplier->template data<T, Context>(); auto* SMul_data = spatial_multiplier->template data<T, Context>(); auto* NMul_data = num_multiplier->template data<T, Context>(); auto* NC_data = num_by_chans.template mutable_data<T, Context>(); auto* Std_data = stddev->template mutable_data<T, Context>(); ctx().template Copy<T, Context, Context>(output(0)->count(), Ydata, Xdata); // compute mean if (data_format == "NCHW") { math::Gemv<T, Context>(CblasNoTrans, NC, S, 1.0 / NS, Xdata, SMul_data, 0, NC_data); math::Gemv<T, Context>(CblasTrans, N, C, 1.0, NC_data, NMul_data, 0, tMean_data); } else if (data_format == "NHWC") { math::Gemv<T, Context>(CblasTrans, NS, C, 1.0 / NS, Xdata, NSMul_data, 0, tMean_data); } // subtract mean if (data_format == "NCHW") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1, 1.0, NMul_data, tMean_data, 0.0, NC_data); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1, -1.0, NC_data, SMul_data, 1.0, Ydata); } else if (data_format == "NHWC") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1, -1.0, NSMul_data, tMean_data, 1.0, Ydata); } // compute variance // note that we use VAR(X) = E((X - EX) ^ 2) math::Square<T, Context>(output(0)->count(), Ydata, Std_data); if (data_format == "NCHW") { math::Gemv<T, Context>(CblasNoTrans, NC, S, 1.0 / NS, Std_data, SMul_data, 0.0, NC_data); math::Gemv<T, Context>(CblasTrans, N, C, 1.0, NC_data, NMul_data, 0.0, tVar_data); } else if (data_format == "NHWC") { math::Gemv<T, Context>(CblasTrans, NS, C, 1.0 / NS, Std_data, NSMul_data, 0.0, tVar_data); } // compute moving average if (!is_recomputing) { // History(X) = (1 - momentum) * Cur(X) + momentum * History(X) math::Axpby<T, Context>(mean->count(), 1.0 - momentum, tMean_data, momentum, hMean_data); math::Axpby<T, Context>(var->count(), 1.0 - momentum, tVar_data, momentum, hVar_data); } // compute stddev math::AddScalar<T, Context>(var->count(), eps, tVar_data); math::Sqrt<T, Context>(var->count(), tVar_data, tVar_data); // divide by stddev if (data_format == "NCHW") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1, 1.0, NMul_data, tVar_data, 0.0, NC_data); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1, 1.0, NC_data, SMul_data, 0.0, Std_data); } else if (data_format == "NHWC") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1, 1.0, NSMul_data, tVar_data, 0.0, Std_data); } math::Div<T, Context>(output(0)->count(), Ydata, Std_data, Ydata); // store x_norm for backward auto* XNorm_data = x_norm->template mutable_data<T, Context>(); ctx().template Copy<T, Context, Context>(output(0)->count(), XNorm_data, Ydata); // scale if (data_format == "NCHW") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1, 1.0, NMul_data, Sdata, 0.0, NC_data); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1, 1.0, NC_data, SMul_data, 0.0, Std_data); } else if (data_format == "NHWC") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1, 1.0, NSMul_data, Sdata, 0.0, Std_data); } math::Mul<T, Context>(output(0)->count(), Ydata, Std_data, Ydata); // shift if (data_format == "NCHW") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1, 1.0, NMul_data, Bdata, 0.0, NC_data); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1, 1.0, NC_data, SMul_data, 1.0, Ydata); } else if (data_format == "NHWC") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1, 1.0, NSMul_data, Bdata, 1.0, Ydata); } ws()->ReleaseBuffer(stddev); } template <class Context> template <typename T> void FusedBatchNormOp<Context>::InferenceRunWithType() { INIT_MULTIPLIER(multiplier, NS); INIT_MULTIPLIER(num_multiplier, N); INIT_MULTIPLIER(spatial_multiplier, S); TENSOR_FILL(input(1), vector<TIndex>(1, C)); // history_mean TENSOR_FILL(input(2), vector<TIndex>(1, C)); // history_var TENSOR_FILL(input(3), vector<TIndex>(1, C)); // scale TENSOR_FILL(input(4), vector<TIndex>(1, C)); // bias auto* hMean_data = input(1).template mutable_data<T, Context>(); auto* hVar_data = input(2).template mutable_data<T, Context>(); auto* Sdata = input(3).template data<T, Context>(); auto* Bdata = input(4).template data<T, Context>(); auto* tMean_data = mean->template mutable_data<T, Context>(); auto* tVar_data = var->template mutable_data<T, Context>(); auto* Xdata = input(0).template data<T, Context>(); auto* Ydata = output(0)->template mutable_data<T, Context>(); auto* NSMul_data = multiplier->template data<T, Context>(); auto* SMul_data = spatial_multiplier->template data<T, Context>(); auto* NMul_data = num_multiplier->template data<T, Context>(); auto* NC_data = num_by_chans.template mutable_data<T, Context>(); auto* Std_data = stddev->template mutable_data<T, Context>(); ctx().template Copy<T, Context, Context>(input(0).count(), Ydata, Xdata); ctx().template Copy<T, Context, Context>(mean->count(), tMean_data, hMean_data); ctx().template Copy<T, Context, Context>(var->count(), tVar_data, hVar_data); // subtract mean if (data_format == "NCHW") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1, 1.0, NMul_data, tMean_data, 0.0, NC_data); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1, -1.0, NC_data, SMul_data, 1.0, Ydata); } else if (data_format == "NHWC") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1, -1.0, NSMul_data, tMean_data, 1.0, Ydata); } // compute stddev math::AddScalar<T, Context>(var->count(), eps, tVar_data); math::Sqrt<T, Context>(var->count(), tVar_data, tVar_data); // divide by stddev if (data_format == "NCHW") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1, 1.0, NMul_data, tVar_data, 0.0, NC_data); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1, 1.0, NC_data, SMul_data, 0.0, Std_data); } else if (data_format == "NHWC") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1, 1.0, NSMul_data, tVar_data, 0.0, Std_data); } math::Div<T, Context>(output(0)->count(), Ydata, Std_data, Ydata); // scale if (data_format == "NCHW") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1, 1.0, NMul_data, Sdata, 0.0, NC_data); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1, 1.0, NC_data, SMul_data, 0.0, Std_data); } else if (data_format == "NHWC") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1, 1.0, NSMul_data, Sdata, 0.0, Std_data); } math::Mul<T, Context>(output(0)->count(), Ydata, Std_data, Ydata); // shift if (data_format == "NCHW") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1, 1.0, NMul_data, Bdata, 0.0, NC_data); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1, 1.0, NC_data, SMul_data, 1.0, Ydata); } else if (data_format == "NHWC") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1, 1.0, NSMul_data, Bdata, 1.0, Ydata); } ws()->ReleaseBuffer(stddev); } template <class Context> void FusedBatchNormOp<Context>::Setup() { // determine the mode if (use_stats == -1) use_global_stats = phase() == "TEST" ? true : false; else use_global_stats = use_stats == 1 ? true : false; is_recomputing = ws()->GetTensor("/opt/mirror_stage/recompute_flag") ->template data<bool, CPUContext>()[0]; // determine the data format TIndex channel_axis = axis; data_format = "NCHW"; if (channel_axis == -1) channel_axis += (int)input(0).ndim(); if (channel_axis + 1 == (int)input(0).ndim()) data_format = "NHWC"; N = input(0).dim(0); C = input(0).dim(channel_axis); NC = N * C; S = input(0).count() / NC; NS = N * S; // make resource mean = ws()->CreateTensor("/mnt/" + anchor() + "/bn_mean"); var = ws()->CreateTensor("/mnt/" + anchor() + "/bn_var"); x_norm = ws()->CreateTensor("/mnt/" + anchor() + "/bn_x_norm"); stddev = ws()->GetBuffer(); stddev->ReshapeLike(input(0)); // reshape mean->Reshape(vector<TIndex>(1, C)); var->Reshape(vector<TIndex>(1, C)); num_by_chans.Reshape(vector<TIndex>(1, NC)); x_norm->ReshapeLike(input(0)); output(0)->ReshapeLike(input(0)); } template <class Context> void FusedBatchNormOp<Context>::RunOnDevice() { Setup(); if (input(0).template IsType<float>()) { if (use_global_stats) InferenceRunWithType<float>(); else TrainingRunWithType<float>(); } #ifdef WITH_CUDA_FP16 else if (input(0).template IsType<float16>()) { if (use_global_stats) InferenceRunWithType<float16>(); else TrainingRunWithType<float16>(); } #endif else LOG(FATAL) << "Unsupported input types."; } DEPLOY_CPU(FusedBatchNorm); #ifdef WITH_CUDA DEPLOY_CUDA(FusedBatchNorm); #endif OPERATOR_SCHEMA(FusedBatchNorm).NumInputs(5).NumOutputs(1); template <class Context> template <typename T> void FusedBatchNormGradientOp<Context>::TrainingRunWithType() { INIT_MULTIPLIER(multiplier, NS); INIT_MULTIPLIER(num_multiplier, N); INIT_MULTIPLIER(spatial_multiplier, S); auto* dYdata = input(-1).template data<T, Context>(); auto* dXdata = output(0)->template mutable_data<T, Context>(); auto* Sdata = input(3).template data<T, Context>(); auto* Std_data = stddev->template mutable_data<T, Context>(); auto* tMean_data = mean->template mutable_data<T, Context>(); auto* tVar_data = var->template mutable_data<T, Context>(); auto* NSMul_data = multiplier->template data<T, Context>(); auto* SMul_data = spatial_multiplier->template data<T, Context>(); auto* NMul_data = num_multiplier->template data<T, Context>(); auto* NC_data = num_by_chans.template mutable_data<T, Context>(); auto* XNorm_data = x_norm->template data<T, Context>(); // gradient w.r.t. scale if (output(1)->name() != "ignore") { auto* dSdata = output(1)->template mutable_data<T, Context>(); math::Mul<T, Context>(stddev->count(), XNorm_data, dYdata, Std_data); if (data_format == "NCHW") { math::Gemv<T, Context>(CblasNoTrans, NC, S, 1.0, Std_data, SMul_data, 0.0, NC_data); math::Gemv<T, Context>(CblasTrans, N, C, 1.0, NC_data, NMul_data, 1.0, dSdata); } else if (data_format == "NHWC") { math::Gemv<T, Context>(CblasTrans, NS, C, 1.0, Std_data, NSMul_data, 1.0, dSdata); } } // gradient w.r.t. bias if (output(2)->name() != "ignore") { auto* dBdata = output(2)->template mutable_data<T, Context>(); if (data_format == "NCHW") { math::Gemv<T, Context>(CblasNoTrans, NC, S, 1.0, dYdata, SMul_data, 0.0, NC_data); math::Gemv<T, Context>(CblasTrans, N, C, 1.0, NC_data, NMul_data, 1.0, dBdata); } else if (data_format == "NHWC") { math::Gemv<T, Context>(CblasTrans, NS, C, 1.0, dYdata, NSMul_data, 1.0, dBdata); } } // gradient w.r.t. x if (output(0)->name() != "ignore") { // scale * dY if (data_format == "NCHW") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1, 1.0, NMul_data, Sdata, 0.0, NC_data); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1, 1.0, NC_data, SMul_data, 0.0, Std_data); } else if (data_format == "NHWC") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1, 1.0, NSMul_data, Sdata, 0.0, Std_data); } math::Mul<T, Context>(stddev->count(), Std_data, dYdata, Std_data); // sum of x_hat * (dl / dx_hat) math::Mul<T, Context>(stddev->count(), XNorm_data, Std_data, dXdata); if (data_format == "NCHW") { math::Gemv<T, Context>(CblasNoTrans, NC, S, 1.0, dXdata, SMul_data, 0.0, NC_data); math::Gemv<T, Context>(CblasTrans, N, C, 1.0, NC_data, NMul_data, 0.0, tMean_data); } else if (data_format == "NHWC") { math::Gemv<T, Context>(CblasTrans, NS, C, 1.0, dXdata, NSMul_data, 0.0, tMean_data); } // x_hat times the sum if (data_format == "NCHW") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1, 1.0, NMul_data, tMean_data, 0.0, NC_data); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1, 1.0, NC_data, SMul_data, 0.0, dXdata); } else if (data_format == "NHWC") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1, 1.0, NSMul_data, tMean_data, 0.0, dXdata); } math::Mul<T, Context>(stddev->count(), XNorm_data, dXdata, dXdata); // subtract the average of x_hat times the sum if (data_format == "NCHW") { math::Gemv<T, Context>(CblasNoTrans, NC, S, 1.0, Std_data, SMul_data, 0.0, NC_data); math::Gemv<T, Context>(CblasTrans, N, C, 1.0, NC_data, NMul_data, 0.0, tMean_data); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1, 1.0, NMul_data, tMean_data, 0.0, NC_data); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1, 1.0, NC_data, SMul_data, 1.0, dXdata); } else if (data_format == "NHWC") { math::Gemv<T, Context>(CblasTrans, NS, C, 1.0, Std_data, NSMul_data, 0.0, tMean_data); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1, 1.0, NSMul_data, tMean_data, 1.0, dXdata); } math::Axpby<T, Context>(stddev->count(), 1.0, Std_data, -1.0 / NS, dXdata); // multiply with the inverse std if (data_format == "NCHW") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1, 1.0, NMul_data, tVar_data, 0.0, NC_data); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1, 1.0, NC_data, SMul_data, 0.0, Std_data); } else if (data_format == "NHWC") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1, 1.0, NSMul_data, tVar_data, 0.0, Std_data); } // divide by stddev math::Div<T, Context>(output(0)->count(), dXdata, Std_data, dXdata); } ws()->ReleaseBuffer(stddev); } template <class Context> template <typename T> void FusedBatchNormGradientOp<Context>::InferenceRunWithType() { if (output(0)->name() != "ignore") { INIT_MULTIPLIER(multiplier, NS); INIT_MULTIPLIER(num_multiplier, N); INIT_MULTIPLIER(spatial_multiplier, S); auto* dYdata = input(-1).template data<T, Context>(); auto* dXdata = output(0)->template mutable_data<T, Context>(); auto* Std_data = stddev->template mutable_data<T, Context>(); auto* Sdata = input(3).template data<T, Context>(); auto* hVar_data = input(2).template data<T, Context>(); auto* tVar_data = var->template mutable_data<T, Context>(); auto* NSMul_data = multiplier->template data<T, Context>(); auto* SMul_data = spatial_multiplier->template data<T, Context>(); auto* NMul_data = num_multiplier->template data<T, Context>(); auto* NC_data = num_by_chans.template mutable_data<T, Context>(); // divide scale by stddev math::Div<T, Context>(var->count(), Sdata, tVar_data, tVar_data); // compute dE/dY \cot (scale / std(X)) if (data_format == "NCHW") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, N, C, 1, 1.0, NMul_data, tVar_data, 0.0, NC_data); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NC, S, 1, 1.0, NC_data, SMul_data, 0.0, Std_data); } else if (data_format == "NHWC") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, NS, C, 1, 1.0, NSMul_data, tVar_data, 0.0, Std_data); } math::Mul<T, Context>(output(0)->count(), dYdata, Std_data, dXdata); } ws()->ReleaseBuffer(stddev); } template <class Context> void FusedBatchNormGradientOp<Context>::Setup() { // determine the mode if (use_stats == -1) use_global_stats = phase() == "TEST" ? true : false; else use_global_stats = use_stats == 1 ? true : false; // determine the data format TIndex channel_axis = axis; data_format = "NCHW"; if (channel_axis == -1) channel_axis += (int)input(0).ndim(); if (channel_axis + 1 == (int)input(0).ndim()) data_format = "NHWC"; N = input(0).dim(0); C = input(0).dim(channel_axis); NC = N * C; S = input(0).count() / NC; NS = N * S; // make resource mean = ws()->GetTensor("/mnt/" + anchor() + "/bn_mean"); var = ws()->GetTensor("/mnt/" + anchor() + "/bn_var"); x_norm = ws()->GetTensor("/mnt/" + anchor() + "/bn_x_norm"); stddev = ws()->GetBuffer(); stddev->ReshapeLike(input(0)); // reshape num_by_chans.Reshape(vector<TIndex>(1, NC)); output(0)->ReshapeLike(input(0)); } template <class Context> void FusedBatchNormGradientOp<Context>::RunOnDevice() { Setup(); if (input(0).template IsType<float>()) { if (use_global_stats) InferenceRunWithType<float>(); else TrainingRunWithType<float>(); } #ifdef WITH_CUDA_FP16 else if (input(0).template IsType<float16>()) { if (use_global_stats) InferenceRunWithType<float16>(); else TrainingRunWithType<float16>(); } #endif else LOG(FATAL) << "Unsupported input types."; } template <class Context> void FusedBatchNormGradientOp<Context>::ShareGradient() { if (use_global_stats) { if (output(0)->name() != "ignore") { Tensor* dX = ws()->GetBuffer("Grad"); ws()->CreateAvatar(output(0), dX); } } else { if (output(0)->name() != "ignore" || output(1)->name() != "ignore" || output(2)->name() != "ignore") { Tensor* dX = ws()->GetBuffer("Grad"); ws()->CreateAvatar(output(0), dX); } } } DEPLOY_CPU(FusedBatchNormGradient); #ifdef WITH_CUDA DEPLOY_CUDA(FusedBatchNormGradient); #endif OPERATOR_SCHEMA(FusedBatchNormGradient).NumInputs(5).NumOutputs(3); class GetFusedBatchNormGradient final : public GradientMakerBase { public: GRADIENT_MAKER_CTOR(GetFusedBatchNormGradient); vector<OperatorDef> MakeDefs() override { return SingleDef(def.type() + "Gradient", "", vector<string> {I(0), I(1), I(2), I(3), GO(0)}, vector<string> {GI(0), GI(3), GI(4)}); } }; REGISTER_GRADIENT(FusedBatchNorm, GetFusedBatchNormGradient); } // namespace dragon <MSG> Add Contrib ops <DFF> @@ -429,21 +429,43 @@ void FusedBatchNormGradientOp<Context>::TrainingRunWithType() { template <class Context> template <typename T> void FusedBatchNormGradientOp<Context>::InferenceRunWithType() { + INIT_MULTIPLIER(multiplier, NS); + INIT_MULTIPLIER(num_multiplier, N); + INIT_MULTIPLIER(spatial_multiplier, S); + + auto* dYdata = input(-1).template data<T, Context>(); + auto* Sdata = input(3).template data<T, Context>(); + auto* tVar_data = var->template mutable_data<T, Context>(); + auto* NSMul_data = multiplier->template data<T, Context>(); + auto* SMul_data = spatial_multiplier->template data<T, Context>(); + auto* NMul_data = num_multiplier->template data<T, Context>(); + auto* NC_data = num_by_chans.template mutable_data<T, Context>(); + + // gradient w.r.t. scale + if (output(1)->name() != "ignore") + LOG(FATAL) << "The gamma should be fixed if using global stats."; + + // gradient w.r.t. bias + if (output(2)->name() != "ignore") { + auto* dBdata = output(2)->template mutable_data<T, Context>(); + if (data_format == "NCHW") { + math::Gemv<T, Context>(CblasNoTrans, NC, S, + 1.0, dYdata, SMul_data, + 0.0, NC_data); + math::Gemv<T, Context>(CblasTrans, N, C, + 1.0, NC_data, NMul_data, + 1.0, dBdata); + } else if (data_format == "NHWC") { + math::Gemv<T, Context>(CblasTrans, NS, C, + 1.0, dYdata, NSMul_data, + 1.0, dBdata); + } + } + + // gradient w.r.t. x if (output(0)->name() != "ignore") { - INIT_MULTIPLIER(multiplier, NS); - INIT_MULTIPLIER(num_multiplier, N); - INIT_MULTIPLIER(spatial_multiplier, S); - - auto* dYdata = input(-1).template data<T, Context>(); auto* dXdata = output(0)->template mutable_data<T, Context>(); auto* Std_data = stddev->template mutable_data<T, Context>(); - auto* Sdata = input(3).template data<T, Context>(); - auto* hVar_data = input(2).template data<T, Context>(); - auto* tVar_data = var->template mutable_data<T, Context>(); - auto* NSMul_data = multiplier->template data<T, Context>(); - auto* SMul_data = spatial_multiplier->template data<T, Context>(); - auto* NMul_data = num_multiplier->template data<T, Context>(); - auto* NC_data = num_by_chans.template mutable_data<T, Context>(); // divide scale by stddev math::Div<T, Context>(var->count(), Sdata, tVar_data, tVar_data); @@ -492,7 +514,9 @@ void FusedBatchNormGradientOp<Context>::Setup() { // reshape num_by_chans.Reshape(vector<TIndex>(1, NC)); - output(0)->ReshapeLike(input(0)); + output(0)->ReshapeLike(input(0)); // dX + output(1)->ReshapeLike(input(3)); // dScale + output(2)->ReshapeLike(input(3)); // dBias } template <class Context>
37
Add Contrib ops
13
.cc
cc
bsd-2-clause
neopenx/Dragon
1782
<NME> l2_norm_op.cc <BEF> #include "operators/norm/l2_norm_op.h" #include "core/workspace.h" #include "utils/math_functions.h" namespace dragon { template <class Context> template <typename T> void L2NormOp<Context>::RunWithType() { INIT_MULTIPLIER(multiplier, dim); // normalize by outer dim independently buffer = ws()->GetBuffer(); vector<TIndex> dims = input(0).dims(); for (int i = 0; i < axis; i++) dims[i] = 1; buffer->Reshape(dims); // normalize by inner_dim independently if not across it norm = ws()->CreateTensor("/mnt/" + anchor() + "/l2norm_normalizer"); dims = input(0).dims(); for (int i = axis; i < end_axis; i++) dims[i] = 1; norm->Reshape(dims); auto* Xdata = input(0).template data<T, Context>(); auto* DMuldata = multiplier->data<T, Context>(); auto* Ydata = output(0)->template mutable_data<T, Context>(); auto* Bdata = buffer->template mutable_data<T, Context>(); auto* Ndata = norm->template mutable_data<T, Context>(); math::Set<T, Context>(norm->count(), dragon_cast<T, float>(eps), Ndata); for (int n = 0; n < outer_dim; n++) { if (across_inner) { auto* Ndata_ = norm->template mutable_data<float, CPUContext>(); float sum_of_sqr = math::Dot<T, Context>(buffer->count(), Xdata, Xdata); if (mode == "MEAN") sum_of_sqr = sum_of_sqr / dim; Ndata_[n] = pow(sum_of_sqr + eps, 0.5); math::Scale<T, Context>(buffer->count(), 1.0 / Ndata_[n], Xdata, Ydata); } else { math::Square<T, Context>(buffer->count(), Xdata, Bdata); // compute T1 = \sum_{i} x_{i,j}^{2} math::Gemv<T, Context>(CblasTrans, dim, inner_dim, mode == "MEAN" ? 1.0 / dim : 1.0, Bdata, DMuldata, 1.0, Ndata); // compute T2 = \sqrt{T1} math::Sqrt<T, Context>(inner_dim, Ndata, Ndata); // compute T3 = x / [(T2)]_{dim} math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, dim, inner_dim, 1, 1.0, DMuldata, Ndata, 0.0, Bdata); math::Div<T, Context>(buffer->count(), Xdata, Bdata, Ydata); Ndata += inner_dim; } Xdata += buffer->count(); Ydata += buffer->count(); } // release buffer ws()->ReleaseBuffer(buffer); } template <class Context> void L2NormOp<Context>::RunOnDevice() { if (num_axes >= 0) { if (num_axes == 0) num_axes += 1; } else num_axes = (int)input(0).ndim() - axis; end_axis = axis + num_axes; CHECK_LE(end_axis, int(input(0).ndim())); // do statistics through [axis, end_axis) outer_dim = input(0).count(0, axis); dim = input(0).count(axis, axis + num_axes); inner_dim = input(0).count(axis + num_axes); if (inner_dim == 1) across_inner = true; else across_inner = false; output(0)->ReshapeLike(input(0)); if (input(0).template IsType<float>()) RunWithType<float>(); #ifdef WITH_CUDA_FP16 else if (input(0).template IsType<float16>()) RunWithType<float16>(); #endif else LOG(FATAL) << "Unsupported input types."; } DEPLOY_CPU(L2Norm); #ifdef WITH_CUDA DEPLOY_CUDA(L2Norm); #endif OPERATOR_SCHEMA(L2Norm).NumInputs(1).NumOutputs(1); template <class Context> template <typename T> void L2NormGradientOp<Context>::RunWithType() { INIT_MULTIPLIER(multiplier, dim); // normalize by inner_dim independently if not across it norm = ws()->GetTensor("/mnt/" + anchor() + "/l2norm_normalizer"); buffer = ws()->GetBuffer(); vector<TIndex> dims = input(0).dims(); for (int i = 0; i < axis; i++) dims[i] = 1; buffer->Reshape(dims); buffer_inner = ws()->GetBuffer(); buffer_inner->Reshape(vector<TIndex>(1, inner_dim)); auto* Xdata = input(0).template data<T, Context>(); auto* dYdata = input(-1).template data<T, Context>(); auto* DMuldata = multiplier->data<T, Context>(); auto* Ndata = norm->template data<T, Context>(); auto* dXdata = output(0)->template mutable_data<T, Context>(); auto* Bdata = buffer->template mutable_data<T, Context>(); auto* BInnerdata = buffer_inner->template mutable_data<T, Context>(); for (int n = 0; n < outer_dim; n++) { if (across_inner) { Ndata = norm->template data<T, CPUContext>(); T sum_of_x_mul_dy = math::Dot<T, Context>(buffer->count(), Xdata, dYdata); math::Scale<T, Context>(buffer->count(), sum_of_x_mul_dy / Ndata[n] / Ndata[n], Xdata, dXdata); math::Sub<T, Context>(buffer->count(), dYdata, dXdata, dXdata); math::Scal<T, Context>(buffer->count(), T(1.0 / Ndata[n]), dXdata); } else { // compute \sum_{i} x_{i, j}dy_{i, j} math::Mul<T, Context>(buffer->count(), Xdata, dYdata, Bdata); math::Gemv<T, Context>(CblasTrans, dim, inner_dim, 1.0, Bdata, DMuldata, 0.0, BInnerdata); // compute T1 = x[(\sum_{i} x_{i, j}dy_{i, j})]_{dim} math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, dim, inner_dim, 1, 1.0, DMuldata, BInnerdata, 0.0, Bdata); math::Mul<T, Context>(buffer->count(), Xdata, Bdata, dXdata); // compute T2 = T1 / Normalizer^{2} math::Pow<T, Context>(inner_dim, 2.0, Ndata, BInnerdata); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, dim, inner_dim, 1, 1.0, DMuldata, BInnerdata, 0.0, Bdata); math::Div<T, Context>(buffer->count(), dXdata, Bdata, dXdata); // compute T3 = (dy - T2) / Normalizer math::Sub<T, Context>(buffer->count(), dYdata, dXdata, dXdata); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, dim, inner_dim, 1, 1.0, DMuldata, Ndata, 0.0, Bdata); math::Div<T, Context>(buffer->count(), dXdata, Bdata, dXdata); Ndata += inner_dim; } Xdata += buffer->count(); dYdata += buffer->count(); dXdata += buffer->count(); } // release buffer ws()->ReleaseBuffer(buffer_inner); ws()->ReleaseBuffer(buffer); } template <class Context> void L2NormGradientOp<Context>::RunOnDevice() { if (num_axes >= 0) { if (num_axes == 0) num_axes += 1; } else { num_axes = (int)input(0).ndim() - axis; } end_axis = axis + num_axes; CHECK_LE(end_axis, int(input(0).ndim())); // do statistics through [axis, end_axis) outer_dim = input(0).count(0, axis); dim = input(0).count(axis, axis + num_axes); inner_dim = input(0).count(axis + num_axes); if (inner_dim == 1) across_inner = true; else across_inner = false; output(0)->ReshapeLike(input(0)); if (input(0).template IsType<float>()) RunWithType<float>(); else LOG(FATAL) << "Unsupported input types."; } DEPLOY_CPU(L2NormGradient); #ifdef WITH_CUDA DEPLOY_CUDA(L2NormGradient); #endif OPERATOR_SCHEMA(L2NormGradient).NumInputs(2).NumOutputs(1); class GetL2NormGradient final : public GradientMakerBase { public: GRADIENT_MAKER_CTOR(GetL2NormGradient); vector<OperatorDef> MakeDefs() override { return SingleDef(def.type() + "Gradient", "", vector<string> {I(0), GO(0)}, vector<string> {GI(0)}); } }; REGISTER_GRADIENT(L2Norm, GetL2NormGradient); } // namespace dragon <MSG> Fix the disorder while compiling ops <DFF> @@ -116,6 +116,7 @@ void L2NormGradientOp<Context>::RunWithType() { if (across_inner) { Ndata = norm->template data<T, CPUContext>(); T sum_of_x_mul_dy = math::Dot<T, Context>(buffer->count(), Xdata, dYdata); + if (mode == "MEAN") sum_of_x_mul_dy = sum_of_x_mul_dy / dim; math::Scale<T, Context>(buffer->count(), sum_of_x_mul_dy / Ndata[n] / Ndata[n], Xdata, dXdata); math::Sub<T, Context>(buffer->count(), dYdata, dXdata, dXdata); math::Scal<T, Context>(buffer->count(), T(1.0 / Ndata[n]), dXdata); @@ -123,7 +124,7 @@ void L2NormGradientOp<Context>::RunWithType() { // compute \sum_{i} x_{i, j}dy_{i, j} math::Mul<T, Context>(buffer->count(), Xdata, dYdata, Bdata); math::Gemv<T, Context>(CblasTrans, dim, inner_dim, - 1.0, + mode == "MEAN" ? 1.0 / dim : 1.0, Bdata, DMuldata, 0.0, BInnerdata);
2
Fix the disorder while compiling ops
1
.cc
cc
bsd-2-clause
neopenx/Dragon
1783
<NME> l2_norm_op.cc <BEF> #include "operators/norm/l2_norm_op.h" #include "core/workspace.h" #include "utils/math_functions.h" namespace dragon { template <class Context> template <typename T> void L2NormOp<Context>::RunWithType() { INIT_MULTIPLIER(multiplier, dim); // normalize by outer dim independently buffer = ws()->GetBuffer(); vector<TIndex> dims = input(0).dims(); for (int i = 0; i < axis; i++) dims[i] = 1; buffer->Reshape(dims); // normalize by inner_dim independently if not across it norm = ws()->CreateTensor("/mnt/" + anchor() + "/l2norm_normalizer"); dims = input(0).dims(); for (int i = axis; i < end_axis; i++) dims[i] = 1; norm->Reshape(dims); auto* Xdata = input(0).template data<T, Context>(); auto* DMuldata = multiplier->data<T, Context>(); auto* Ydata = output(0)->template mutable_data<T, Context>(); auto* Bdata = buffer->template mutable_data<T, Context>(); auto* Ndata = norm->template mutable_data<T, Context>(); math::Set<T, Context>(norm->count(), dragon_cast<T, float>(eps), Ndata); for (int n = 0; n < outer_dim; n++) { if (across_inner) { auto* Ndata_ = norm->template mutable_data<float, CPUContext>(); float sum_of_sqr = math::Dot<T, Context>(buffer->count(), Xdata, Xdata); if (mode == "MEAN") sum_of_sqr = sum_of_sqr / dim; Ndata_[n] = pow(sum_of_sqr + eps, 0.5); math::Scale<T, Context>(buffer->count(), 1.0 / Ndata_[n], Xdata, Ydata); } else { math::Square<T, Context>(buffer->count(), Xdata, Bdata); // compute T1 = \sum_{i} x_{i,j}^{2} math::Gemv<T, Context>(CblasTrans, dim, inner_dim, mode == "MEAN" ? 1.0 / dim : 1.0, Bdata, DMuldata, 1.0, Ndata); // compute T2 = \sqrt{T1} math::Sqrt<T, Context>(inner_dim, Ndata, Ndata); // compute T3 = x / [(T2)]_{dim} math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, dim, inner_dim, 1, 1.0, DMuldata, Ndata, 0.0, Bdata); math::Div<T, Context>(buffer->count(), Xdata, Bdata, Ydata); Ndata += inner_dim; } Xdata += buffer->count(); Ydata += buffer->count(); } // release buffer ws()->ReleaseBuffer(buffer); } template <class Context> void L2NormOp<Context>::RunOnDevice() { if (num_axes >= 0) { if (num_axes == 0) num_axes += 1; } else num_axes = (int)input(0).ndim() - axis; end_axis = axis + num_axes; CHECK_LE(end_axis, int(input(0).ndim())); // do statistics through [axis, end_axis) outer_dim = input(0).count(0, axis); dim = input(0).count(axis, axis + num_axes); inner_dim = input(0).count(axis + num_axes); if (inner_dim == 1) across_inner = true; else across_inner = false; output(0)->ReshapeLike(input(0)); if (input(0).template IsType<float>()) RunWithType<float>(); #ifdef WITH_CUDA_FP16 else if (input(0).template IsType<float16>()) RunWithType<float16>(); #endif else LOG(FATAL) << "Unsupported input types."; } DEPLOY_CPU(L2Norm); #ifdef WITH_CUDA DEPLOY_CUDA(L2Norm); #endif OPERATOR_SCHEMA(L2Norm).NumInputs(1).NumOutputs(1); template <class Context> template <typename T> void L2NormGradientOp<Context>::RunWithType() { INIT_MULTIPLIER(multiplier, dim); // normalize by inner_dim independently if not across it norm = ws()->GetTensor("/mnt/" + anchor() + "/l2norm_normalizer"); buffer = ws()->GetBuffer(); vector<TIndex> dims = input(0).dims(); for (int i = 0; i < axis; i++) dims[i] = 1; buffer->Reshape(dims); buffer_inner = ws()->GetBuffer(); buffer_inner->Reshape(vector<TIndex>(1, inner_dim)); auto* Xdata = input(0).template data<T, Context>(); auto* dYdata = input(-1).template data<T, Context>(); auto* DMuldata = multiplier->data<T, Context>(); auto* Ndata = norm->template data<T, Context>(); auto* dXdata = output(0)->template mutable_data<T, Context>(); auto* Bdata = buffer->template mutable_data<T, Context>(); auto* BInnerdata = buffer_inner->template mutable_data<T, Context>(); for (int n = 0; n < outer_dim; n++) { if (across_inner) { Ndata = norm->template data<T, CPUContext>(); T sum_of_x_mul_dy = math::Dot<T, Context>(buffer->count(), Xdata, dYdata); math::Scale<T, Context>(buffer->count(), sum_of_x_mul_dy / Ndata[n] / Ndata[n], Xdata, dXdata); math::Sub<T, Context>(buffer->count(), dYdata, dXdata, dXdata); math::Scal<T, Context>(buffer->count(), T(1.0 / Ndata[n]), dXdata); } else { // compute \sum_{i} x_{i, j}dy_{i, j} math::Mul<T, Context>(buffer->count(), Xdata, dYdata, Bdata); math::Gemv<T, Context>(CblasTrans, dim, inner_dim, 1.0, Bdata, DMuldata, 0.0, BInnerdata); // compute T1 = x[(\sum_{i} x_{i, j}dy_{i, j})]_{dim} math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, dim, inner_dim, 1, 1.0, DMuldata, BInnerdata, 0.0, Bdata); math::Mul<T, Context>(buffer->count(), Xdata, Bdata, dXdata); // compute T2 = T1 / Normalizer^{2} math::Pow<T, Context>(inner_dim, 2.0, Ndata, BInnerdata); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, dim, inner_dim, 1, 1.0, DMuldata, BInnerdata, 0.0, Bdata); math::Div<T, Context>(buffer->count(), dXdata, Bdata, dXdata); // compute T3 = (dy - T2) / Normalizer math::Sub<T, Context>(buffer->count(), dYdata, dXdata, dXdata); math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, dim, inner_dim, 1, 1.0, DMuldata, Ndata, 0.0, Bdata); math::Div<T, Context>(buffer->count(), dXdata, Bdata, dXdata); Ndata += inner_dim; } Xdata += buffer->count(); dYdata += buffer->count(); dXdata += buffer->count(); } // release buffer ws()->ReleaseBuffer(buffer_inner); ws()->ReleaseBuffer(buffer); } template <class Context> void L2NormGradientOp<Context>::RunOnDevice() { if (num_axes >= 0) { if (num_axes == 0) num_axes += 1; } else { num_axes = (int)input(0).ndim() - axis; } end_axis = axis + num_axes; CHECK_LE(end_axis, int(input(0).ndim())); // do statistics through [axis, end_axis) outer_dim = input(0).count(0, axis); dim = input(0).count(axis, axis + num_axes); inner_dim = input(0).count(axis + num_axes); if (inner_dim == 1) across_inner = true; else across_inner = false; output(0)->ReshapeLike(input(0)); if (input(0).template IsType<float>()) RunWithType<float>(); else LOG(FATAL) << "Unsupported input types."; } DEPLOY_CPU(L2NormGradient); #ifdef WITH_CUDA DEPLOY_CUDA(L2NormGradient); #endif OPERATOR_SCHEMA(L2NormGradient).NumInputs(2).NumOutputs(1); class GetL2NormGradient final : public GradientMakerBase { public: GRADIENT_MAKER_CTOR(GetL2NormGradient); vector<OperatorDef> MakeDefs() override { return SingleDef(def.type() + "Gradient", "", vector<string> {I(0), GO(0)}, vector<string> {GI(0)}); } }; REGISTER_GRADIENT(L2Norm, GetL2NormGradient); } // namespace dragon <MSG> Fix the disorder while compiling ops <DFF> @@ -116,6 +116,7 @@ void L2NormGradientOp<Context>::RunWithType() { if (across_inner) { Ndata = norm->template data<T, CPUContext>(); T sum_of_x_mul_dy = math::Dot<T, Context>(buffer->count(), Xdata, dYdata); + if (mode == "MEAN") sum_of_x_mul_dy = sum_of_x_mul_dy / dim; math::Scale<T, Context>(buffer->count(), sum_of_x_mul_dy / Ndata[n] / Ndata[n], Xdata, dXdata); math::Sub<T, Context>(buffer->count(), dYdata, dXdata, dXdata); math::Scal<T, Context>(buffer->count(), T(1.0 / Ndata[n]), dXdata); @@ -123,7 +124,7 @@ void L2NormGradientOp<Context>::RunWithType() { // compute \sum_{i} x_{i, j}dy_{i, j} math::Mul<T, Context>(buffer->count(), Xdata, dYdata, Bdata); math::Gemv<T, Context>(CblasTrans, dim, inner_dim, - 1.0, + mode == "MEAN" ? 1.0 / dim : 1.0, Bdata, DMuldata, 0.0, BInnerdata);
2
Fix the disorder while compiling ops
1
.cc
cc
bsd-2-clause
neopenx/Dragon
1784
<NME> commandline.py <BEF> """High level command line interface to hitch.""" from subprocess import call, PIPE, STDOUT, Popen from hitch.click import command, group, argument, option from os import path, makedirs, listdir, kill, remove from sys import stderr, stdout, exit, modules, argv from functools import partial, reduce from hitch import hitchdir, languagestrings import shutil import signal import copy class CalledProcessError(Exception): """Re-implemented CalledProcessError, since it is not available < python 2.7.""" pass def check_output(command, stdout=PIPE, stderr=PIPE): """Re-implemented subprocess.check_output since it is not available < python 2.7.""" return Popen(command, stdout=stdout, stderr=stderr).communicate()[0] def check_call(command, shell=False): """Re-implemented subprocess.check_call since it is not available < python 2.7.""" process = Popen(command, shell=shell) process.communicate() if process.returncode != 0: raise CalledProcessError return def stop_everything(sig, frame): """Exit hitch.""" exit(1) def installpackages(): """Install packages with hitchsystem.""" hitchsystem = path.abspath(path.join(".hitch", "virtualenv", "bin", "hitchsystem")) signal.signal(signal.SIGINT, signal.SIG_IGN) check_call([hitchsystem, "installpackages", ]) signal.signal(signal.SIGINT, stop_everything) def update_requirements(): """Check hitchreqs.txt match what's installed via pip freeze. If not, update.""" stdout.write(languagestrings.UPDATING_REQUIREMENTS) pip = path.join(hitchdir.get_hitch_directory_or_fail(), "virtualenv", "bin", "pip") hitchreqs_filename = path.join(hitchdir.get_hitch_directory_or_fail(), "..", "hitchreqs.txt") pip_freeze = check_output([pip, "freeze"]).decode('utf8').split('\n') hitchreqs_handle = "" with open(hitchreqs_filename, "r") as hitchreqs_handle: hitchreqs = hitchreqs_handle.read().split('\n') if not sorted(pip_freeze) == sorted(hitchreqs): call([pip, "install", "-r", "hitchreqs.txt"]) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) @group() def cli(): pass @command() @option( '-p', '--python', default=None, help=languagestrings.SPECIFY_PYTHON_TO_CREATE_VIRTUALENV_WITH ) @option( '-v', '--virtualenv', default=None, stderr.write("{0} not found.\n".format(python)) exit(1) str_version = check_output([python3, "-V"], stderr=STDOUT).decode('utf8').replace('\n', '') tuple_version = tuple([int(v) for v in str_version.replace('Python ', '').split('.')]) if tuple_version < (3, 3): stderr.write(languagestrings.YOU_MUST_HAVE_VERSION_ABOVE_PYTHON33) else: if path.exists(virtualenv): if python is None: python = path.join(path.dirname(virtualenv), "python") else: stderr.write("{0} not found.\n".format(virtualenv)) if python is None: if call(["which", "python3"], stdout=PIPE, stderr=PIPE) != 0: stderr.write(languagestrings.YOU_MUST_HAVE_PYTHON3_INSTALLED) stderr.flush() exit(1) python3 = check_output(["which", "python3"]).decode('utf8').replace("\n", "") else: if path.exists(python): python3 = python else: stderr.write("{0} not found.\n".format(python)) exit(1) python_version = check_output([python3, "-V"], stderr=STDOUT).decode('utf8') replacements = ('Python ', ''), ('\n', '') str_version = reduce(lambda a, kv: a.replace(*kv), replacements, python_version) tuple_version = tuple([int(x) for x in str_version.split('.')[:2]]) if tuple_version < (3, 3): stderr.write(languagestrings.YOU_MUST_HAVE_VERSION_ABOVE_PYTHON33) exit(1) if hitchdir.hitch_exists(): hitchdir.check_hitch_directory_integrity() update_requirements() exit(0) makedirs(".hitch") # Store absolute directory in .hitch directory to guard against the directory being moved hitch_dir = path.abspath(".hitch") with open(path.join(hitch_dir, "absdir"), "w") as absdir_handle: absdir_handle.write(hitch_dir) pip = path.abspath(path.join(".hitch", "virtualenv", "bin", "pip")) try: check_call([ virtualenv, ".hitch/virtualenv", "--no-site-packages", "--distribute", "-p", python3 ]) check_call([pip, "install", "--upgrade", "pip"]) check_call([pip, "install", "--upgrade", "setuptools"]) check_call([pip, "install", "unixpackage", "hitchsystem"]) installpackages() if path.exists("hitchreqs.txt"): check_call([pip, "install", "-r", "hitchreqs.txt"]) else: check_call([pip, "install", "hitchtest"]) check_call([pip, "install", "hitchquickstart"]) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) signal.signal(signal.SIGINT, signal.SIG_IGN) check_call([path.abspath(path.join(".hitch", "virtualenv", "bin", "hitchquickstart")), ]) signal.signal(signal.SIGINT, stop_everything) installpackages() except CalledProcessError: stderr.write(languagestrings.ERROR_INITIALIZING_HITCH) hitchdir.remove_hitch_directory_if_exists() exit(1) def get_pip(): """Get the file path to the hitch pip.""" return path.join(hitchdir.get_hitch_directory_or_fail(), "virtualenv", "bin", "pip") @command(context_settings={'help_option_names':[],'ignore_unknown_options':True}, help="dd") @argument('arguments', nargs=-1) def runpackage(arguments): # Generic method to run any installed app in the virtualenv whose name starts with hitch* hitchdir.check_hitch_directory_integrity() binfile = path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin", "hitch{0}".format(argv[1])) command = [binfile, ] + argv[2:] # When receiving an exit signal, just forward it to process child. def forward_signal_to_child(pid, signum, frame): kill(pid, signum) process = Popen(command) signal.signal(signal.SIGINT, partial(forward_signal_to_child, process.pid)) signal.signal(signal.SIGTERM, partial(forward_signal_to_child, process.pid)) signal.signal(signal.SIGHUP, partial(forward_signal_to_child, process.pid)) signal.signal(signal.SIGQUIT, partial(forward_signal_to_child, process.pid)) return_code = process.wait() exit(return_code) @command() @argument('package', required=True) def uninstall(package): """Uninstall hitch package.""" hitchdir.check_hitch_directory_integrity() pip = get_pip() call([pip, "uninstall", package] ) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) update_requirements() @command() @argument('package', required=True) def install(package): """Install hitch package.""" hitchdir.check_hitch_directory_integrity() update_requirements() pip = get_pip() call([pip, "install", package, "-U", ]) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) installpackages() @command() def upgrade(): """Upgrade all installed hitch packages.""" hitchdir.check_hitch_directory_integrity() update_requirements() pip = get_pip() package_list = [ p for p in check_output([pip, "freeze"]).decode('utf8').split('\n') if p != "" and "==" in p ] version_fixed_package_list = [p.split("==")[0] for p in package_list] for package in version_fixed_package_list: call([pip, "install", package, "-U", ]) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) installpackages() @command() def freeze(): """List installed hitch packages.""" hitchdir.check_hitch_directory_integrity() pip = path.join(hitchdir.get_hitch_directory_or_fail(), "virtualenv", "bin", "pip") call([pip, "freeze", ]) @command() def clean(): """Remove the hitch directory entirely.""" if hitchdir.hitch_exists(): hitchdir.remove_hitch_directory_if_exists() else: stderr.write("No hitch directory found. Doing nothing.\n") stderr.flush() @command() @option( '-p', '--packages', default=None, help=( "Specify precise packages to remove - " "e.g. postgresql, postgresql-9.3.9, python, python2.6.8" ) ) def cleanpkg(packages): """Remove installed packages from the .hitchpkg directory.""" hitchpkg = path.join(path.expanduser("~"), ".hitchpkg") if path.exists(hitchpkg): if packages is None: shutil.rmtree(hitchpkg) else: for file_or_dir in listdir(hitchpkg): if file_or_dir.startswith(packages): if path.isdir(path.join(hitchpkg, file_or_dir)): shutil.rmtree(path.join(hitchpkg, file_or_dir)) else: remove(path.join(hitchpkg, file_or_dir)) def run(): """Run hitch bootstrap CLI""" signal.signal(signal.SIGINT, stop_everything) signal.signal(signal.SIGTERM, stop_everything) signal.signal(signal.SIGHUP, stop_everything) signal.signal(signal.SIGQUIT, stop_everything) if hitchdir.hitch_exists(): # Get packages from bin folder that are hitch related python_bin = path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin", "python") if path.exists(python_bin): packages = [ package.replace("hitch", "") for package in listdir( path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin") ) if package.startswith("hitch") and package != "hitch" ] # Add commands that start with "hitch" to the list of commands available (e.g. hitchtest, hitchsmtp) for package in packages: cmd = copy.deepcopy(runpackage) cmd.name = package try: description = check_output([ python_bin, '-c', 'import sys;sys.stdout.write(__import__("hitch{0}").commandline.cli.help)'.format( package ) ]).decode('utf8') except CalledProcessError: description = "" cmd.help = description cmd.short_help = description cli.add_command(cmd) cli.add_command(install) cli.add_command(uninstall) cli.add_command(upgrade) cli.add_command(freeze) else: stderr.write(languagestrings.SOMETHING_CORRUPTED) cli.add_command(clean) cli.add_command(cleanpkg) cli.add_command(init) cli.help = "Hitch test runner for:\n\n {0}.".format(hitchdir.get_hitch_directory()) else: cli.add_command(init) cli.add_command(clean) cli.add_command(cleanpkg) cli.help = "Hitch bootstrapper - '.hitch' directory not detected here." cli() if __name__ == '__main__': run() <MSG> Merge pull request #11 from jonhadfield/master BUG : Fix issue caused by Python 3 version including a letter. <DFF> @@ -75,8 +75,10 @@ def init(python, virtualenv): stderr.write("{0} not found.\n".format(python)) exit(1) - str_version = check_output([python3, "-V"], stderr=STDOUT).decode('utf8').replace('\n', '') - tuple_version = tuple([int(v) for v in str_version.replace('Python ', '').split('.')]) + python_version = check_output([python3, "-V"], stderr=STDOUT).decode('utf8') + replacements = ('Python ', ''), ('\n', '') + str_version = reduce(lambda a, kv: a.replace(*kv), replacements, python_version) + tuple_version = tuple([int(x) for x in str_version.split('.')[:2]]) if tuple_version < (3, 3): stderr.write(languagestrings.YOU_MUST_HAVE_VERSION_ABOVE_PYTHON33)
4
Merge pull request #11 from jonhadfield/master
2
.py
py
agpl-3.0
hitchtest/hitch
1785
<NME> languagestrings.py <BEF> SPECIFY_PYTHON_TO_CREATE_VIRTUALENV_WITH = """\ Create hitch virtualenv using specific python version (e.g. /usr/bin/python3). Defaults to using python3 on the system path.""" SPECIFY_VIRTUALENV_TO_CREATE_HITCH_WITH = """\ Create hitch virtualenv using specific virtualenv (e.g. /usr/bin/virtualenv). Defaults to using virtualenv on the system path.""" YOU_MUST_HAVE_VIRTUALENV_INSTALLED = """\ You must have virtualenv installed to use hitch. Suggestions: #1 Install via your system's package manager: - On Ubuntu/Debian : sudo apt-get install python-virtualenv - On Fedora : sudo yum install python-virtualenv - On Arch : sudo pacman -Sy python-virtualenv - On Mac OS X : pip install --upgrade virtualenv #2 Install via pip, e.g.: Your app can run with earlier versions of python, but the tests can't. """ HITCH_ALREADY_INITIALIZED = """\ Hitch has already been initialized in this directory or a directory above it. If you wish to re-initialize hitch in this directory, run 'hitch clean' first. """ ERROR_INITIALIZING_HITCH = """\ \nError initializing hitch. Problem checklist:\n * Was there a problem with your internet? To install: - On Ubuntu/Debian : sudo apt-get install python3 - On Fedora : sudo yum install python3 - On Arch : sudo pacman -Sy python3 - On Mac OS X : brew install python3 If your python3 is *not* on the system path with the name 'python3', you can specify the location of its virtualenv executable like so: hitch init --virtualenv=/path/to/python3/bin/virtualenv Or specify the location of the python3 interpreter, e.g. hitch init --python=/path/to/python3/bin/python3 SOMETHING_CORRUPTED = """\ WARNING: Hitch directory was corrupted. Run 'hitch clean' and hitch init again.\n """ Suggestions: #1 You may need to run a sytem upgrade or upgrade your OS. #2 If you have python 3.3+ installed but it is not accessible on the system path with the command 'python3', you can run: hitch init --virtualenv=/path/to/python3/bin/virtualenv OR hitch init --python=/path/to/python3/bin/python3 """ ERROR_INITIALIZING_HITCH = """\ \nError initializing hitch. Problem checklist:\n * Was there a problem with your internet? * Was there a python package being installed that couldn't compile?\n Try searching for any errors printed above or raising an issue at: http://github.com/hitchtest/hitch/issues/ """ HITCH_DIRECTORY_MOVED = """\ The hitch directory '{0}' was moved. "Run 'hitch clean' then run 'hitch init' in this directory: ==> {1} """ HITCH_NOT_INITIALIZED = """\ Hitch has not been initialized in this directory, or any of the directories beneath it:\n""" SOMETHING_CORRUPTED = """\ WARNING: Hitch directory was corrupted. Run 'hitch clean' and hitch init again.\n """ UPDATING_REQUIREMENTS = """\ Updating installed packages to bring them in alignment with the contents of hitchreqs.txt\n""" <MSG> FEATURE : Change when update_requirements is run -- now on installation and upgrade, not on test runs. <DFF> @@ -23,11 +23,6 @@ Hitch must have python 3.3 or higher installed to run. Your app can run with earlier versions of python, but the tests can't. """ -HITCH_ALREADY_INITIALIZED = """\ -Hitch has already been initialized in this directory or a directory above it. -If you wish to re-initialize hitch in this directory, run 'hitch clean' first. -""" - ERROR_INITIALIZING_HITCH = """\ \nError initializing hitch. Problem checklist:\n * Was there a problem with your internet? @@ -48,3 +43,6 @@ Hitch has not been initialized in this directory, or any of the directories bene SOMETHING_CORRUPTED = """\ WARNING: Hitch directory was corrupted. Run 'hitch clean' and hitch init again.\n """ + +UPDATING_REQUIREMENTS = """\ +Updating installed packages to bring them in alignment with the contents of hitchreqs.txt\n"""
3
FEATURE : Change when update_requirements is run -- now on installation and upgrade, not on test runs.
5
.py
py
agpl-3.0
hitchtest/hitch
1786
<NME> README.md <BEF> shiro-redis ============= [![Build Status](https://travis-ci.org/alexxiyang/shiro-redis.svg?branch=master)](https://travis-ci.org/alexxiyang/shiro-redis) [![Maven Central](https://maven-badges.herokuapp.com/maven-central/org.crazycake/shiro-redis/badge.svg)](https://maven-badges.herokuapp.com/maven-central/org.crazycake/shiro-redis) shiro only provide the support of ehcache and concurrentHashMap. Here is an implement of redis cache can be used by shiro. Hope it will help you! # Download You use either of the following 2 ways to include `shiro-redis` into your project * use `git clone https://github.com/alexxiyang/shiro-redis.git` to clone project to your local workspace and build jar file by your self * add maven dependency ```xml <dependency> <groupId>org.crazycake</groupId> <artifactId>shiro-redis</artifactId> <version>3.3.1</version> </dependency> ``` > **Note:**\ > 3.3.0 is compiled by java11\ > 3.3.1 is compiled by java8 ## shiro-core/jedis Version Comparison Charts | shiro-redis | shiro | jedis | | :----------------:| :-------: | :-------: | | 3.2.3 | 1.3.2 | 2.9.0 | | 3.3.0 (java11) | 1.6.0 | 3.3.0 | | 3.3.1 (java8) | 1.6.0 | 3.3.0 | # Before use Here is the first thing you need to know. Shiro-redis needs an id field to identify your authorization object in Redis. So please make sure your principal class has a field which you can get unique id of this object. Please setting this id field name by `cacheManager.principalIdFieldName = <your id field name of principal object>` For example: If you create `SimpleAuthenticationInfo` like this: ```java @Override protected AuthenticationInfo doGetAuthenticationInfo(AuthenticationToken token) throws AuthenticationException { UsernamePasswordToken usernamePasswordToken = (UsernamePasswordToken)token; UserInfo userInfo = new UserInfo(); userInfo.setUsername(usernamePasswordToken.getUsername()); return new SimpleAuthenticationInfo(userInfo, "123456", getName()); } ``` Then the `userInfo` object is your principal object. You need to make sure `UserInfo` has an unique field for Redis to identify it. Take `userId` as an example: ```java public class UserInfo implements Serializable{ private Integer userId private String username; public String getUsername() { return username; } public void setUsername(String username) { this.username = username; } public Integer getUserId() { return this.userId; } } ``` Put userId as the value of `cacheManager.principalIdFieldName`, like this: ```properties cacheManager.principalIdFieldName = userId ``` If you're using Spring, the configuration should be ```xml <property name="principalIdFieldName" value="userId" /> ``` Then `shiro-redis` will call `userInfo.getUserId()` to get the id for saving Redis object. # How to configure ? You can configure `shiro-redis` either in `shiro.ini` or in `spring-*.xml` ## shiro.ini Here is the configuration example for shiro.ini. ### Redis Standalone If you are running Redis in Standalone mode ```properties [main] #==================================== # shiro-redis configuration [start] #==================================== #=================================== # Redis Manager [start] #=================================== # Create redisManager redisManager = org.crazycake.shiro.RedisManager # Redis host. If you don't specify host the default value is 127.0.0.1:6379 redisManager.host = 127.0.0.1:6379 #=================================== # Redis Manager [end] #=================================== #========================================= # Redis session DAO [start] #========================================= # Create redisSessionDAO redisSessionDAO = org.crazycake.shiro.RedisSessionDAO # Use redisManager as cache manager redisSessionDAO.redisManager = $redisManager sessionManager = org.apache.shiro.web.session.mgt.DefaultWebSessionManager sessionManager.sessionDAO = $redisSessionDAO securityManager.sessionManager = $sessionManager #========================================= # Redis session DAO [end] #========================================= #========================================== # Redis cache manager [start] #========================================== # Create cacheManager cacheManager = org.crazycake.shiro.RedisCacheManager # Principal id field name. The field which you can get unique id to identify this principal. # For example, if you use UserInfo as Principal class, the id field maybe `id`, `userId`, `email`, etc. # Remember to add getter to this id field. For example, `getId()`, `getUserId()`, `getEmail()`, etc. # Default value is id, that means your principal object must has a method called `getId()` cacheManager.principalIdFieldName = id # Use redisManager as cache manager cacheManager.redisManager = $redisManager securityManager.cacheManager = $cacheManager #========================================== # Redis cache manager [end] #========================================== #================================= # shiro-redis configuration [end] #================================= ``` For complete configurable options list, check [Configurable Options](#configurable-options). Here is a [tutorial project](https://github.com/alexxiyang/shiro-redis-tutorial) for you to understand how to configure `shiro-redis` in `shiro.ini`. ### Redis Sentinel if you're using Redis Sentinel, please replace the `redisManager` configuration of the standalone version into the following: ```properties #=================================== # Redis Manager [start] #=================================== # Create redisManager redisManager = org.crazycake.shiro.RedisSentinelManager # Sentinel host. If you don't specify host the default value is 127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381 redisManager.host = 127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381 # Sentinel master name redisManager.masterName = mymaster #=================================== # Redis Manager [end] #=================================== ``` For complete configurable options list, check [Configurable Options](#configurable-options). ### Redis Cluster If you're using redis cluster, please replace the `redisManager` configuration of the standalone version into the following: ```properties #=================================== # Redis Manager [start] #=================================== # Create redisManager redisManager = org.crazycake.shiro.RedisClusterManager # Redis host and port list redisManager.host = 192.168.21.3:7000,192.168.21.3:7001,192.168.21.3:7002,192.168.21.3:7003,192.168.21.3:7004,192.168.21.3:7005 #=================================== # Redis Manager [end] #=================================== ``` For complete configurable options list, check [Configurable Options](#configurable-options). ## Spring If you are using Spring ### Redis Standalone If you are running Redis in Standalone mode ```xml <!-- shiro-redis configuration [start] --> <!-- Redis Manager [start] --> <bean id="redisManager" class="org.crazycake.shiro.RedisManager"> <property name="host" value="127.0.0.1:6379"/> </bean> <!-- Redis Manager [end] --> <!-- Redis session DAO [start] --> <bean id="redisSessionDAO" class="org.crazycake.shiro.RedisSessionDAO"> <property name="redisManager" ref="redisManager" /> </bean> <bean id="sessionManager" class="org.apache.shiro.web.session.mgt.DefaultWebSessionManager"> <property name="sessionDAO" ref="redisSessionDAO" /> </bean> <!-- Redis session DAO [end] --> <!-- Redis cache manager [start] --> <bean id="cacheManager" class="org.crazycake.shiro.RedisCacheManager"> <property name="redisManager" ref="redisManager" /> </bean> <!-- Redis cache manager [end] --> <bean id="securityManager" class="org.apache.shiro.web.mgt.DefaultWebSecurityManager"> <property name="sessionManager" ref="sessionManager" /> <property name="cacheManager" ref="cacheManager" /> <!-- other configurations --> <property name="realm" ref="exampleRealm"/> <property name="rememberMeManager.cipherKey" value="kPH+bIxk5D2deZiIxcaaaA==" /> </bean> <!-- shiro-redis configuration [end] --> ``` For complete configurable options list, check [Configurable Options](#configurable-options). Here is a [tutorial project](https://github.com/alexxiyang/shiro-redis-spring-tutorial) for you to understand how to configure `shiro-redis` in spring configuration file. ### Redis Sentinel If you use redis sentinel, please replace the `redisManager` configuration of the standalone version into the following: ```xml <!-- shiro-redis configuration [start] --> <!-- shiro redisManager --> <bean id="redisManager" class="org.crazycake.shiro.RedisSentinelManager"> <property name="host" value="127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381"/> <property name="masterName" value="mymaster"/> </bean> ``` For complete configurable options list, check [Configurable Options](#configurable-options). ### Redis Cluster If you use redis cluster, please replace the `redisManager` configuration of the standalone version into the following: ```xml <!-- shiro-redis configuration [start] --> <!-- shiro redisManager --> <bean id="redisManager" class="org.crazycake.shiro.RedisClusterManager"> <property name="host" value="192.168.21.3:7000,192.168.21.3:7001,192.168.21.3:7002,192.168.21.3:7003,192.168.21.3:7004,192.168.21.3:7005"/> </bean> ``` For complete configurable options list, check [Configurable Options](#configurable-options). ## Serializer Since redis only accept `byte[]`, there comes a serializer problem. Shiro-redis is using `StringSerializer` as key serializer and `ObjectSerializer` as value serializer. You can use your own custom serializer, as long as this custom serializer implements `org.crazycake.shiro.serializer.RedisSerializer` For example, we can change the charset of keySerializer like this ```properties # If you want change charset of keySerializer or use your own custom serializer, you need to define serializer first # # cacheManagerKeySerializer = org.crazycake.shiro.serializer.StringSerializer # Supported encodings refer to https://docs.oracle.com/javase/8/docs/technotes/guides/intl/encoding.doc.html # UTF-8, UTF-16, UTF-32, ISO-8859-1, GBK, Big5, etc # # cacheManagerKeySerializer.charset = UTF-8 # cacheManager.keySerializer = $cacheManagerKeySerializer ``` These 4 options that you can replace them with your cutom serializers: - cacheManager.keySerializer - cacheManager.valueSerializer - redisSessionDAO.keySerializer - redisSessionDAO.valueSerializer ## Configurable Options Here are all the available options you can use in `shiro-redis` configuration file. ### RedisManager | Title | Default | Description | | :------------------| :------------------- | :---------------------------| | host | `127.0.0.1:6379` | Redis host. If you don't specify host the default value is `127.0.0.1:6379`. If you run redis in sentinel mode or cluster mode, separate host names with comma, like `127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381` | | masterName | `mymaster` | **Only used for sentinel mode**<br>The master node of Redis sentinel mode | | timeout | `2000` | Redis connect timeout. Timeout for jedis try to connect to redis server(In milliseconds) | | soTimeout | `2000` | **Only used for sentinel mode or cluster mode**<br>The timeout for jedis try to read data from redis server | | maxAttempts | `3` | **Only used for cluster mode**<br>Max attempts to connect to server | | password | | Redis password | | database | `0` | Redis database. Default value is 0 | | jedisPoolConfig | `new redis.clients.jedis.JedisPoolConfig()` | JedisPoolConfig. You can create your own JedisPoolConfig instance and set attributes as you wish<br>Most of time, you don't need to set jedisPoolConfig<br>Here is an example.<br>`jedisPoolConfig = redis.clients.jedis.JedisPoolConfig`<br>`jedisPoolConfig.testWhileIdle = false`<br>`redisManager.jedisPoolConfig = jedisPoolConfig` | | count | `100` | Scan count. Shiro-redis use Scan to get keys, so you can define the number of elements returned at every iteration. | | jedisPool | `null` | **Only used for sentinel mode or single mode**<br>You can create your own JedisPool instance and set attributes as you wish | ### RedisSessionDAO | Title | Default | Description | | :------------------| :------------------- | :---------------------------| | redisManager | | RedisManager which you just configured above (Required) | | expire | `-2` | Redis cache key/value expire time. The expire time is in second.<br>Special values:<br>`-1`: no expire<br>`-2`: the same timeout with session<br>Default value: `-2`<br>**Note**: Make sure expire time is longer than session timeout. | | keyPrefix | `shiro:session:` | Custom your redis key prefix for session management<br>**Note**: Remember to add colon at the end of prefix. | | sessionInMemoryTimeout | `1000` | When we do signin, `doReadSession(sessionId)` will be called by shiro about 10 times. So shiro-redis save Session in ThreadLocal to remit this problem. sessionInMemoryTimeout is expiration of Session in ThreadLocal. <br>Most of time, you don't need to change it. | | sessionInMemoryEnabled | `true` | Whether or not enable temporary save session in ThreadLocal | | keySerializer | `org.crazycake.shiro.serializer.StringSerializer` | The key serializer of cache manager<br>You can change the implement of key serializer or the encoding of StringSerializer.<br>Supported encodings refer to [Supported Encodings](https://docs.oracle.com/javase/8/docs/technotes/guides/intl/encoding.doc.html). Such as `UTF-8`, `UTF-16`, `UTF-32`, `ISO-8859-1`, `GBK`, `Big5`, etc<br>For more detail, check [Serializer](#serializer) | | valueSerializer | `org.crazycake.shiro.serializer.ObjectSerializer` | The value serializer of cache manager<br>You can change the implement of value serializer<br>For more detail, check [Serializer](#serializer) | ### CacheManager | Title | Default | Description | | :--------------------| :------------------- | :---------------------------| | redisManager | | RedisManager which you just configured above (Required) | | principalIdFieldName | `id` | Principal id field name. The field which you can get unique id to identify this principal.<br>For example, if you use UserInfo as Principal class, the id field maybe `id`, `userId`, `email`, etc.<br>Remember to add getter to this id field. For example, `getId()`, `getUserId(`), `getEmail()`, etc.<br>Default value is `id`, that means your principal object must has a method called `getId()` | | expire | `1800` | Redis cache key/value expire time. <br>The expire time is in second. | | keyPrefix | `shiro:cache:` | Custom your redis key prefix for cache management<br>**Note**: Remember to add colon at the end of prefix. | | keySerializer | `org.crazycake.shiro.serializer.StringSerializer` | The key serializer of cache manager<br>You can change the implement of key serializer or the encoding of StringSerializer.<br>Supported encodings refer to [Supported Encodings](https://docs.oracle.com/javase/8/docs/technotes/guides/intl/encoding.doc.html). Such as `UTF-8`, `UTF-16`, `UTF-32`, `ISO-8859-1`, `GBK`, `Big5`, etc<br>For more detail, check [Serializer](#serializer) | | valueSerializer | `org.crazycake.shiro.serializer.ObjectSerializer` | The value serializer of cache manager<br>You can change the implement of value serializer<br>For more detail, check [Serializer](#serializer) | # Spring boot starter Using `Spring-Boot` integration is the easiest way to integrate `shiro-redis` into a Spring-base application. > Note: `shiro-redis-spring-boot-starter` version `3.2.1` is based on `shiro-spring-boot-web-starter` version `1.4.0-RC2` First include the `shiro-redis` Spring boot starter dependency in you application classpath ```xml <dependency> <groupId>org.crazycake</groupId> <artifactId>shiro-redis-spring-boot-starter</artifactId> <version>3.3.1</version> </dependency> ``` The next step depends on whether you've created your own `SessionManager` or `SessionsSecurityManager`. ## If you haven't created your own `SessionManager` or `SessionsSecurityManager` If you don't have your own `SessionManager` or `SessionsSecurityManager` in your configuration, `shiro-redis-spring-boot-starter` will create `RedisSessionDAO` and `RedisCacheManager` for you. Then inject them into `SessionManager` and `SessionsSecurityManager` automatically. So, You are all set. Enjoy it! ## If you have created your own `SessionManager` or `SessionsSecurityManager` If you have created your own `SessionManager` or `SessionsSecurityManager` like this: ```java @Bean public SessionsSecurityManager securityManager(List<Realm> realms) { DefaultWebSecurityManager securityManager = new DefaultWebSecurityManager(realms); // other stuff... return securityManager; } ``` Then inject `redisSessionDAO` and `redisCacheManager` which created by `shiro-redis-spring-boot-starter` already ```java @Autowired RedisSessionDAO redisSessionDAO; @Autowired RedisCacheManager redisCacheManager; ``` Inject them into your own `SessionManager` and `SessionsSecurityManager` ```java @Bean public SessionManager sessionManager() { DefaultWebSessionManager sessionManager = new DefaultWebSessionManager(); // inject redisSessionDAO sessionManager.setSessionDAO(redisSessionDAO); // other stuff... return sessionManager; } @Bean public SessionsSecurityManager securityManager(List<Realm> realms, SessionManager sessionManager) { DefaultWebSecurityManager securityManager = new DefaultWebSecurityManager(realms); //inject sessionManager securityManager.setSessionManager(sessionManager); // inject redisCacheManager securityManager.setCacheManager(redisCacheManager); // other stuff... return securityManager; } ``` For full example, see [shiro-redis-spring-boot-tutorial](https://github.com/alexxiyang/shiro-redis-spring-boot-tutorial) ### Configuration Properties Here are all available options you can use in Spring-boot starter configuration | Title | Default | Description | | :--------------------------------------------------| :------------------- | :---------------------------| | shiro-redis.enabled | `true` | Enables shiro-redis’s Spring module | | shiro-redis.redis-manager.deploy-mode | `standalone` | Redis deploy mode. Options: `standalone`, `sentinel`, 'cluster' | | shiro-redis.redis-manager.host | `127.0.0.1:6379` | Redis host. If you don't specify host the default value is `127.0.0.1:6379`. If you run redis in sentinel mode or cluster mode, separate host names with comma, like `127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381` | | shiro-redis.redis-manager.master-name | `mymaster` | **Only used for sentinel mode**<br>The master node of Redis sentinel mode | | shiro-redis.redis-manager.timeout | `2000` | Redis connect timeout. Timeout for jedis try to connect to redis server(In milliseconds) | | shiro-redis.redis-manager.so-timeout | `2000` | **Only used for sentinel mode or cluster mode**<br>The timeout for jedis try to read data from redis server | | shiro-redis.redis-manager.max-attempts | `3` | **Only used for cluster mode**<br>Max attempts to connect to server | | shiro-redis.redis-manager.password | | Redis password | | shiro-redis.redis-manager.database | `0` | Redis database. Default value is 0 | | shiro-redis.redis-manager.count | `100` | Scan count. Shiro-redis use Scan to get keys, so you can define the number of elements returned at every iteration. | | shiro-redis.session-dao.expire | `-2` | Redis cache key/value expire time. The expire time is in second.<br>Special values:<br>`-1`: no expire<br>`-2`: the same timeout with session<br>Default value: `-2`<br>**Note**: Make sure expire time is longer than session timeout. | | shiro-redis.session-dao.key-prefix | `shiro:session:` | Custom your redis key prefix for session management<br>**Note**: Remember to add colon at the end of prefix. | | shiro-redis.session-dao.session-in-memory-timeout | `1000` | When we do signin, `doReadSession(sessionId)` will be called by shiro about 10 times. So shiro-redis save Session in ThreadLocal to remit this problem. sessionInMemoryTimeout is expiration of Session in ThreadLocal. <br>Most of time, you don't need to change it. | | shiro-redis.session-dao.session-in-memory-enabled | `true` | Whether or not enable temporary save session in ThreadLocal | | shiro-redis.cache-manager.principal-id-field-name | `id` | Principal id field name. The field which you can get unique id to identify this principal.<br>For example, if you use UserInfo as Principal class, the id field maybe `id`, `userId`, `email`, etc.<br>Remember to add getter to this id field. For example, `getId()`, `getUserId(`), `getEmail()`, etc.<br>Default value is `id`, that means your principal object must has a method called `getId()` | | shiro-redis.cache-manager.expire | `1800` | Redis cache key/value expire time. <br>The expire time is in second. | | shiro-redis.cache-manager.key-prefix | `shiro:cache:` | Custom your redis key prefix for cache management<br>**Note**: Remember to add colon at the end of prefix. | ## Working with `spring-boot-devtools` If you are using `shiro-redis` with `spring-boot-devtools`. Please add this line to `resources/META-INF/spring-devtools.properties` (Create it if there is no this file): ```ini restart.include.shiro-redis=/shiro-[\\w-\\.]+jar ``` # If you found any bugs Please create the issue 可以用中文 <MSG> Update README.md <DFF> @@ -21,8 +21,8 @@ You use either of the following 2 ways to include `shiro-redis` into your projec ``` > **Note:**\ -> 3.3.0 is compiled by java11\ -> 3.3.1 is compiled by java8 +> 3.3.0 is compiled in java11 by mistake. +> Please use 3.3.1 which is compiled in java8 ## shiro-core/jedis Version Comparison Charts
2
Update README.md
2
.md
md
mit
alexxiyang/shiro-redis
1787
<NME> what_does_hitch_init_do.rst <BEF> ADDFILE <MSG> DOCS : What does hitch init do? <DFF> @@ -0,0 +1,16 @@ +What does hitch init do? +======================== + +The "hitch init" command creates a .hitch directory in the current directory +if one does not already exist. Inside this directory it installs a python +virtualenv with the "hitchtest" package and dependencies, or, if a +hitchreqs.txt is found, all of the packages specified in "hitchreqs.txt" + +This virtualenv contains only the packages required to run tests. +It does *not* contain the packages required to actually run your app. +The tests themselves will take care of setting those up in a separate +virtualenv. + +See also: + +* :doc:`/faq/what_does_the_init_script_do`
16
DOCS : What does hitch init do?
0
.rst
rst
agpl-3.0
hitchtest/hitch
1788
<NME> index.rst <BEF> Hitch Plugin Documentation ========================== Documentation for :doc:`/glossary/hitch_plugin`s. Contents: .. toctree:: :glob: :maxdepth: 1 * <MSG> DOCS : Updated docs <DFF> @@ -1,7 +1,7 @@ Hitch Plugin Documentation ========================== -Documentation for :doc:`/glossary/hitch_plugin`s. +:doc:`/glossary/hitch_plugin` documentation. Contents:
1
DOCS : Updated docs
1
.rst
rst
agpl-3.0
hitchtest/hitch
1789
<NME> commandline.py <BEF> """High level command line interface to hitch.""" from subprocess import call, PIPE, STDOUT, Popen from hitch.click import command, group, argument, option from os import path, makedirs, listdir, kill, remove from sys import stderr, stdout, exit, modules, argv from functools import partial, reduce from hitch import hitchdir, languagestrings import shutil import signal import copy class CalledProcessError(Exception): """Re-implemented CalledProcessError, since it is not available < python 2.7.""" pass def check_output(command, stdout=PIPE, stderr=PIPE): """Re-implemented subprocess.check_output since it is not available < python 2.7.""" return Popen(command, stdout=stdout, stderr=stderr).communicate()[0] def check_call(command, shell=False): """Re-implemented subprocess.check_call since it is not available < python 2.7.""" process = Popen(command, shell=shell) process.communicate() if process.returncode != 0: raise CalledProcessError return @group() def cli(): pass signal.signal(signal.SIGINT, signal.SIG_IGN) check_call([hitchsystem, "installpackages", ]) signal.signal(signal.SIGINT, stop_everything) def update_requirements(): """Check hitchreqs.txt match what's installed via pip freeze. If not, update.""" stdout.write(languagestrings.UPDATING_REQUIREMENTS) pip = path.join(hitchdir.get_hitch_directory_or_fail(), "virtualenv", "bin", "pip") hitchreqs_filename = path.join(hitchdir.get_hitch_directory_or_fail(), "..", "hitchreqs.txt") pip_freeze = check_output([pip, "freeze"]).decode('utf8').split('\n') hitchreqs_handle = "" with open(hitchreqs_filename, "r") as hitchreqs_handle: hitchreqs = hitchreqs_handle.read().split('\n') if not sorted(pip_freeze) == sorted(hitchreqs): call([pip, "install", "-r", "hitchreqs.txt"]) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) @group() def cli(): pass @command() @option( '-p', '--python', default=None, help=languagestrings.SPECIFY_PYTHON_TO_CREATE_VIRTUALENV_WITH ) @option( '-v', '--virtualenv', default=None, help=languagestrings.SPECIFY_VIRTUALENV_TO_CREATE_HITCH_WITH ) def init(python, virtualenv): """Initialize hitch in this directory.""" if virtualenv is None: if call(["which", "virtualenv"], stdout=PIPE, stderr=PIPE) != 0: stderr.write(languagestrings.YOU_MUST_HAVE_VIRTUALENV_INSTALLED) stderr.flush() exit(1) virtualenv = check_output(["which", "virtualenv"]).decode('utf8').replace("\n", "") else: if path.exists(virtualenv): if python is None: python = path.join(path.dirname(virtualenv), "python") else: stderr.write("{0} not found.\n".format(virtualenv)) if python is None: if call(["which", "python3"], stdout=PIPE, stderr=PIPE) != 0: stderr.write(languagestrings.YOU_MUST_HAVE_PYTHON3_INSTALLED) stderr.flush() exit(1) python3 = check_output(["which", "python3"]).decode('utf8').replace("\n", "") else: if path.exists(python): python3 = python virtualenv, ".hitch/virtualenv", "--no-site-packages", "--distribute", "-p", python3 ]) check_call([pip, "install", "-U", "pip"]) check_call([pip, "install", "unixpackage"]) unixpackage = path.abspath(path.join(".hitch", "virtualenv", "bin", "unixpackage")) if path.exists("system.packages"): check_call([unixpackage, "install", "--polite", "-r", "system.packages"]) check_call([ unixpackage, "install", "--polite", "python-dev", "python3-dev", "libtool", "automake", "cmake" ]) if path.exists("hitchreqs.txt"): check_call([pip, "install", "-r", "hitchreqs.txt"]) if hitchdir.hitch_exists(): hitchdir.check_hitch_directory_integrity() update_requirements() exit(0) with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) except CalledProcessError: stderr.write(languagestrings.ERROR_INITIALIZING_HITCH) hitchdir.remove_hitch_directory_if_exists() try: check_call([ virtualenv, ".hitch/virtualenv", "--no-site-packages", "--distribute", "-p", python3 ]) check_call([pip, "install", "--upgrade", "pip"]) check_call([pip, "install", "--upgrade", "setuptools"]) check_call([pip, "install", "unixpackage", "hitchsystem"]) installpackages() if path.exists("hitchreqs.txt"): check_call([pip, "install", "-r", "hitchreqs.txt"]) else: check_call([pip, "install", "hitchtest"]) check_call([pip, "install", "hitchquickstart"]) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) signal.signal(signal.SIGINT, signal.SIG_IGN) check_call([path.abspath(path.join(".hitch", "virtualenv", "bin", "hitchquickstart")), ]) signal.signal(signal.SIGINT, stop_everything) installpackages() except CalledProcessError: stderr.write(languagestrings.ERROR_INITIALIZING_HITCH) hitchdir.remove_hitch_directory_if_exists() exit(1) def get_pip(): """Get the file path to the hitch pip.""" return path.join(hitchdir.get_hitch_directory_or_fail(), "virtualenv", "bin", "pip") @command(context_settings={'help_option_names':[],'ignore_unknown_options':True}, help="dd") @argument('arguments', nargs=-1) def runpackage(arguments): # Generic method to run any installed app in the virtualenv whose name starts with hitch* hitchdir.check_hitch_directory_integrity() binfile = path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin", "hitch{0}".format(argv[1])) command = [binfile, ] + argv[2:] # When receiving an exit signal, just forward it to process child. def forward_signal_to_child(pid, signum, frame): kill(pid, signum) process = Popen(command) signal.signal(signal.SIGINT, partial(forward_signal_to_child, process.pid)) signal.signal(signal.SIGTERM, partial(forward_signal_to_child, process.pid)) signal.signal(signal.SIGHUP, partial(forward_signal_to_child, process.pid)) signal.signal(signal.SIGQUIT, partial(forward_signal_to_child, process.pid)) return_code = process.wait() exit(return_code) @command() @argument('package', required=True) def uninstall(package): """Uninstall hitch package.""" hitchdir.check_hitch_directory_integrity() pip = get_pip() call([pip, "uninstall", package] ) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) update_requirements() @command() @argument('package', required=True) def install(package): """Install hitch package.""" hitchdir.check_hitch_directory_integrity() update_requirements() pip = get_pip() call([pip, "install", package, "-U", ]) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) installpackages() @command() def upgrade(): """Upgrade all installed hitch packages.""" hitchdir.check_hitch_directory_integrity() update_requirements() pip = get_pip() package_list = [ p for p in check_output([pip, "freeze"]).decode('utf8').split('\n') if p != "" and "==" in p ] version_fixed_package_list = [p.split("==")[0] for p in package_list] for package in version_fixed_package_list: call([pip, "install", package, "-U", ]) pip_freeze = check_output([pip, "freeze"]).decode('utf8') with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) installpackages() @command() def freeze(): """List installed hitch packages.""" hitchdir.check_hitch_directory_integrity() pip = path.join(hitchdir.get_hitch_directory_or_fail(), "virtualenv", "bin", "pip") call([pip, "freeze", ]) @command() def clean(): """Remove the hitch directory entirely.""" if hitchdir.hitch_exists(): hitchdir.remove_hitch_directory_if_exists() else: stderr.write("No hitch directory found. Doing nothing.\n") def run(): """Run hitch bootstrap CLI""" def stop_everything(sig, frame): """Exit hitch.""" exit(1) signal.signal(signal.SIGINT, stop_everything) signal.signal(signal.SIGTERM, stop_everything) signal.signal(signal.SIGHUP, stop_everything) "Specify precise packages to remove - " "e.g. postgresql, postgresql-9.3.9, python, python2.6.8" ) ) def cleanpkg(packages): """Remove installed packages from the .hitchpkg directory.""" hitchpkg = path.join(path.expanduser("~"), ".hitchpkg") if path.exists(hitchpkg): if packages is None: shutil.rmtree(hitchpkg) else: for file_or_dir in listdir(hitchpkg): if file_or_dir.startswith(packages): if path.isdir(path.join(hitchpkg, file_or_dir)): shutil.rmtree(path.join(hitchpkg, file_or_dir)) else: remove(path.join(hitchpkg, file_or_dir)) def run(): """Run hitch bootstrap CLI""" signal.signal(signal.SIGINT, stop_everything) signal.signal(signal.SIGTERM, stop_everything) signal.signal(signal.SIGHUP, stop_everything) signal.signal(signal.SIGQUIT, stop_everything) if hitchdir.hitch_exists(): # Get packages from bin folder that are hitch related python_bin = path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin", "python") if path.exists(python_bin): packages = [ package.replace("hitch", "") for package in listdir( path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin") ) if package.startswith("hitch") and package != "hitch" ] # Add commands that start with "hitch" to the list of commands available (e.g. hitchtest, hitchsmtp) for package in packages: cmd = copy.deepcopy(runpackage) cmd.name = package try: description = check_output([ python_bin, '-c', 'import sys;sys.stdout.write(__import__("hitch{0}").commandline.cli.help)'.format( package ) ]).decode('utf8') except CalledProcessError: description = "" cmd.help = description cmd.short_help = description cli.add_command(cmd) cli.add_command(install) cli.add_command(uninstall) cli.add_command(upgrade) cli.add_command(freeze) else: stderr.write(languagestrings.SOMETHING_CORRUPTED) cli.add_command(clean) cli.add_command(cleanpkg) cli.add_command(init) cli.help = "Hitch test runner for:\n\n {0}.".format(hitchdir.get_hitch_directory()) else: cli.add_command(init) cli.add_command(clean) cli.add_command(cleanpkg) cli.help = "Hitch bootstrapper - '.hitch' directory not detected here." cli() if __name__ == '__main__': run() <MSG> FEATURE : Run hitchsystem installpackages before and after initializing .hitch <DFF> @@ -9,14 +9,17 @@ import shutil import signal import copy + class CalledProcessError(Exception): """Re-implemented CalledProcessError, since it is not available < python 2.7.""" pass + def check_output(command, stdout=PIPE, stderr=PIPE): """Re-implemented subprocess.check_output since it is not available < python 2.7.""" return Popen(command, stdout=stdout, stderr=stderr).communicate()[0] + def check_call(command, shell=False): """Re-implemented subprocess.check_call since it is not available < python 2.7.""" process = Popen(command, shell=shell) @@ -25,6 +28,12 @@ def check_call(command, shell=False): raise CalledProcessError return + +def stop_everything(sig, frame): + """Exit hitch.""" + exit(1) + + @group() def cli(): pass @@ -92,17 +101,13 @@ def init(python, virtualenv): virtualenv, ".hitch/virtualenv", "--no-site-packages", "--distribute", "-p", python3 ]) check_call([pip, "install", "-U", "pip"]) + check_call([pip, "install", "unixpackage", "hitchsystem"]) - check_call([pip, "install", "unixpackage"]) + hitchsystem = path.abspath(path.join(".hitch", "virtualenv", "bin", "hitchsystem")) - unixpackage = path.abspath(path.join(".hitch", "virtualenv", "bin", "unixpackage")) - if path.exists("system.packages"): - check_call([unixpackage, "install", "--polite", "-r", "system.packages"]) - - check_call([ - unixpackage, "install", "--polite", - "python-dev", "python3-dev", "libtool", "automake", "cmake" - ]) + signal.signal(signal.SIGINT, signal.SIG_IGN) + check_call([hitchsystem, "installpackages", ]) + signal.signal(signal.SIGINT, stop_everything) if path.exists("hitchreqs.txt"): check_call([pip, "install", "-r", "hitchreqs.txt"]) @@ -113,6 +118,10 @@ def init(python, virtualenv): with open("hitchreqs.txt", "w") as hitchreqs_handle: hitchreqs_handle.write(pip_freeze) + + signal.signal(signal.SIGINT, signal.SIG_IGN) + check_call([hitchsystem, "installpackages", ]) + signal.signal(signal.SIGINT, stop_everything) except CalledProcessError: stderr.write(languagestrings.ERROR_INITIALIZING_HITCH) hitchdir.remove_hitch_directory_if_exists() @@ -252,10 +261,6 @@ def cleanpkg(packages): def run(): """Run hitch bootstrap CLI""" - def stop_everything(sig, frame): - """Exit hitch.""" - exit(1) - signal.signal(signal.SIGINT, stop_everything) signal.signal(signal.SIGTERM, stop_everything) signal.signal(signal.SIGHUP, stop_everything)
18
FEATURE : Run hitchsystem installpackages before and after initializing .hitch
13
.py
py
agpl-3.0
hitchtest/hitch
1790
<NME> RedisSentinelManager.java <BEF> ADDFILE <MSG> add sentinel support <DFF> @@ -0,0 +1,229 @@ +package org.crazycake.shiro; + +import redis.clients.jedis.*; + +import java.util.HashSet; +import java.util.Set; + +/** + * support jedis sentinel + * @create 2018-02-26 11:16 + **/ + +public class RedisSentinelManager implements IRedisManager{ + + private String host = "127.0.0.1:26379"; + private String masterName = "mymaster"; + + private static final int DEFAULT_EXPIRE = 3600; + // expire time in seconds + private int expire = DEFAULT_EXPIRE; + + // timeout for jedis try to connect to redis server, not expire time! In milliseconds + private int timeout = Protocol.DEFAULT_TIMEOUT; + + // timeout for jedis try to read data from redis server + private int soTimeout = Protocol.DEFAULT_TIMEOUT; + + private String password; + + private int database = Protocol.DEFAULT_DATABASE; + + private volatile JedisSentinelPool jedisSentinelPool = null; + + private void init() { + synchronized (this) { + if (jedisSentinelPool == null) { + jedisSentinelPool = new JedisSentinelPool(masterName,getJedisSentinelSet(),new JedisPoolConfig(),timeout,soTimeout,password,database); + } + } + } + + private Set<String> getJedisSentinelSet(){ + String[] hostAndPortArr = host.split(","); + Set<String> hostAndPorts = new HashSet<String>(); + for(String host : hostAndPortArr){ + hostAndPorts.add(host); + } + return hostAndPorts; + } + + private void checkAndInit() { + if (jedisSentinelPool == null) { + init(); + } + } + + /** + * get value from redis + * @param key + * @return + */ + public byte[] get(byte[] key){ + checkAndInit(); + if (key == null) { + return null; + } + byte[] value = null; + Jedis jedis = jedisSentinelPool.getResource(); + try{ + value = jedis.get(key); + }finally{ + jedis.close(); + } + return value; + } + + /** + * set + * @param key + * @param value + * @return + */ + public byte[] set(byte[] key,byte[] value){ + checkAndInit(); + if (key == null) { + return null; + } + Jedis jedis = jedisSentinelPool.getResource(); + try{ + jedis.set(key,value); + if(this.expire != 0){ + jedis.expire(key, this.expire); + } + }finally{ + jedis.close(); + } + return value; + } + + /** + * set + * @param key + * @param value + * @param expire + * @return + */ + public byte[] set(byte[] key,byte[] value,int expire){ + checkAndInit(); + if (key == null) { + return null; + } + Jedis jedis = jedisSentinelPool.getResource(); + try{ + jedis.set(key,value); + if(expire != 0){ + jedis.expire(key, expire); + } + }finally{ + jedis.close(); + } + return value; + } + + /** + * del + * @param key + */ + public void del(byte[] key){ + checkAndInit(); + if (key == null) { + return; + } + Jedis jedis = jedisSentinelPool.getResource(); + try{ + jedis.del(key); + }finally{ + jedis.close(); + } + } + + /** + * size + */ + public Long dbSize(){ + checkAndInit(); + Long dbSize = 0L; + Jedis jedis = jedisSentinelPool.getResource(); + try{ + dbSize = jedis.dbSize(); + }finally{ + jedis.close(); + } + return dbSize; + } + + /** + * keys + * @param pattern + * @return + */ + public Set<byte[]> keys(byte[] pattern){ + checkAndInit(); + Set<byte[]> keys = null; + Jedis jedis = jedisSentinelPool.getResource(); + try{ + keys = jedis.keys(pattern); + }finally{ + jedis.close(); + } + return keys; + } + + public String getHost() { + return host; + } + + public void setHost(String host) { + this.host = host; + } + + public String getMasterName() { + return masterName; + } + + public void setMasterName(String masterName) { + this.masterName = masterName; + } + + public int getExpire() { + return expire; + } + + public void setExpire(int expire) { + this.expire = expire; + } + + public int getTimeout() { + return timeout; + } + + public void setTimeout(int timeout) { + this.timeout = timeout; + } + + public int getSoTimeout() { + return soTimeout; + } + + public void setSoTimeout(int soTimeout) { + this.soTimeout = soTimeout; + } + + public String getPassword() { + return password; + } + + public void setPassword(String password) { + this.password = password; + } + + public int getDatabase() { + return database; + } + + public void setDatabase(int database) { + this.database = database; + } + +}
229
add sentinel support
0
.java
java
mit
alexxiyang/shiro-redis
1791
<NME> op_kernel.cu <BEF> #ifdef WITH_CUDA #include <cmath> #include "core/context_cuda.h" #include "core/tensor.h" #include "utils/cuda_device.h" #include "utils/op_kernel.h" #include "utils/math_functions.h" namespace dragon { namespace kernel { template <typename T> __global__ void _Empty() { } template<> void Empty<float, CUDAContext>() { _Empty<float> << <1, 1 >> >(); CUDA_POST_KERNEL_CHECK; } template<> void Empty<float16, CUDAContext>() { _Empty<float16> << <1, 1 >> >(); CUDA_POST_KERNEL_CHECK; } /******************** activation.dropout ********************/ template<typename T> __global__ void _Dropout(const int count, const uint32_t thresh, const T scale, const T* x, const uint32_t* mask, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = x[idx] * (mask[idx] > thresh) * scale; } } template<> void Dropout<float, CUDAContext>(const int count, float prob, float scale, const float* x, uint32_t* mask, float* y, CUDAContext* context) { uint32_t thresh = static_cast<uint32_t>(UINT_MAX * prob); math::RandomUniform<uint32_t, CUDAContext>(count, float(0), float(UINT_MAX), mask); _Dropout<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, thresh, scale, x, mask, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _DropoutGrad(const int count, const uint32_t thresh, const T scale, const T* dy, const uint32_t* mask, T* dx) { CUDA_KERNEL_LOOP(idx, count) { dx[idx] = dy[idx] * (mask[idx] > thresh) * scale; } } template<> void DropoutGrad<float, CUDAContext>(const int count, float prob, float scale, const float* dy, const uint32_t* mask, float* dx) { uint32_t thresh = static_cast<uint32_t>(UINT_MAX * prob); _DropoutGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, thresh, scale, dy, mask, dx); CUDA_POST_KERNEL_CHECK; } /******************** activation.prelu ********************/ template <typename T> __global__ void _PRelu(const int count, const int channels, const int dim, const T* x, const T* w, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = (x[idx] > 0) * x[idx] + (x[idx] < 0) * x[idx] * w[0]; } } template <typename T> __global__ void _PReluNCHW(const int count, const int channels, const int dim, const T* x, const T* w, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int c = (idx / dim) % channels; y[idx] = (x[idx] > 0) * x[idx] + (x[idx] < 0) * x[idx] * w[c]; } } template <typename T> __global__ void _PReluNHWC(const int count, const int channels, const int dim, const T* x, const T* w, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % channels; y[idx] = (x[idx] > 0) * x[idx] + (x[idx] < 0) * x[idx] * w[c]; } } template<> void PRelu<float, CUDAContext>(const int count, const int channels, const int dim, const bool channel_shared, const string& data_format, const float* x, const float* w, float* y) { if (channel_shared) { _PRelu<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, channels, dim, x, w, y); } else { if (data_format == "NCHW") { _PReluNCHW<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, channels, dim, x, w, y); } else if (data_format == "NHWC") { _PReluNHWC<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, channels, dim, x, w, y); } else LOG(FATAL) << "Unknown data format: " << data_format; } CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _PReluGrad(const int count, const int channels, const int dim, const T* dy, const T* x, const T* w, T* dx) { CUDA_KERNEL_LOOP(idx, count) { dx[idx] = dy[idx] * ((x[idx] > 0) + (x[idx] <= 0) * w[0]); } } template <typename T> __global__ void _PReluGradNCHW(const int count, const int channels, const int dim, const T* dy, const T* x, const T* w, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int c = (idx / dim) % channels; dx[idx] = dy[idx] * ((x[idx] > 0) + (x[idx] <= 0) * w[c]); } } template <typename T> __global__ void _PReluGradNHWC(const int count, const int channels, const int dim, const T* dy, const T* x, const T* w, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % channels; dx[idx] = dy[idx] * ((x[idx] > 0) + (x[idx] <= 0) * w[c]); } } template<> void PReluGrad<float, CUDAContext>(const int count, const int channels, const int dim, const bool channel_shared, const string& data_format, const float* dy, const float* x, const float* w, float* dx) { if (channel_shared) { _PReluGrad<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, channels, dim, dy, x, w, dx); } else { if (data_format == "NCHW") { _PReluGradNCHW<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, channels, dim, dy, x, w, dx); } else if (data_format == "NHWC") { _PReluGradNHWC<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, channels, dim, dy, x, w, dx); } else LOG(FATAL) << "Unknown data format: " << data_format; } CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _PReluWGradBcast(const int count, const int rows, const int row_offset, const T* dy, const T* x, T* bcast_dw) { CUDA_KERNEL_LOOP(idx, count) { bcast_dw[idx] = dy[idx] * x[idx] * (x[idx] <= 0); for (int n = 1; n < rows; n++) { const int cur_idx = idx + n * row_offset; bcast_dw[idx] += dy[cur_idx] * x[cur_idx] * (x[cur_idx] <= 0); } } } template<> void PReluWGrad<float, CUDAContext>(const int rows, const int row_offset, const int channels, const int dim, const bool channel_shared, const string& data_format, const float* dy, const float* x, const float* multiplier, float* bcast_dw, float* dw) { const int cdim = channels * dim; _PReluWGradBcast<float> << < GET_BLOCKS(cdim), CUDA_NUM_THREADS >> >(cdim, rows, row_offset, dy, x, bcast_dw); CUDA_POST_KERNEL_CHECK; if (channel_shared) { float w_sum = math::Dot<float, CUDAContext>(channels * dim, bcast_dw, multiplier); math::AddScalar<float, CUDAContext>(1, w_sum, dw); } else { if (data_format == "NCHW") { math::Gemv<float, CUDAContext>(CblasNoTrans, channels, dim, 1.0, bcast_dw, multiplier, 1.0, dw); } else if (data_format == "NHWC") { math::Gemv<float, CUDAContext>(CblasTrans, dim, channels, 1.0, bcast_dw, multiplier, 1.0, dw); } else LOG(FATAL) << "Unknown data format: " << data_format; } } /******************** activation.elu ********************/ template <typename T> __global__ void _Elu(const int count, const T* x, const float alpha, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = x[idx] > 0 ? x[idx] : alpha * (std::exp(x[idx]) - 1); } } template<> void Elu<float, CUDAContext>(const int count, const float* x, const float alpha, float* y) { _Elu<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, alpha, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _EluGrad(const int count, const T* dy, const T* y, const float alpha, T* dx) { CUDA_KERNEL_LOOP(idx, count) { dx[idx] = dy[idx] * ((y[idx] > 0) + (alpha + y[idx]) * (y[idx] <= 0)); } } template<> void EluGrad<float, CUDAContext>(const int count, const float* dy, const float* y, const float alpha, float* dx) { _EluGrad<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dy, y, alpha, dx); CUDA_POST_KERNEL_CHECK; } /******************** activation.relu ********************/ template <typename T> __global__ void _Relu(const int count, const T* x, const float slope, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = x[idx] > 0 ? x[idx] : x[idx] * slope; } } template<> void Relu<float, CUDAContext>(const int count, const float* x, const float slope, float* y) { _Relu<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, slope, y); CUDA_POST_KERNEL_CHECK; } #ifdef WITH_CUDA_FP16 template <typename T> __global__ void _ReluHalf(const int count, const half* x, const float slope, half* y) { const half kSlope = __float2half(slope); const half kZero = __float2half(0.0); CUDA_KERNEL_LOOP(idx, count) { #if __CUDA_ARCH__ >= 530 y[idx] = __hgt(x[idx], kZero) ? x[idx] : __hmul(x[idx], kSlope); #endif } } template<> void Relu<float16, CUDAContext>(const int count, const float16* x, const float slope, float16* y) { _ReluHalf<half> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, reinterpret_cast<const half*>(x), slope, reinterpret_cast<half*>(y)); CUDA_POST_KERNEL_CHECK; } #endif template <typename T> __global__ void _ReluGrad(const int count, const T* dy, const T* y, const float slope, T* dx) { CUDA_KERNEL_LOOP(idx, count) { dx[idx] = dy[idx] * ((y[idx] > 0) + slope * (y[idx] <= 0)); } } template<> void ReluGrad<float, CUDAContext>(const int count, const float* dy, const float* y, const float slope, float* dx) { _ReluGrad<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dy, y, slope, dx); CUDA_POST_KERNEL_CHECK; } /******************** activation.selu ********************/ template <typename T> __global__ void _SElu(const int count, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = x[idx] > 0 ? 1.0507 * x[idx] : 1.7581 * (std::exp(x[idx]) - 1); } } template<> void SElu<float, CUDAContext>(const int count, const float* x, float* y) { _SElu<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SEluGrad(const int count, const T* dy, const T* y, T* dx) { CUDA_KERNEL_LOOP(idx, count) { dx[idx] = y[idx] > 0 ? 1.0507 * dy[idx] : (1.7581 + y[idx]) * dy[idx]; } } template<> void SEluGrad<float, CUDAContext>(const int count, const float* dy, const float* y, float* dx) { _SEluGrad<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dy, y, dx); CUDA_POST_KERNEL_CHECK; } /******************** activation.sigmoid ********************/ template <typename T> __device__ T _SigmoidUnit(const T x) { return T(1) / (T(1) + exp(-x)); } template <typename T> __global__ void _Sigmoid(const int n, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, n) { y[idx] = _SigmoidUnit<T>(x[idx]); } } template<> void Sigmoid<float, CUDAContext>(const int count, const float* x, float* y) { _Sigmoid<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SigmoidGrad(const int count, const T* dy, const T* y, T* dx) { CUDA_KERNEL_LOOP(idx, count) { dx[idx] = dy[idx] * y[idx] * (1 - y[idx]); } } template<> void SigmoidGrad<float, CUDAContext>(const int count, const float* dy, const float* y, float* dx) { _SigmoidGrad<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dy, y, dx); CUDA_POST_KERNEL_CHECK; } /******************** activation.softmax ********************/ template <typename T> __global__ void _SoftmaxMaxClass(const int outer_dim, const int classes, const int inner_dim, const T* x, T* scale) { CUDA_KERNEL_LOOP(idx, outer_dim * inner_dim) { int o_idx = idx / inner_dim; int i_idx = idx % inner_dim; T max_val = -FLT_MAX; for (int c = 0; c < classes; c++) max_val = max(x[(o_idx * classes + c) * inner_dim + i_idx], max_val); scale[idx] = max_val; } } template <typename T> __global__ void _SoftmaxSubtract(const int count, const int classes, const int inner_dim, const T* scale, T* y) { CUDA_KERNEL_LOOP(idx, count) { int o_idx = idx / inner_dim / classes; int i_idx = idx % inner_dim; y[idx] -= scale[o_idx * inner_dim + i_idx]; } } template <typename T> __global__ void _SoftmaxExp(const int count, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = std::exp(y[idx]); } } template <typename T> __global__ void _SoftmaxSumClass(const int outer_dim, const int classes, const int inner_dim, const T* y, T* scale) { CUDA_KERNEL_LOOP(idx, outer_dim * inner_dim) { int o_idx = idx / inner_dim; int i_idx = idx % inner_dim; T sum = 0; for (int c = 0; c < classes; c++) sum += y[(o_idx * classes + c) * inner_dim + i_idx]; scale[idx] = sum; } } template <typename T> __global__ void _SoftmaxDiv(const int count, const int classes, const int inner_dim, const T* scale, T* y) { CUDA_KERNEL_LOOP(idx, count) { int o_idx = idx / inner_dim / classes; int i_idx = idx % inner_dim; y[idx] /= scale[o_idx * inner_dim + i_idx]; } } template<> void Softmax<float, CUDAContext>(const int count, const int classes, const int outer_dim, const int inner_dim, const float* sum_multiplier, const float* x, float* scale, float* y, CUDAContext* context) { const int num_preds = inner_dim * outer_dim; _SoftmaxMaxClass<float> << <GET_BLOCKS(num_preds), CUDA_NUM_THREADS >> >(outer_dim, classes, inner_dim, x, scale); _SoftmaxSubtract<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, classes, inner_dim, scale, y); _SoftmaxExp<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, y); _SoftmaxSumClass<float> << <GET_BLOCKS(num_preds), CUDA_NUM_THREADS >> >(outer_dim, classes, inner_dim, y, scale); _SoftmaxDiv<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, classes, inner_dim, scale, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SoftmaxDot(const int outer_dim, const int classes, const int inner_dim, const T* dy, const T* y, T* scale) { CUDA_KERNEL_LOOP(idx, outer_dim * inner_dim) { int o_idx = idx / inner_dim; int i_idx = idx % inner_dim; T dot = 0; for (int c = 0; c < classes; c++) dot += (y[(o_idx * classes + c) * inner_dim + i_idx] * dy[(o_idx * classes + c) * inner_dim + i_idx]); scale[idx] = dot; } } template<> void SoftmaxGrad<float, CUDAContext>(const int count, const int classes, const int outer_dim, const int inner_dim, const float* sum_multiplier, const float* dy, const float* y, float* scale, float* dx) { const int num_preds = inner_dim * outer_dim; _SoftmaxDot<float> << <GET_BLOCKS(num_preds), CUDA_NUM_THREADS >> >(outer_dim, classes, inner_dim, dy, y, scale); _SoftmaxSubtract<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, classes, inner_dim, scale, dx); math::Mul<float, CUDAContext>(count, dx, y, dx); CUDA_POST_KERNEL_CHECK; } /******************** activation.tanh ********************/ template <typename T> __global__ void _Tanh(const int count, const T* x, T* y) { CUDA_KERNEL_LOOP(i, count) { y[i] = std::tanh(x[i]); } } template<> void Tanh<float, CUDAContext>(const int count, const float* x, float* y) { _Tanh<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _TanhGrad(const int count, const T* dy, const T* y, T* dx) { CUDA_KERNEL_LOOP(i, count) { dx[i] = dy[i] * (1 - y[i] * y[i]); } } template<> void TanhGrad<float, CUDAContext>(const int count, const float* dy, const float* y, float* dx) { _TanhGrad<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dy, y, dx); CUDA_POST_KERNEL_CHECK; } /******************** arithmetic.bias_add ********************/ template <typename T> __global__ void _BiasAdd_NCHW(const int count, const int dim, const int inner_dim, const T* bias, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int bias_idx = (idx / inner_dim) % dim; y[idx] += bias[bias_idx]; } } template <typename T> __global__ void _BiasAdd_NHWC(const int count, const int dim, const int inner_dim, const T* bias, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] += bias[idx % dim]; } } template<> void BiasAdd<float, CUDAContext>(const int count, const int outer_dim, const int dim, const int inner_dim, const string& data_format, const float* bias, const float* bias_multiplier, float* y) { if (data_format == "NCHW") { _BiasAdd_NCHW<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, inner_dim, bias, y); } else if (data_format == "NHWC") { _BiasAdd_NHWC<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, inner_dim, bias, y); } else LOG(FATAL) << "Unknown data format: " << data_format; } /******************** arithmetic.clip ********************/ template <typename T> __global__ void _Clip(const int count, const T low, const T high, const T* x, T* mask, T* y) { CUDA_KERNEL_LOOP(idx, count) { mask[idx] = 1.0; if (x[idx] > high || x[idx] < low) mask[idx] = 0.0; y[idx] = x[idx] > high ? high : x[idx]; y[idx] = x[idx] < low ? low : x[idx]; } } template <> void Clip<float, CUDAContext>(const int count, const float low, const float high, const float* x, float* mask, float* y) { _Clip<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, low, high, x, mask, y); } /******************** arithmetic.scale ********************/ template <typename T> __global__ void _ScaleWithoutBias(const int n, const T* x, const T* scale, const int scale_dim, const int inner_dim, T* y) { CUDA_KERNEL_LOOP(idx, n) { const int scale_idx = (idx / inner_dim) % scale_dim; y[idx] = x[idx] * scale[scale_idx]; } } template <typename T> __global__ void _ScaleWithBias(const int n, const T* x, const T* scale, const T* bias, const int scale_dim, const int inner_dim, T* y) { CUDA_KERNEL_LOOP(idx, n) { const int scale_idx = (idx / inner_dim) % scale_dim; y[idx] = x[idx] * scale[scale_idx] + bias[scale_idx]; } } template<> void Scale<float, CUDAContext>(const int axis, Tensor* x, Tensor* gamma, Tensor* beta, Tensor* BMul, Tensor* y) { const int count = x->count(); const int inner_dim = x->count(axis + gamma->ndim()); const int scale_dim = gamma->count(); auto* Xdata = x->data<float, CUDAContext>(); auto* Ydata = y->mutable_data<float, CUDAContext>(); auto* Sdata = gamma->data<float, CUDAContext>(); auto* Bdata = beta != nullptr ? beta->data<float, CUDAContext>() : nullptr; if (Bdata != nullptr) _ScaleWithBias<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, Xdata, Sdata, Bdata, scale_dim, inner_dim, Ydata); else _ScaleWithoutBias<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, Xdata, Sdata, scale_dim, inner_dim, Ydata); } #ifdef WITH_CUDA_FP16 template <typename T> __global__ void _ScaleWithoutBiasHalf(const int n, const half* x, const half* scale, const int scale_dim, const int inner_dim, half* y) { CUDA_KERNEL_LOOP(idx, n) { #if __CUDA_ARCH__ >= 530 const int scale_idx = (idx / inner_dim) % scale_dim; y[idx] = __hmul(x[idx], scale[scale_idx]); #endif } } template <typename T> __global__ void _ScaleWithBiasHalf(const int n, const half* x, const half* scale, const half* bias, const int scale_dim, const int inner_dim, half* y) { CUDA_KERNEL_LOOP(idx, n) { #if __CUDA_ARCH__ >= 530 const int scale_idx = (idx / inner_dim) % scale_dim; y[idx] = __hadd(__hmul(x[idx], scale[scale_idx]), bias[scale_idx]); #endif } } template<> void Scale<float16, CUDAContext>(const int axis, Tensor* x, Tensor* gamma, Tensor* beta, Tensor* BMul, Tensor* y) { const int count = x->count(); const int inner_dim = x->count(axis + gamma->ndim()); const int scale_dim = gamma->count(); auto* Xdata = x->data<float16, CUDAContext>(); auto* Ydata = y->mutable_data<float16, CUDAContext>(); auto* Sdata = gamma->data<float16, CUDAContext>(); auto* Bdata = beta != nullptr ? beta->data<float16, CUDAContext>() : nullptr; if (Bdata != nullptr) _ScaleWithBiasHalf<half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, reinterpret_cast<const half*>(Xdata), reinterpret_cast<const half*>(Sdata), reinterpret_cast<const half*>(Bdata), scale_dim, inner_dim, reinterpret_cast<half*>(Ydata)); else _ScaleWithoutBiasHalf<half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, reinterpret_cast<const half*>(Xdata), reinterpret_cast<const half*>(Sdata), scale_dim, inner_dim, reinterpret_cast<half*>(Ydata)); } #endif template <> void ScaleGrad<float, CUDAContext>(const int axis, Tensor* dy, Tensor* gamma, Tensor* dx) { const int count = dx->count(); const int inner_dim = dx->count(axis + gamma->ndim()); const int scale_dim = gamma->count(); auto* dYdata = dy->data<float, CUDAContext>(); auto* dXdata = dx->mutable_data<float, CUDAContext>(); auto* Sdata = gamma->data<float, CUDAContext>(); _ScaleWithoutBias<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dYdata, Sdata, scale_dim, inner_dim, dXdata); } /******************** cast.float2half ********************/ #ifdef WITH_CUDA_FP16 template <typename T> __global__ void _FloatToHalfKernel(const int count, const float* x, half* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = __float2half(x[idx]); } } template <> void Float2Half<float, CUDAContext>(const int count, const float* x, float16* y) { _FloatToHalfKernel<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, reinterpret_cast<half*>(y)); CUDA_POST_KERNEL_CHECK; } #endif /******************** control_flow.compare ********************/ template <typename T> __global__ void _Equal(const int count, const T* a, const T* b, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = fabs(a[idx] - b[idx]) < FLT_EPSILON ? 1.0 : 0.0; } } template <> void Equal<float, CUDAContext>(const int count, const float* a, const float* b, float* y) { _Equal<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, a, b, y); CUDA_POST_KERNEL_CHECK; } /******************** loss.l1_loss ********************/ template <typename T> __global__ void _AbsGrad(const int count, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const T val = dy[idx]; // val > 0: 1 | val == 0: 0 | val < 0: -1 dx[idx] = (val > T(0)) - (val < T(0)); } } template<> void AbsGrad<float, CUDAContext>(const int count, const float* dy, float* dx) { _AbsGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** loss.sigmoid_cross_entropy ********************/ template <typename T> __global__ void _SigmoidCrossEntropy(const int count, const T* x, const T* targets, T* loss) { CUDA_KERNEL_LOOP(idx, count) { loss[idx] = std::log(1 + std::exp(x[idx] - 2 * x[idx] * (x[idx] >= 0))) + x[idx] * ((x[idx] >= 0) - targets[idx]); } } template <> void SigmoidCrossEntropy<float, CUDAContext>(const int count, const float* x, const float* targets, float* loss) { _SigmoidCrossEntropy<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, targets, loss); CUDA_POST_KERNEL_CHECK; } /******************** loss.smooth_l1_loss ********************/ template <typename T> __global__ void _SmoothL1(const int count, const float sigma2, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const T val = x[idx]; const T abs_val = abs(val); if (abs_val < 1.0 / sigma2) y[idx] = 0.5 * val * val * sigma2; else y[idx] = abs_val - 0.5 / sigma2; } } template<> void SmoothL1<float, CUDAContext>(const int count, const float sigma2, const float* x, float* y) { _SmoothL1<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, sigma2, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SmoothL1Grad(const int count, const float sigma2, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const T val = dy[idx]; const T abs_val = abs(val); if (abs_val < 1.0 / sigma2) dx[idx] = val * sigma2; // val > 0: 1 | val == 0: 0 | val < 0: -1 else dx[idx] = (val > T(0)) - (val < T(0)); } } template<> void SmoothL1Grad<float, CUDAContext>(const int count, const float sigma2, const float* dy, float* dx) { _SmoothL1Grad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, sigma2, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** loss.softmax_cross_entropy ********************/ template <typename T> __global__ void _SoftmaxCrossEntropy(const int count, const T* prob, const T* target, T* loss) { CUDA_KERNEL_LOOP(idx, count) { loss[idx] = -target[idx] * log(max(prob[idx], FLT_MIN)); } } template <> void SoftmaxCrossEntropy<float, CUDAContext>(const int count, const float* prob, const float* target, float* loss) { _SoftmaxCrossEntropy<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, prob, target, loss); CUDA_POST_KERNEL_CHECK; } /******************** loss.sparse_softmax_cross_entropy ********************/ template <typename T> __global__ void _SparseSoftmaxCrossEntropy(const int count, const T* prob, const T* labels, T* loss, const int classes, const int inner_dim, const int* ignores, const int ignore_num, T* valid) { CUDA_KERNEL_LOOP(idx, count) { const int o_idx = idx / inner_dim; const int i_idx = idx % inner_dim; const int label = labels[o_idx * inner_dim + i_idx]; int k; for (k = 0; k < ignore_num; k++) { if (label == ignores[k]) { loss[idx] = valid[idx] = 0; break; } } if (k == ignore_num) { loss[idx] = -log(max(prob[(o_idx * classes + label) * inner_dim + i_idx], FLT_MIN)); valid[idx] = 1; } } } template <> void SparseSoftmaxCrossEntropy<float, CUDAContext>(const int count, const int classes, const int outer_dim, const int inner_dim, const float* prob, const float* labels, float* loss, float* valid, Tensor* ignore) { const int* ignores = ignore->count() > 0 ? ignore->data<int, CUDAContext>() : nullptr; const int num_preds = outer_dim * inner_dim; _SparseSoftmaxCrossEntropy<float> << <GET_BLOCKS(num_preds), CUDA_NUM_THREADS >> >(num_preds, prob, labels, loss, classes, inner_dim, ignores, ignore->count(), valid); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SparseSoftmaxCrossEntropyGrad(const int count, const T* prob, const T* labels, T* dx, const int classes, const int inner_dim, const int* ignores, const int ignore_num, T* valid) { CUDA_KERNEL_LOOP(idx, count) { const int o_idx = idx / inner_dim; const int i_idx = idx % inner_dim; const int label = labels[o_idx * inner_dim + i_idx]; int k; for (k = 0; k < ignore_num; k++) if (label == ignores[k]) break; if (k != ignore_num) { for (int c = 0; c < classes; c++) dx[(o_idx * classes + c) * inner_dim + i_idx] = 0; valid[idx] = 0; } else { dx[(o_idx * classes + label) * inner_dim + i_idx] -= 1; valid[idx] = 1; } } } template<> void SparseSoftmaxCrossEntropyGrad<float, CUDAContext>(const int count, const int classes, const int outer_dim, const int inner_dim, const float* prob, const float* labels, float* valid, Tensor* ignore, float* dXdata) { const int* ignores = ignore->count() > 0 ? ignore->data <int, CUDAContext >() : nullptr; const int num_preds = outer_dim * inner_dim; _SparseSoftmaxCrossEntropyGrad<float> << <GET_BLOCKS(num_preds), CUDA_NUM_THREADS >> >(num_preds, prob, labels, dXdata, classes, inner_dim, ignores, ignore->count(), valid); CUDA_POST_KERNEL_CHECK; } /******************** loss.sparse_softmax_focal_loss ********************/ template <typename T> __global__ void _SparseSoftmaxFocalScale(const int count, const float gamma, const T* prob, T* scale) { CUDA_KERNEL_LOOP(idx, count) { scale[idx] = std::pow((1.0f - prob[idx]), gamma); } } template <typename T> __global__ void _SparseSoftmaxFocalLoss(const int count, const float pos_alpha, const float neg_alpha, const int neg_id, T* scale, const T* prob, const T* labels, T* loss, const int classes, const int inner_dim, const int* ignores, const int ignore_num, T* valid) { CUDA_KERNEL_LOOP(idx, count) { const int o_idx = idx / inner_dim; const int i_idx = idx % inner_dim; const int label = labels[o_idx * inner_dim + i_idx]; int k; for (k = 0; k < ignore_num; k++) { if (label == ignores[k]) { loss[idx] = valid[idx] = 0; break; } } if (k == ignore_num) { const int t_ = (o_idx * classes + label) * inner_dim + i_idx; scale[t_] = label > neg_id ? pos_alpha * scale[t_] : neg_alpha * scale[t_]; loss[idx] = -scale[t_] * std::log(max(prob[t_], FLT_MIN)); valid[idx] = label > neg_id ? 1 : 0; } } } template <> void SparseSoftmaxFocalLoss<float, CUDAContext>(const int count, const int classes, const int outer_dim, const int inner_dim, const float pos_alpha, const float neg_alpha, const float gamma, const int neg_id, const float* prob, const float* labels, float* scale, float* loss, float* valid, Tensor* ignore) { const int* ignores = ignore->count() > 0 ? ignore->data<int, CUDAContext>() : nullptr; const int num_preds = outer_dim * inner_dim; _SparseSoftmaxFocalScale<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, gamma, prob, scale); _SparseSoftmaxFocalLoss<float> << <GET_BLOCKS(num_preds), CUDA_NUM_THREADS >> >(num_preds, pos_alpha, neg_alpha, neg_id, scale, prob, labels, loss, classes, inner_dim, ignores, ignore->count(), valid); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SparseSoftmaxFocalLossGrad(const int count, const float gamma, const int neg_id, const float eps, const T* scale, const T* prob, const T* labels, T* dx, const int classes, const int inner_dim, const int* ignores, const int ignore_num, T* valid) { CUDA_KERNEL_LOOP(idx, count) { const int o_idx = idx / inner_dim; const int i_idx = idx % inner_dim; const int label = labels[o_idx * inner_dim + i_idx]; int k; for (k = 0; k < ignore_num; k++) if (label == ignores[k]) break; if (k != ignore_num) { for (int c = 0; c < classes; c++) dx[(o_idx * classes + c) * inner_dim + i_idx] = 0; valid[idx] = 0; } else { const int t_ = (o_idx * classes + label) * inner_dim + i_idx; T grad = -gamma * (scale[t_] / max((1.0f - prob[t_]), eps)) * std::log(max(prob[t_], FLT_MIN)) * prob[t_] + scale[t_]; for (int c = 0; c < classes; c++) { const int i_ = (o_idx * classes + c) * inner_dim + i_idx; if (c == label) { dx[i_] = grad * (prob[t_] - 1); } else { dx[i_] = grad * prob[i_]; } } valid[idx] = label > neg_id ? 1 : 0; } } } template<> void SparseSoftmaxFocalLossGrad<float, CUDAContext>(const int count, const int classes, const int outer_dim, const int inner_dim, const float gamma, const int neg_id, const float eps, const float* scale, const float* prob, const float* labels, float* valid, Tensor* ignore, float* dXdata) { const int* ignores = ignore->count() > 0 ? ignore->data <int, CUDAContext >() : nullptr; const int num_preds = outer_dim * inner_dim; _SparseSoftmaxFocalLossGrad<float> << <GET_BLOCKS(num_preds), CUDA_NUM_THREADS >> >(num_preds, gamma, neg_id, eps, scale, prob, labels, dXdata, classes, inner_dim, ignores, ignore->count(), valid); CUDA_POST_KERNEL_CHECK; } /******************** misc.image_data ********************/ template <typename Tx, typename Ty> __global__ void _ImageData_NCHW(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const Tx* x, Ty* y) { CUDA_KERNEL_LOOP(idx, count) { const int w = idx % W; const int h = (idx / W) % H; const int c = (idx / W / H) % C; const int n = idx / W / H / C; Ty raw_value = x[((n * H + h) * W + w) * C + c]; if (mean_values != nullptr) raw_value -= mean_values[c]; if (std_values != nullptr) raw_value /= std_values[c]; y[idx] = raw_value; } } template <typename Tx, typename Ty> __global__ void _ImageData_NHWC(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const Tx* x, Ty* y) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % C; Ty raw_value = x[idx]; if (mean_values != nullptr) raw_value -= mean_values[c]; if (std_values != nullptr) raw_value /= std_values[c]; y[idx] = raw_value; } } template <typename Tx, typename Ty> __global__ void _ImageDataHalf_NCHW(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const Tx* x, Ty* y) { CUDA_KERNEL_LOOP(idx, count) { const int w = idx % W; const int h = (idx / W) % H; const int c = (idx / W / H) % C; const int n = idx / W / H / C; float raw_value = x[((n * H + h) * W + w) * C + c]; if (mean_values != nullptr) raw_value -= mean_values[c]; if (std_values != nullptr) raw_value /= std_values[c]; y[idx] = __float2half(raw_value); } } template <typename Tx, typename Ty> __global__ void _ImageDataHalf_NHWC(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const Tx* x, Ty* y) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % C; float raw_value = x[idx]; if (mean_values != nullptr) raw_value -= mean_values[c]; if (std_values != nullptr) raw_value /= std_values[c]; y[idx] = __float2half(raw_value); } } template <> void ImageData<float, float, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const string& data_format, const float* x, float* y) { if (data_format == "NCHW") { _ImageData_NCHW<float, float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, y); } else if (data_format == "NHWC") { _ImageData_NHWC<float, float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, y); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } template <> void ImageData<uint8_t, float, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const string& data_format, const uint8_t* x, float* y) { if (data_format == "NCHW") { _ImageData_NCHW<uint8_t, float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, y); } else if (data_format == "NHWC") { _ImageData_NHWC<uint8_t, float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, y); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } #ifdef WITH_CUDA_FP16 template <> void ImageData<float, float16, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const string& data_format, const float* x, float16* y) { if (data_format == "NCHW") { _ImageDataHalf_NCHW<float, half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, reinterpret_cast<half*>(y)); } else if (data_format == "NHWC") { _ImageDataHalf_NHWC<float, half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, reinterpret_cast<half*>(y)); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } template <> void ImageData<uint8_t, float16, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const string& data_format, const uint8_t* x, float16* y) { if (data_format == "NCHW") { _ImageDataHalf_NCHW<uint8_t, half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, reinterpret_cast<half*>(y)); } else if (data_format == "NHWC") { _ImageDataHalf_NHWC<uint8_t, half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, reinterpret_cast<half*>(y)); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } #endif /******************** ndarray.argmax ********************/ template <typename T> __global__ void _Arange(const int count, const int start, const int step, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = start + idx * step; } } template<> void Arange<float, CUDAContext>(const int count, const int start, const int step, float* y) { _Arange<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, start, step, y); CUDA_POST_KERNEL_CHECK; } template<> void Arange<int, CUDAContext>(const int count, const int start, const int step, int* y) { _Arange<int> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, start, step, y); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.argmax ********************/ template <typename T> __global__ void _Argmax(const int count, const int axis_dim, const int inner_dim, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { T max_val = -FLT_MAX; int max_idx = -1; for (int j = 0; j < axis_dim; ++j) { const T val = x[(idx / inner_dim * axis_dim + j) * inner_dim + idx % inner_dim]; if (val > max_val) { max_val = val; max_idx = j; } } y[idx] = max_idx; } } template<> void Argmax<float, CUDAContext>(const int count, const int axis_dim, const int inner_dim, const int top_k, const float* x, float* y) { CHECK_EQ(top_k, 1) << "top_k > 1 is not supported with CUDA"; _Argmax<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, axis_dim, inner_dim, x, y); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.argmin ********************/ template <typename T> __global__ void _Argmin(const int count, const int axis_dim, const int inner_dim, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { T min_val = FLT_MAX; int min_idx = -1; for (int j = 0; j < axis_dim; ++j) { const T val = x[(idx / inner_dim * axis_dim + j) * inner_dim + idx % inner_dim]; if (val < min_val) { min_val = val; min_idx = j; } } y[idx] = min_idx; } } template<> void Argmin<float, CUDAContext>(const int count, const int axis_dim, const int inner_dim, const int top_k, const float* x, float* y) { CHECK_EQ(top_k, 1) << "top_k > 1 is not supported with CUDA"; _Argmin<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, axis_dim, inner_dim, x, y); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.at ********************/ template <typename T> __global__ void _CanonicalAxis(const int count, const int dim, T* y) { CUDA_KERNEL_LOOP(idx, count) { if (y[idx] < 0) y[idx] += dim; } } template <> void CanonicalAxis<int, CUDAContext>(const int count, const int dim, int* y) { _CanonicalAxis<int> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _At(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int outer_idx = idx / inner_dim / y_slice_dim; const int slice_idx = idx % inner_dim; const int y_idx_offset = (idx / inner_dim) % y_slice_dim; const int x_idx_offset = indices[y_idx_offset]; const int x_idx = (outer_idx * x_slice_dim + x_idx_offset) * inner_dim + slice_idx; y[idx] = x[x_idx]; } } template <> void At<float, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const float* x, float* y, CUDAContext* context) { _At<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, indices, x, y); CUDA_POST_KERNEL_CHECK; } template <> void At<int, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const int* x, int* y, CUDAContext* context) { _At<int> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, indices, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _AtGrad(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int outer_idx = idx / inner_dim / y_slice_dim; const int slice_idx = idx % inner_dim; const int y_idx_offset = (idx / inner_dim) % y_slice_dim; const int x_idx_offset = indices[y_idx_offset]; const int x_idx = (outer_idx * x_slice_dim + x_idx_offset) * inner_dim + slice_idx; atomicAdd(dx + x_idx, dy[idx]); } } template <> void AtGrad<float, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const float* dy, float* dx) { _AtGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, indices, dy, dx); CUDA_POST_KERNEL_CHECK; } template <> void AtGrad<int, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const int* dy, int* dx) { _AtGrad<int> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, indices, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.concat ********************/ template <typename T> __global__ void _Concat(const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int tmp = x_concat_dim * inner_dim; const int outer_idx = idx / tmp; const int concat_idx = idx % tmp; const int y_idx = (outer_idx * y_concat_dim + concat_offset) * inner_dim + concat_idx; y[y_idx] = x[idx]; } } template <> void Concat<float, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const float* x, float* y, CUDAContext* context) { _Concat<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_concat_dim, y_concat_dim, concat_offset, x, y); CUDA_POST_KERNEL_CHECK; } #ifdef WITH_CUDA_FP16 template <> void Concat<float16, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const float16* x, float16* y, CUDAContext* context) { _Concat<half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_concat_dim, y_concat_dim, concat_offset, reinterpret_cast<const half*>(x), reinterpret_cast<half*>(y)); CUDA_POST_KERNEL_CHECK; } #endif template <typename T> __global__ void _ConcatGrad(const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int tmp = x_concat_dim * inner_dim; const int outer_idx = idx / tmp; const int concat_idx = idx % tmp; const int y_idx = (outer_idx * y_concat_dim + concat_offset) * inner_dim + concat_idx; dx[idx] = dy[y_idx]; } } template <> void ConcatGrad<float, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const float* dy, float* dx, CUDAContext* context) { _ConcatGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_concat_dim, y_concat_dim, concat_offset, dy, dx); CUDA_POST_KERNEL_CHECK; } #ifdef WITH_CUDA_FP16 template <> void ConcatGrad<float16, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const float16* dy, float16* dx, CUDAContext* context) { _ConcatGrad<half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_concat_dim, y_concat_dim, concat_offset, reinterpret_cast<const half*>(dy), reinterpret_cast<half*>(dx)); CUDA_POST_KERNEL_CHECK; } #endif /******************** ndarray.crop ********************/ template<typename T> __global__ void _Crop1D(const int count, const int dim, const int ex_dim, const int inner_dim, const int start, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; y[idx] = x[(o * dim + ex_d + start) * inner_dim + i]; } } template<> void Crop1D<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int start, const float* x, float* y, CUDAContext* context) { _Crop1D<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, start, x, y); CUDA_POST_KERNEL_CHECK; } template<typename T> __global__ void _Crop1DGrad(const int count, const int dim, const int ex_dim, const int inner_dim, const int start, const int end, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int d = (idx / inner_dim) % dim; const int o = idx / inner_dim / dim; if (d >= start && d < end) dx[idx] = dy[(o * ex_dim + d - start) * inner_dim + i]; } } template<> void Crop1DGrad<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int start, const int end, const float* dy, float* dx, CUDAContext* context) { _Crop1DGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, start, end, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.pad ********************/ template <typename T> __global__ void _ConstPad1D(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T value, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; const int d = ex_d - pad_l; y[idx] = (d < 0 || d >= dim) ? value : x[(o * dim + d) * inner_dim + i]; } } template <> void ConstPad1D<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float value, const float* x, float* y, CUDAContext* context) { _ConstPad1D<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, pad_l, value, x, y); } template <typename T> __global__ void _ReflectPad1D(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; int d = ex_d - pad_l; d = max(d, -d); d = min(d, 2 * dim - d - 2); y[idx] = x[(o * dim + d) * inner_dim + i]; } } template <> void ReflectPad1D<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float* x, float* y, CUDAContext* context) { _ReflectPad1D<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, pad_l, x, y); } template <typename T> __global__ void _EdgePad1D(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; const int d = min(dim - 1, max(ex_d - pad_l, 0)); y[idx] = x[(o * dim + d) * inner_dim + i]; } } template <> void EdgePad1D<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float* x, float* y, CUDAContext* context) { _EdgePad1D<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, pad_l, x, y); } template <typename T> __global__ void _ConstPad1DGrad(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % dim + pad_l; const int o = idx / inner_dim / dim; dx[idx] = dy[(o * ex_dim + ex_d) * inner_dim + i]; } } template <> void ConstPad1DGrad<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float* dy, float* dx, CUDAContext* context) { _ConstPad1DGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, pad_l, dy, dx); } template <typename T> __global__ void _ReflectPad1DGrad(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; int d = ex_d - pad_l; d = max(d, -d); d = min(d, 2 * dim - d - 2); atomicAdd(&dx[(o * dim + d) * inner_dim + i], dy[idx]); } } template <> void ReflectPad1DGrad<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float* dy, float* dx) { _ReflectPad1DGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, pad_l, dy, dx); } template <typename T> __global__ void _EdgePad1DGrad(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; const int d = min(dim - 1, max(ex_d - pad_l, 0)); atomicAdd(&dx[(o * dim + d) * inner_dim + i], dy[idx]); } } template <> void EdgePad1DGrad<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float* dy, float* dx, CUDAContext* context) { _EdgePad1DGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, pad_l, dy, dx); } /******************** ndarray.one_hot ********************/ template <typename T> __global__ void _OneHot(const int count, const int depth, const int on_value, const float* x, float* y) { CUDA_KERNEL_LOOP(idx, count) { const int val = x[idx]; y[idx * depth + val] = on_value; } } template <> void OneHot<float, CUDAContext>(const int count, const int depth, const int on_value, const float* x, float* y) { _OneHot<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, depth, on_value, x, y); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.reduce ********************/ template <typename T> __global__ void _Sum(const int count, const int axis_dim, const int inner_dim, const T* x, float* y) { CUDA_KERNEL_LOOP(idx, count) { T sum_val = 0.0; for (int j = 0; j < axis_dim; j++) sum_val += x[(idx / inner_dim * axis_dim + j) * inner_dim + idx % inner_dim]; y[idx] = sum_val; } } template<> void Sum<float, CUDAContext>(const int count, const int axis_dim, const int inner_dim, const float* x, float* y) { _Sum<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, axis_dim, inner_dim, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SumGrad(const int count, const int axis_dim, const int inner_dim, const T coeff, const T* dy, float* dx) { CUDA_KERNEL_LOOP(idx, count) { for (int j = 0; j < axis_dim; j++) dx[(idx / inner_dim * axis_dim + j) * inner_dim + idx % inner_dim] = dy[idx] * coeff; } } template<> void SumGrad<float, CUDAContext>(const int count, const int axis_dim, const int inner_dim, const float coeff, const float* dy, float* dx) { _SumGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, axis_dim, inner_dim, coeff, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.repeat ********************/ template <typename T> __global__ void _Repeat(const int count, const int inner_dim, const int repeats, const int dim, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int d = idx % inner_dim; const int b = (idx / inner_dim / repeats) % dim; const int n = idx / inner_dim / repeats / dim; const int x_idx = (n * dim + b) * inner_dim + d; y[idx] = x[x_idx]; } } template <> void Repeat<float, CUDAContext>(const int count, const int outer_dim, const int dim, const int inner_dim, const int repeats, const float* x, float* y, CUDAContext* context) { _Repeat<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, inner_dim, repeats, dim, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _RepeatGrad(const int count, const int inner_dim, const int repeats, const int dim, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int d = idx % inner_dim; const int b = (idx / inner_dim) % dim; const int n = idx / inner_dim / dim; T gradient = 0; for (int t = 0; t < repeats; t++) gradient += dy[(((n * dim + b) * repeats) + t) * inner_dim + d]; dx[idx] = gradient; } } template <> void RepeatGrad<float, CUDAContext>(const int count, const int outer_dim, const int dim, const int inner_dim, const int repeats, const float* dy, float* dx, CUDAContext* context) { _RepeatGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, inner_dim, repeats, dim, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.slice ********************/ template <typename T> __global__ void _Slice(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int slice_offset, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int tmp = y_slice_dim * inner_dim; const int outer_idx = idx / tmp; const int slice_idx = idx % tmp; const int x_idx = (outer_idx * x_slice_dim + slice_offset) * inner_dim + slice_idx; y[idx] = x[x_idx]; } } template <> void Slice<float, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int slice_offset, const float* x, float* y, CUDAContext* context) { _Slice<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, slice_offset, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SliceGrad(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int slice_offset, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int tmp = y_slice_dim * inner_dim; const int outer_idx = idx / tmp; const int slice_idx = idx % tmp; const int x_idx = (outer_idx * x_slice_dim + slice_offset) * inner_dim + slice_idx; dx[x_idx] = dy[idx]; } } template <> void SliceGrad<float, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int slice_offset, const float* dy, float* dx, CUDAContext* context) { _SliceGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, slice_offset, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.tile ********************/ template <typename T> __global__ void _Tile(const int count, const int ex_inner_dim, const int multiple, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int d = idx % ex_inner_dim; const int n = idx / ex_inner_dim / multiple; const int x_idx = n * ex_inner_dim + d; y[idx] = x[x_idx]; } } template <> void Tile<float, CUDAContext>(const int count, const int outer_dim, const int ex_inner_dim, const int multiple, const float* x, float* y, CUDAContext* context) { _Tile<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, ex_inner_dim, multiple, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _TileGrad(const int count, const int ex_inner_dim, const int multiple, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int d = idx % ex_inner_dim; const int n = idx / ex_inner_dim; T gradient = 0; for (int t = 0; t < multiple; t++) gradient += dy[(n * multiple + t) * ex_inner_dim + d]; dx[idx] = gradient; } } template <> void TileGrad<float, CUDAContext>(const int count, const int outer_dim, const int ex_inner_dim, const int multiple, const float* dy, float* dx, CUDAContext* context) { _TileGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, ex_inner_dim, multiple, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.transpose ********************/ template <typename T> __global__ void _Transpose(const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { int x_idx = 0, y_idx = idx; for (int j = 0; j < ndim; ++j) { int k = order[j]; x_idx += (y_idx / new_steps[j]) * old_steps[k]; y_idx %= new_steps[j]; } y[idx] = x[x_idx]; } } template <> void Transpose<float, CUDAContext>(const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const float* x, float* y) { _Transpose<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, ndim, order, old_steps, new_steps, x, y); CUDA_POST_KERNEL_CHECK; } #ifdef WITH_CUDA_FP16 template <> void Transpose<float16, CUDAContext>(const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const float16* x, float16* y) { _Transpose<half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, ndim, order, old_steps, new_steps, reinterpret_cast<const half*>(x), reinterpret_cast<half*>(y)); CUDA_POST_KERNEL_CHECK; } #endif template <typename T> __global__ void _TransposeGrad(const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { int x_idx = 0, y_idx = idx; for (int j = 0; j < ndim; ++j) { int k = order[j]; x_idx += (y_idx / new_steps[j]) * old_steps[k]; y_idx %= new_steps[j]; } dx[x_idx] = dy[idx]; } } template <> void TransposeGrad<float, CUDAContext>(const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const float* dy, float* dx) { _TransposeGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, ndim, order, old_steps, new_steps, dy, dx); CUDA_POST_KERNEL_CHECK; } #ifdef WITH_CUDA_FP16 template <> void TransposeGrad<float16, CUDAContext>(const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const float16* dy, float16* dx) { _TransposeGrad<half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, ndim, order, old_steps, new_steps, reinterpret_cast<const half*>(dy), reinterpret_cast<half*>(dx)); CUDA_POST_KERNEL_CHECK; } #endif /******************** recurrent.lstm_uint ********************/ template <typename T> __global__ void _LSTMUnitAct(const int count, const int channels, const int g_offset, const int x_offset, const T* x, T* x_act) { CUDA_KERNEL_LOOP(idx, count) { const int ch_4 = idx % x_offset; if (ch_4 < g_offset) x_act[idx] = _SigmoidUnit<float>(x[idx]); else x_act[idx] = std::tanh(x[idx]); } } template <typename T> __global__ void _LSTMUnit(const int count, const int channels, const int o_offset, const int g_offset, const int x_offset, const T* c_1, T* x_act, const T* cont, T* c, T* h) { CUDA_KERNEL_LOOP(idx, count) { const int n = idx / channels; const int ch = idx % channels; T* x_act_ = x_act + n * x_offset; const T i = x_act_[ch]; if (cont != nullptr && cont[n] != T(1)) x_act_[channels
375
Refer the RoIAlign@Caffe2
280
.cu
cu
bsd-2-clause
neopenx/Dragon
1792
<NME> op_kernel.cu <BEF> #ifdef WITH_CUDA #include <cmath> #include "core/context_cuda.h" #include "core/tensor.h" #include "utils/cuda_device.h" #include "utils/op_kernel.h" #include "utils/math_functions.h" namespace dragon { namespace kernel { template <typename T> __global__ void _Empty() { } template<> void Empty<float, CUDAContext>() { _Empty<float> << <1, 1 >> >(); CUDA_POST_KERNEL_CHECK; } template<> void Empty<float16, CUDAContext>() { _Empty<float16> << <1, 1 >> >(); CUDA_POST_KERNEL_CHECK; } /******************** activation.dropout ********************/ template<typename T> __global__ void _Dropout(const int count, const uint32_t thresh, const T scale, const T* x, const uint32_t* mask, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = x[idx] * (mask[idx] > thresh) * scale; } } template<> void Dropout<float, CUDAContext>(const int count, float prob, float scale, const float* x, uint32_t* mask, float* y, CUDAContext* context) { uint32_t thresh = static_cast<uint32_t>(UINT_MAX * prob); math::RandomUniform<uint32_t, CUDAContext>(count, float(0), float(UINT_MAX), mask); _Dropout<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, thresh, scale, x, mask, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _DropoutGrad(const int count, const uint32_t thresh, const T scale, const T* dy, const uint32_t* mask, T* dx) { CUDA_KERNEL_LOOP(idx, count) { dx[idx] = dy[idx] * (mask[idx] > thresh) * scale; } } template<> void DropoutGrad<float, CUDAContext>(const int count, float prob, float scale, const float* dy, const uint32_t* mask, float* dx) { uint32_t thresh = static_cast<uint32_t>(UINT_MAX * prob); _DropoutGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, thresh, scale, dy, mask, dx); CUDA_POST_KERNEL_CHECK; } /******************** activation.prelu ********************/ template <typename T> __global__ void _PRelu(const int count, const int channels, const int dim, const T* x, const T* w, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = (x[idx] > 0) * x[idx] + (x[idx] < 0) * x[idx] * w[0]; } } template <typename T> __global__ void _PReluNCHW(const int count, const int channels, const int dim, const T* x, const T* w, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int c = (idx / dim) % channels; y[idx] = (x[idx] > 0) * x[idx] + (x[idx] < 0) * x[idx] * w[c]; } } template <typename T> __global__ void _PReluNHWC(const int count, const int channels, const int dim, const T* x, const T* w, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % channels; y[idx] = (x[idx] > 0) * x[idx] + (x[idx] < 0) * x[idx] * w[c]; } } template<> void PRelu<float, CUDAContext>(const int count, const int channels, const int dim, const bool channel_shared, const string& data_format, const float* x, const float* w, float* y) { if (channel_shared) { _PRelu<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, channels, dim, x, w, y); } else { if (data_format == "NCHW") { _PReluNCHW<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, channels, dim, x, w, y); } else if (data_format == "NHWC") { _PReluNHWC<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, channels, dim, x, w, y); } else LOG(FATAL) << "Unknown data format: " << data_format; } CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _PReluGrad(const int count, const int channels, const int dim, const T* dy, const T* x, const T* w, T* dx) { CUDA_KERNEL_LOOP(idx, count) { dx[idx] = dy[idx] * ((x[idx] > 0) + (x[idx] <= 0) * w[0]); } } template <typename T> __global__ void _PReluGradNCHW(const int count, const int channels, const int dim, const T* dy, const T* x, const T* w, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int c = (idx / dim) % channels; dx[idx] = dy[idx] * ((x[idx] > 0) + (x[idx] <= 0) * w[c]); } } template <typename T> __global__ void _PReluGradNHWC(const int count, const int channels, const int dim, const T* dy, const T* x, const T* w, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % channels; dx[idx] = dy[idx] * ((x[idx] > 0) + (x[idx] <= 0) * w[c]); } } template<> void PReluGrad<float, CUDAContext>(const int count, const int channels, const int dim, const bool channel_shared, const string& data_format, const float* dy, const float* x, const float* w, float* dx) { if (channel_shared) { _PReluGrad<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, channels, dim, dy, x, w, dx); } else { if (data_format == "NCHW") { _PReluGradNCHW<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, channels, dim, dy, x, w, dx); } else if (data_format == "NHWC") { _PReluGradNHWC<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, channels, dim, dy, x, w, dx); } else LOG(FATAL) << "Unknown data format: " << data_format; } CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _PReluWGradBcast(const int count, const int rows, const int row_offset, const T* dy, const T* x, T* bcast_dw) { CUDA_KERNEL_LOOP(idx, count) { bcast_dw[idx] = dy[idx] * x[idx] * (x[idx] <= 0); for (int n = 1; n < rows; n++) { const int cur_idx = idx + n * row_offset; bcast_dw[idx] += dy[cur_idx] * x[cur_idx] * (x[cur_idx] <= 0); } } } template<> void PReluWGrad<float, CUDAContext>(const int rows, const int row_offset, const int channels, const int dim, const bool channel_shared, const string& data_format, const float* dy, const float* x, const float* multiplier, float* bcast_dw, float* dw) { const int cdim = channels * dim; _PReluWGradBcast<float> << < GET_BLOCKS(cdim), CUDA_NUM_THREADS >> >(cdim, rows, row_offset, dy, x, bcast_dw); CUDA_POST_KERNEL_CHECK; if (channel_shared) { float w_sum = math::Dot<float, CUDAContext>(channels * dim, bcast_dw, multiplier); math::AddScalar<float, CUDAContext>(1, w_sum, dw); } else { if (data_format == "NCHW") { math::Gemv<float, CUDAContext>(CblasNoTrans, channels, dim, 1.0, bcast_dw, multiplier, 1.0, dw); } else if (data_format == "NHWC") { math::Gemv<float, CUDAContext>(CblasTrans, dim, channels, 1.0, bcast_dw, multiplier, 1.0, dw); } else LOG(FATAL) << "Unknown data format: " << data_format; } } /******************** activation.elu ********************/ template <typename T> __global__ void _Elu(const int count, const T* x, const float alpha, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = x[idx] > 0 ? x[idx] : alpha * (std::exp(x[idx]) - 1); } } template<> void Elu<float, CUDAContext>(const int count, const float* x, const float alpha, float* y) { _Elu<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, alpha, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _EluGrad(const int count, const T* dy, const T* y, const float alpha, T* dx) { CUDA_KERNEL_LOOP(idx, count) { dx[idx] = dy[idx] * ((y[idx] > 0) + (alpha + y[idx]) * (y[idx] <= 0)); } } template<> void EluGrad<float, CUDAContext>(const int count, const float* dy, const float* y, const float alpha, float* dx) { _EluGrad<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dy, y, alpha, dx); CUDA_POST_KERNEL_CHECK; } /******************** activation.relu ********************/ template <typename T> __global__ void _Relu(const int count, const T* x, const float slope, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = x[idx] > 0 ? x[idx] : x[idx] * slope; } } template<> void Relu<float, CUDAContext>(const int count, const float* x, const float slope, float* y) { _Relu<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, slope, y); CUDA_POST_KERNEL_CHECK; } #ifdef WITH_CUDA_FP16 template <typename T> __global__ void _ReluHalf(const int count, const half* x, const float slope, half* y) { const half kSlope = __float2half(slope); const half kZero = __float2half(0.0); CUDA_KERNEL_LOOP(idx, count) { #if __CUDA_ARCH__ >= 530 y[idx] = __hgt(x[idx], kZero) ? x[idx] : __hmul(x[idx], kSlope); #endif } } template<> void Relu<float16, CUDAContext>(const int count, const float16* x, const float slope, float16* y) { _ReluHalf<half> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, reinterpret_cast<const half*>(x), slope, reinterpret_cast<half*>(y)); CUDA_POST_KERNEL_CHECK; } #endif template <typename T> __global__ void _ReluGrad(const int count, const T* dy, const T* y, const float slope, T* dx) { CUDA_KERNEL_LOOP(idx, count) { dx[idx] = dy[idx] * ((y[idx] > 0) + slope * (y[idx] <= 0)); } } template<> void ReluGrad<float, CUDAContext>(const int count, const float* dy, const float* y, const float slope, float* dx) { _ReluGrad<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dy, y, slope, dx); CUDA_POST_KERNEL_CHECK; } /******************** activation.selu ********************/ template <typename T> __global__ void _SElu(const int count, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = x[idx] > 0 ? 1.0507 * x[idx] : 1.7581 * (std::exp(x[idx]) - 1); } } template<> void SElu<float, CUDAContext>(const int count, const float* x, float* y) { _SElu<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SEluGrad(const int count, const T* dy, const T* y, T* dx) { CUDA_KERNEL_LOOP(idx, count) { dx[idx] = y[idx] > 0 ? 1.0507 * dy[idx] : (1.7581 + y[idx]) * dy[idx]; } } template<> void SEluGrad<float, CUDAContext>(const int count, const float* dy, const float* y, float* dx) { _SEluGrad<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dy, y, dx); CUDA_POST_KERNEL_CHECK; } /******************** activation.sigmoid ********************/ template <typename T> __device__ T _SigmoidUnit(const T x) { return T(1) / (T(1) + exp(-x)); } template <typename T> __global__ void _Sigmoid(const int n, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, n) { y[idx] = _SigmoidUnit<T>(x[idx]); } } template<> void Sigmoid<float, CUDAContext>(const int count, const float* x, float* y) { _Sigmoid<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SigmoidGrad(const int count, const T* dy, const T* y, T* dx) { CUDA_KERNEL_LOOP(idx, count) { dx[idx] = dy[idx] * y[idx] * (1 - y[idx]); } } template<> void SigmoidGrad<float, CUDAContext>(const int count, const float* dy, const float* y, float* dx) { _SigmoidGrad<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dy, y, dx); CUDA_POST_KERNEL_CHECK; } /******************** activation.softmax ********************/ template <typename T> __global__ void _SoftmaxMaxClass(const int outer_dim, const int classes, const int inner_dim, const T* x, T* scale) { CUDA_KERNEL_LOOP(idx, outer_dim * inner_dim) { int o_idx = idx / inner_dim; int i_idx = idx % inner_dim; T max_val = -FLT_MAX; for (int c = 0; c < classes; c++) max_val = max(x[(o_idx * classes + c) * inner_dim + i_idx], max_val); scale[idx] = max_val; } } template <typename T> __global__ void _SoftmaxSubtract(const int count, const int classes, const int inner_dim, const T* scale, T* y) { CUDA_KERNEL_LOOP(idx, count) { int o_idx = idx / inner_dim / classes; int i_idx = idx % inner_dim; y[idx] -= scale[o_idx * inner_dim + i_idx]; } } template <typename T> __global__ void _SoftmaxExp(const int count, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = std::exp(y[idx]); } } template <typename T> __global__ void _SoftmaxSumClass(const int outer_dim, const int classes, const int inner_dim, const T* y, T* scale) { CUDA_KERNEL_LOOP(idx, outer_dim * inner_dim) { int o_idx = idx / inner_dim; int i_idx = idx % inner_dim; T sum = 0; for (int c = 0; c < classes; c++) sum += y[(o_idx * classes + c) * inner_dim + i_idx]; scale[idx] = sum; } } template <typename T> __global__ void _SoftmaxDiv(const int count, const int classes, const int inner_dim, const T* scale, T* y) { CUDA_KERNEL_LOOP(idx, count) { int o_idx = idx / inner_dim / classes; int i_idx = idx % inner_dim; y[idx] /= scale[o_idx * inner_dim + i_idx]; } } template<> void Softmax<float, CUDAContext>(const int count, const int classes, const int outer_dim, const int inner_dim, const float* sum_multiplier, const float* x, float* scale, float* y, CUDAContext* context) { const int num_preds = inner_dim * outer_dim; _SoftmaxMaxClass<float> << <GET_BLOCKS(num_preds), CUDA_NUM_THREADS >> >(outer_dim, classes, inner_dim, x, scale); _SoftmaxSubtract<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, classes, inner_dim, scale, y); _SoftmaxExp<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, y); _SoftmaxSumClass<float> << <GET_BLOCKS(num_preds), CUDA_NUM_THREADS >> >(outer_dim, classes, inner_dim, y, scale); _SoftmaxDiv<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, classes, inner_dim, scale, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SoftmaxDot(const int outer_dim, const int classes, const int inner_dim, const T* dy, const T* y, T* scale) { CUDA_KERNEL_LOOP(idx, outer_dim * inner_dim) { int o_idx = idx / inner_dim; int i_idx = idx % inner_dim; T dot = 0; for (int c = 0; c < classes; c++) dot += (y[(o_idx * classes + c) * inner_dim + i_idx] * dy[(o_idx * classes + c) * inner_dim + i_idx]); scale[idx] = dot; } } template<> void SoftmaxGrad<float, CUDAContext>(const int count, const int classes, const int outer_dim, const int inner_dim, const float* sum_multiplier, const float* dy, const float* y, float* scale, float* dx) { const int num_preds = inner_dim * outer_dim; _SoftmaxDot<float> << <GET_BLOCKS(num_preds), CUDA_NUM_THREADS >> >(outer_dim, classes, inner_dim, dy, y, scale); _SoftmaxSubtract<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, classes, inner_dim, scale, dx); math::Mul<float, CUDAContext>(count, dx, y, dx); CUDA_POST_KERNEL_CHECK; } /******************** activation.tanh ********************/ template <typename T> __global__ void _Tanh(const int count, const T* x, T* y) { CUDA_KERNEL_LOOP(i, count) { y[i] = std::tanh(x[i]); } } template<> void Tanh<float, CUDAContext>(const int count, const float* x, float* y) { _Tanh<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _TanhGrad(const int count, const T* dy, const T* y, T* dx) { CUDA_KERNEL_LOOP(i, count) { dx[i] = dy[i] * (1 - y[i] * y[i]); } } template<> void TanhGrad<float, CUDAContext>(const int count, const float* dy, const float* y, float* dx) { _TanhGrad<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dy, y, dx); CUDA_POST_KERNEL_CHECK; } /******************** arithmetic.bias_add ********************/ template <typename T> __global__ void _BiasAdd_NCHW(const int count, const int dim, const int inner_dim, const T* bias, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int bias_idx = (idx / inner_dim) % dim; y[idx] += bias[bias_idx]; } } template <typename T> __global__ void _BiasAdd_NHWC(const int count, const int dim, const int inner_dim, const T* bias, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] += bias[idx % dim]; } } template<> void BiasAdd<float, CUDAContext>(const int count, const int outer_dim, const int dim, const int inner_dim, const string& data_format, const float* bias, const float* bias_multiplier, float* y) { if (data_format == "NCHW") { _BiasAdd_NCHW<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, inner_dim, bias, y); } else if (data_format == "NHWC") { _BiasAdd_NHWC<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, inner_dim, bias, y); } else LOG(FATAL) << "Unknown data format: " << data_format; } /******************** arithmetic.clip ********************/ template <typename T> __global__ void _Clip(const int count, const T low, const T high, const T* x, T* mask, T* y) { CUDA_KERNEL_LOOP(idx, count) { mask[idx] = 1.0; if (x[idx] > high || x[idx] < low) mask[idx] = 0.0; y[idx] = x[idx] > high ? high : x[idx]; y[idx] = x[idx] < low ? low : x[idx]; } } template <> void Clip<float, CUDAContext>(const int count, const float low, const float high, const float* x, float* mask, float* y) { _Clip<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, low, high, x, mask, y); } /******************** arithmetic.scale ********************/ template <typename T> __global__ void _ScaleWithoutBias(const int n, const T* x, const T* scale, const int scale_dim, const int inner_dim, T* y) { CUDA_KERNEL_LOOP(idx, n) { const int scale_idx = (idx / inner_dim) % scale_dim; y[idx] = x[idx] * scale[scale_idx]; } } template <typename T> __global__ void _ScaleWithBias(const int n, const T* x, const T* scale, const T* bias, const int scale_dim, const int inner_dim, T* y) { CUDA_KERNEL_LOOP(idx, n) { const int scale_idx = (idx / inner_dim) % scale_dim; y[idx] = x[idx] * scale[scale_idx] + bias[scale_idx]; } } template<> void Scale<float, CUDAContext>(const int axis, Tensor* x, Tensor* gamma, Tensor* beta, Tensor* BMul, Tensor* y) { const int count = x->count(); const int inner_dim = x->count(axis + gamma->ndim()); const int scale_dim = gamma->count(); auto* Xdata = x->data<float, CUDAContext>(); auto* Ydata = y->mutable_data<float, CUDAContext>(); auto* Sdata = gamma->data<float, CUDAContext>(); auto* Bdata = beta != nullptr ? beta->data<float, CUDAContext>() : nullptr; if (Bdata != nullptr) _ScaleWithBias<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, Xdata, Sdata, Bdata, scale_dim, inner_dim, Ydata); else _ScaleWithoutBias<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, Xdata, Sdata, scale_dim, inner_dim, Ydata); } #ifdef WITH_CUDA_FP16 template <typename T> __global__ void _ScaleWithoutBiasHalf(const int n, const half* x, const half* scale, const int scale_dim, const int inner_dim, half* y) { CUDA_KERNEL_LOOP(idx, n) { #if __CUDA_ARCH__ >= 530 const int scale_idx = (idx / inner_dim) % scale_dim; y[idx] = __hmul(x[idx], scale[scale_idx]); #endif } } template <typename T> __global__ void _ScaleWithBiasHalf(const int n, const half* x, const half* scale, const half* bias, const int scale_dim, const int inner_dim, half* y) { CUDA_KERNEL_LOOP(idx, n) { #if __CUDA_ARCH__ >= 530 const int scale_idx = (idx / inner_dim) % scale_dim; y[idx] = __hadd(__hmul(x[idx], scale[scale_idx]), bias[scale_idx]); #endif } } template<> void Scale<float16, CUDAContext>(const int axis, Tensor* x, Tensor* gamma, Tensor* beta, Tensor* BMul, Tensor* y) { const int count = x->count(); const int inner_dim = x->count(axis + gamma->ndim()); const int scale_dim = gamma->count(); auto* Xdata = x->data<float16, CUDAContext>(); auto* Ydata = y->mutable_data<float16, CUDAContext>(); auto* Sdata = gamma->data<float16, CUDAContext>(); auto* Bdata = beta != nullptr ? beta->data<float16, CUDAContext>() : nullptr; if (Bdata != nullptr) _ScaleWithBiasHalf<half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, reinterpret_cast<const half*>(Xdata), reinterpret_cast<const half*>(Sdata), reinterpret_cast<const half*>(Bdata), scale_dim, inner_dim, reinterpret_cast<half*>(Ydata)); else _ScaleWithoutBiasHalf<half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, reinterpret_cast<const half*>(Xdata), reinterpret_cast<const half*>(Sdata), scale_dim, inner_dim, reinterpret_cast<half*>(Ydata)); } #endif template <> void ScaleGrad<float, CUDAContext>(const int axis, Tensor* dy, Tensor* gamma, Tensor* dx) { const int count = dx->count(); const int inner_dim = dx->count(axis + gamma->ndim()); const int scale_dim = gamma->count(); auto* dYdata = dy->data<float, CUDAContext>(); auto* dXdata = dx->mutable_data<float, CUDAContext>(); auto* Sdata = gamma->data<float, CUDAContext>(); _ScaleWithoutBias<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dYdata, Sdata, scale_dim, inner_dim, dXdata); } /******************** cast.float2half ********************/ #ifdef WITH_CUDA_FP16 template <typename T> __global__ void _FloatToHalfKernel(const int count, const float* x, half* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = __float2half(x[idx]); } } template <> void Float2Half<float, CUDAContext>(const int count, const float* x, float16* y) { _FloatToHalfKernel<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, reinterpret_cast<half*>(y)); CUDA_POST_KERNEL_CHECK; } #endif /******************** control_flow.compare ********************/ template <typename T> __global__ void _Equal(const int count, const T* a, const T* b, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = fabs(a[idx] - b[idx]) < FLT_EPSILON ? 1.0 : 0.0; } } template <> void Equal<float, CUDAContext>(const int count, const float* a, const float* b, float* y) { _Equal<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, a, b, y); CUDA_POST_KERNEL_CHECK; } /******************** loss.l1_loss ********************/ template <typename T> __global__ void _AbsGrad(const int count, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const T val = dy[idx]; // val > 0: 1 | val == 0: 0 | val < 0: -1 dx[idx] = (val > T(0)) - (val < T(0)); } } template<> void AbsGrad<float, CUDAContext>(const int count, const float* dy, float* dx) { _AbsGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** loss.sigmoid_cross_entropy ********************/ template <typename T> __global__ void _SigmoidCrossEntropy(const int count, const T* x, const T* targets, T* loss) { CUDA_KERNEL_LOOP(idx, count) { loss[idx] = std::log(1 + std::exp(x[idx] - 2 * x[idx] * (x[idx] >= 0))) + x[idx] * ((x[idx] >= 0) - targets[idx]); } } template <> void SigmoidCrossEntropy<float, CUDAContext>(const int count, const float* x, const float* targets, float* loss) { _SigmoidCrossEntropy<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, targets, loss); CUDA_POST_KERNEL_CHECK; } /******************** loss.smooth_l1_loss ********************/ template <typename T> __global__ void _SmoothL1(const int count, const float sigma2, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const T val = x[idx]; const T abs_val = abs(val); if (abs_val < 1.0 / sigma2) y[idx] = 0.5 * val * val * sigma2; else y[idx] = abs_val - 0.5 / sigma2; } } template<> void SmoothL1<float, CUDAContext>(const int count, const float sigma2, const float* x, float* y) { _SmoothL1<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, sigma2, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SmoothL1Grad(const int count, const float sigma2, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const T val = dy[idx]; const T abs_val = abs(val); if (abs_val < 1.0 / sigma2) dx[idx] = val * sigma2; // val > 0: 1 | val == 0: 0 | val < 0: -1 else dx[idx] = (val > T(0)) - (val < T(0)); } } template<> void SmoothL1Grad<float, CUDAContext>(const int count, const float sigma2, const float* dy, float* dx) { _SmoothL1Grad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, sigma2, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** loss.softmax_cross_entropy ********************/ template <typename T> __global__ void _SoftmaxCrossEntropy(const int count, const T* prob, const T* target, T* loss) { CUDA_KERNEL_LOOP(idx, count) { loss[idx] = -target[idx] * log(max(prob[idx], FLT_MIN)); } } template <> void SoftmaxCrossEntropy<float, CUDAContext>(const int count, const float* prob, const float* target, float* loss) { _SoftmaxCrossEntropy<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, prob, target, loss); CUDA_POST_KERNEL_CHECK; } /******************** loss.sparse_softmax_cross_entropy ********************/ template <typename T> __global__ void _SparseSoftmaxCrossEntropy(const int count, const T* prob, const T* labels, T* loss, const int classes, const int inner_dim, const int* ignores, const int ignore_num, T* valid) { CUDA_KERNEL_LOOP(idx, count) { const int o_idx = idx / inner_dim; const int i_idx = idx % inner_dim; const int label = labels[o_idx * inner_dim + i_idx]; int k; for (k = 0; k < ignore_num; k++) { if (label == ignores[k]) { loss[idx] = valid[idx] = 0; break; } } if (k == ignore_num) { loss[idx] = -log(max(prob[(o_idx * classes + label) * inner_dim + i_idx], FLT_MIN)); valid[idx] = 1; } } } template <> void SparseSoftmaxCrossEntropy<float, CUDAContext>(const int count, const int classes, const int outer_dim, const int inner_dim, const float* prob, const float* labels, float* loss, float* valid, Tensor* ignore) { const int* ignores = ignore->count() > 0 ? ignore->data<int, CUDAContext>() : nullptr; const int num_preds = outer_dim * inner_dim; _SparseSoftmaxCrossEntropy<float> << <GET_BLOCKS(num_preds), CUDA_NUM_THREADS >> >(num_preds, prob, labels, loss, classes, inner_dim, ignores, ignore->count(), valid); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SparseSoftmaxCrossEntropyGrad(const int count, const T* prob, const T* labels, T* dx, const int classes, const int inner_dim, const int* ignores, const int ignore_num, T* valid) { CUDA_KERNEL_LOOP(idx, count) { const int o_idx = idx / inner_dim; const int i_idx = idx % inner_dim; const int label = labels[o_idx * inner_dim + i_idx]; int k; for (k = 0; k < ignore_num; k++) if (label == ignores[k]) break; if (k != ignore_num) { for (int c = 0; c < classes; c++) dx[(o_idx * classes + c) * inner_dim + i_idx] = 0; valid[idx] = 0; } else { dx[(o_idx * classes + label) * inner_dim + i_idx] -= 1; valid[idx] = 1; } } } template<> void SparseSoftmaxCrossEntropyGrad<float, CUDAContext>(const int count, const int classes, const int outer_dim, const int inner_dim, const float* prob, const float* labels, float* valid, Tensor* ignore, float* dXdata) { const int* ignores = ignore->count() > 0 ? ignore->data <int, CUDAContext >() : nullptr; const int num_preds = outer_dim * inner_dim; _SparseSoftmaxCrossEntropyGrad<float> << <GET_BLOCKS(num_preds), CUDA_NUM_THREADS >> >(num_preds, prob, labels, dXdata, classes, inner_dim, ignores, ignore->count(), valid); CUDA_POST_KERNEL_CHECK; } /******************** loss.sparse_softmax_focal_loss ********************/ template <typename T> __global__ void _SparseSoftmaxFocalScale(const int count, const float gamma, const T* prob, T* scale) { CUDA_KERNEL_LOOP(idx, count) { scale[idx] = std::pow((1.0f - prob[idx]), gamma); } } template <typename T> __global__ void _SparseSoftmaxFocalLoss(const int count, const float pos_alpha, const float neg_alpha, const int neg_id, T* scale, const T* prob, const T* labels, T* loss, const int classes, const int inner_dim, const int* ignores, const int ignore_num, T* valid) { CUDA_KERNEL_LOOP(idx, count) { const int o_idx = idx / inner_dim; const int i_idx = idx % inner_dim; const int label = labels[o_idx * inner_dim + i_idx]; int k; for (k = 0; k < ignore_num; k++) { if (label == ignores[k]) { loss[idx] = valid[idx] = 0; break; } } if (k == ignore_num) { const int t_ = (o_idx * classes + label) * inner_dim + i_idx; scale[t_] = label > neg_id ? pos_alpha * scale[t_] : neg_alpha * scale[t_]; loss[idx] = -scale[t_] * std::log(max(prob[t_], FLT_MIN)); valid[idx] = label > neg_id ? 1 : 0; } } } template <> void SparseSoftmaxFocalLoss<float, CUDAContext>(const int count, const int classes, const int outer_dim, const int inner_dim, const float pos_alpha, const float neg_alpha, const float gamma, const int neg_id, const float* prob, const float* labels, float* scale, float* loss, float* valid, Tensor* ignore) { const int* ignores = ignore->count() > 0 ? ignore->data<int, CUDAContext>() : nullptr; const int num_preds = outer_dim * inner_dim; _SparseSoftmaxFocalScale<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, gamma, prob, scale); _SparseSoftmaxFocalLoss<float> << <GET_BLOCKS(num_preds), CUDA_NUM_THREADS >> >(num_preds, pos_alpha, neg_alpha, neg_id, scale, prob, labels, loss, classes, inner_dim, ignores, ignore->count(), valid); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SparseSoftmaxFocalLossGrad(const int count, const float gamma, const int neg_id, const float eps, const T* scale, const T* prob, const T* labels, T* dx, const int classes, const int inner_dim, const int* ignores, const int ignore_num, T* valid) { CUDA_KERNEL_LOOP(idx, count) { const int o_idx = idx / inner_dim; const int i_idx = idx % inner_dim; const int label = labels[o_idx * inner_dim + i_idx]; int k; for (k = 0; k < ignore_num; k++) if (label == ignores[k]) break; if (k != ignore_num) { for (int c = 0; c < classes; c++) dx[(o_idx * classes + c) * inner_dim + i_idx] = 0; valid[idx] = 0; } else { const int t_ = (o_idx * classes + label) * inner_dim + i_idx; T grad = -gamma * (scale[t_] / max((1.0f - prob[t_]), eps)) * std::log(max(prob[t_], FLT_MIN)) * prob[t_] + scale[t_]; for (int c = 0; c < classes; c++) { const int i_ = (o_idx * classes + c) * inner_dim + i_idx; if (c == label) { dx[i_] = grad * (prob[t_] - 1); } else { dx[i_] = grad * prob[i_]; } } valid[idx] = label > neg_id ? 1 : 0; } } } template<> void SparseSoftmaxFocalLossGrad<float, CUDAContext>(const int count, const int classes, const int outer_dim, const int inner_dim, const float gamma, const int neg_id, const float eps, const float* scale, const float* prob, const float* labels, float* valid, Tensor* ignore, float* dXdata) { const int* ignores = ignore->count() > 0 ? ignore->data <int, CUDAContext >() : nullptr; const int num_preds = outer_dim * inner_dim; _SparseSoftmaxFocalLossGrad<float> << <GET_BLOCKS(num_preds), CUDA_NUM_THREADS >> >(num_preds, gamma, neg_id, eps, scale, prob, labels, dXdata, classes, inner_dim, ignores, ignore->count(), valid); CUDA_POST_KERNEL_CHECK; } /******************** misc.image_data ********************/ template <typename Tx, typename Ty> __global__ void _ImageData_NCHW(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const Tx* x, Ty* y) { CUDA_KERNEL_LOOP(idx, count) { const int w = idx % W; const int h = (idx / W) % H; const int c = (idx / W / H) % C; const int n = idx / W / H / C; Ty raw_value = x[((n * H + h) * W + w) * C + c]; if (mean_values != nullptr) raw_value -= mean_values[c]; if (std_values != nullptr) raw_value /= std_values[c]; y[idx] = raw_value; } } template <typename Tx, typename Ty> __global__ void _ImageData_NHWC(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const Tx* x, Ty* y) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % C; Ty raw_value = x[idx]; if (mean_values != nullptr) raw_value -= mean_values[c]; if (std_values != nullptr) raw_value /= std_values[c]; y[idx] = raw_value; } } template <typename Tx, typename Ty> __global__ void _ImageDataHalf_NCHW(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const Tx* x, Ty* y) { CUDA_KERNEL_LOOP(idx, count) { const int w = idx % W; const int h = (idx / W) % H; const int c = (idx / W / H) % C; const int n = idx / W / H / C; float raw_value = x[((n * H + h) * W + w) * C + c]; if (mean_values != nullptr) raw_value -= mean_values[c]; if (std_values != nullptr) raw_value /= std_values[c]; y[idx] = __float2half(raw_value); } } template <typename Tx, typename Ty> __global__ void _ImageDataHalf_NHWC(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const Tx* x, Ty* y) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % C; float raw_value = x[idx]; if (mean_values != nullptr) raw_value -= mean_values[c]; if (std_values != nullptr) raw_value /= std_values[c]; y[idx] = __float2half(raw_value); } } template <> void ImageData<float, float, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const string& data_format, const float* x, float* y) { if (data_format == "NCHW") { _ImageData_NCHW<float, float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, y); } else if (data_format == "NHWC") { _ImageData_NHWC<float, float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, y); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } template <> void ImageData<uint8_t, float, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const string& data_format, const uint8_t* x, float* y) { if (data_format == "NCHW") { _ImageData_NCHW<uint8_t, float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, y); } else if (data_format == "NHWC") { _ImageData_NHWC<uint8_t, float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, y); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } #ifdef WITH_CUDA_FP16 template <> void ImageData<float, float16, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const string& data_format, const float* x, float16* y) { if (data_format == "NCHW") { _ImageDataHalf_NCHW<float, half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, reinterpret_cast<half*>(y)); } else if (data_format == "NHWC") { _ImageDataHalf_NHWC<float, half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, reinterpret_cast<half*>(y)); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } template <> void ImageData<uint8_t, float16, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const string& data_format, const uint8_t* x, float16* y) { if (data_format == "NCHW") { _ImageDataHalf_NCHW<uint8_t, half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, reinterpret_cast<half*>(y)); } else if (data_format == "NHWC") { _ImageDataHalf_NHWC<uint8_t, half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, reinterpret_cast<half*>(y)); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } #endif /******************** ndarray.argmax ********************/ template <typename T> __global__ void _Arange(const int count, const int start, const int step, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = start + idx * step; } } template<> void Arange<float, CUDAContext>(const int count, const int start, const int step, float* y) { _Arange<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, start, step, y); CUDA_POST_KERNEL_CHECK; } template<> void Arange<int, CUDAContext>(const int count, const int start, const int step, int* y) { _Arange<int> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, start, step, y); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.argmax ********************/ template <typename T> __global__ void _Argmax(const int count, const int axis_dim, const int inner_dim, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { T max_val = -FLT_MAX; int max_idx = -1; for (int j = 0; j < axis_dim; ++j) { const T val = x[(idx / inner_dim * axis_dim + j) * inner_dim + idx % inner_dim]; if (val > max_val) { max_val = val; max_idx = j; } } y[idx] = max_idx; } } template<> void Argmax<float, CUDAContext>(const int count, const int axis_dim, const int inner_dim, const int top_k, const float* x, float* y) { CHECK_EQ(top_k, 1) << "top_k > 1 is not supported with CUDA"; _Argmax<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, axis_dim, inner_dim, x, y); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.argmin ********************/ template <typename T> __global__ void _Argmin(const int count, const int axis_dim, const int inner_dim, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { T min_val = FLT_MAX; int min_idx = -1; for (int j = 0; j < axis_dim; ++j) { const T val = x[(idx / inner_dim * axis_dim + j) * inner_dim + idx % inner_dim]; if (val < min_val) { min_val = val; min_idx = j; } } y[idx] = min_idx; } } template<> void Argmin<float, CUDAContext>(const int count, const int axis_dim, const int inner_dim, const int top_k, const float* x, float* y) { CHECK_EQ(top_k, 1) << "top_k > 1 is not supported with CUDA"; _Argmin<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, axis_dim, inner_dim, x, y); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.at ********************/ template <typename T> __global__ void _CanonicalAxis(const int count, const int dim, T* y) { CUDA_KERNEL_LOOP(idx, count) { if (y[idx] < 0) y[idx] += dim; } } template <> void CanonicalAxis<int, CUDAContext>(const int count, const int dim, int* y) { _CanonicalAxis<int> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _At(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int outer_idx = idx / inner_dim / y_slice_dim; const int slice_idx = idx % inner_dim; const int y_idx_offset = (idx / inner_dim) % y_slice_dim; const int x_idx_offset = indices[y_idx_offset]; const int x_idx = (outer_idx * x_slice_dim + x_idx_offset) * inner_dim + slice_idx; y[idx] = x[x_idx]; } } template <> void At<float, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const float* x, float* y, CUDAContext* context) { _At<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, indices, x, y); CUDA_POST_KERNEL_CHECK; } template <> void At<int, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const int* x, int* y, CUDAContext* context) { _At<int> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, indices, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _AtGrad(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int outer_idx = idx / inner_dim / y_slice_dim; const int slice_idx = idx % inner_dim; const int y_idx_offset = (idx / inner_dim) % y_slice_dim; const int x_idx_offset = indices[y_idx_offset]; const int x_idx = (outer_idx * x_slice_dim + x_idx_offset) * inner_dim + slice_idx; atomicAdd(dx + x_idx, dy[idx]); } } template <> void AtGrad<float, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const float* dy, float* dx) { _AtGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, indices, dy, dx); CUDA_POST_KERNEL_CHECK; } template <> void AtGrad<int, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const int* dy, int* dx) { _AtGrad<int> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, indices, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.concat ********************/ template <typename T> __global__ void _Concat(const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int tmp = x_concat_dim * inner_dim; const int outer_idx = idx / tmp; const int concat_idx = idx % tmp; const int y_idx = (outer_idx * y_concat_dim + concat_offset) * inner_dim + concat_idx; y[y_idx] = x[idx]; } } template <> void Concat<float, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const float* x, float* y, CUDAContext* context) { _Concat<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_concat_dim, y_concat_dim, concat_offset, x, y); CUDA_POST_KERNEL_CHECK; } #ifdef WITH_CUDA_FP16 template <> void Concat<float16, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const float16* x, float16* y, CUDAContext* context) { _Concat<half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_concat_dim, y_concat_dim, concat_offset, reinterpret_cast<const half*>(x), reinterpret_cast<half*>(y)); CUDA_POST_KERNEL_CHECK; } #endif template <typename T> __global__ void _ConcatGrad(const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int tmp = x_concat_dim * inner_dim; const int outer_idx = idx / tmp; const int concat_idx = idx % tmp; const int y_idx = (outer_idx * y_concat_dim + concat_offset) * inner_dim + concat_idx; dx[idx] = dy[y_idx]; } } template <> void ConcatGrad<float, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const float* dy, float* dx, CUDAContext* context) { _ConcatGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_concat_dim, y_concat_dim, concat_offset, dy, dx); CUDA_POST_KERNEL_CHECK; } #ifdef WITH_CUDA_FP16 template <> void ConcatGrad<float16, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const float16* dy, float16* dx, CUDAContext* context) { _ConcatGrad<half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_concat_dim, y_concat_dim, concat_offset, reinterpret_cast<const half*>(dy), reinterpret_cast<half*>(dx)); CUDA_POST_KERNEL_CHECK; } #endif /******************** ndarray.crop ********************/ template<typename T> __global__ void _Crop1D(const int count, const int dim, const int ex_dim, const int inner_dim, const int start, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; y[idx] = x[(o * dim + ex_d + start) * inner_dim + i]; } } template<> void Crop1D<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int start, const float* x, float* y, CUDAContext* context) { _Crop1D<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, start, x, y); CUDA_POST_KERNEL_CHECK; } template<typename T> __global__ void _Crop1DGrad(const int count, const int dim, const int ex_dim, const int inner_dim, const int start, const int end, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int d = (idx / inner_dim) % dim; const int o = idx / inner_dim / dim; if (d >= start && d < end) dx[idx] = dy[(o * ex_dim + d - start) * inner_dim + i]; } } template<> void Crop1DGrad<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int start, const int end, const float* dy, float* dx, CUDAContext* context) { _Crop1DGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, start, end, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.pad ********************/ template <typename T> __global__ void _ConstPad1D(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T value, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; const int d = ex_d - pad_l; y[idx] = (d < 0 || d >= dim) ? value : x[(o * dim + d) * inner_dim + i]; } } template <> void ConstPad1D<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float value, const float* x, float* y, CUDAContext* context) { _ConstPad1D<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, pad_l, value, x, y); } template <typename T> __global__ void _ReflectPad1D(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; int d = ex_d - pad_l; d = max(d, -d); d = min(d, 2 * dim - d - 2); y[idx] = x[(o * dim + d) * inner_dim + i]; } } template <> void ReflectPad1D<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float* x, float* y, CUDAContext* context) { _ReflectPad1D<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, pad_l, x, y); } template <typename T> __global__ void _EdgePad1D(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; const int d = min(dim - 1, max(ex_d - pad_l, 0)); y[idx] = x[(o * dim + d) * inner_dim + i]; } } template <> void EdgePad1D<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float* x, float* y, CUDAContext* context) { _EdgePad1D<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, pad_l, x, y); } template <typename T> __global__ void _ConstPad1DGrad(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % dim + pad_l; const int o = idx / inner_dim / dim; dx[idx] = dy[(o * ex_dim + ex_d) * inner_dim + i]; } } template <> void ConstPad1DGrad<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float* dy, float* dx, CUDAContext* context) { _ConstPad1DGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, pad_l, dy, dx); } template <typename T> __global__ void _ReflectPad1DGrad(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; int d = ex_d - pad_l; d = max(d, -d); d = min(d, 2 * dim - d - 2); atomicAdd(&dx[(o * dim + d) * inner_dim + i], dy[idx]); } } template <> void ReflectPad1DGrad<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float* dy, float* dx) { _ReflectPad1DGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, pad_l, dy, dx); } template <typename T> __global__ void _EdgePad1DGrad(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; const int d = min(dim - 1, max(ex_d - pad_l, 0)); atomicAdd(&dx[(o * dim + d) * inner_dim + i], dy[idx]); } } template <> void EdgePad1DGrad<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float* dy, float* dx, CUDAContext* context) { _EdgePad1DGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, pad_l, dy, dx); } /******************** ndarray.one_hot ********************/ template <typename T> __global__ void _OneHot(const int count, const int depth, const int on_value, const float* x, float* y) { CUDA_KERNEL_LOOP(idx, count) { const int val = x[idx]; y[idx * depth + val] = on_value; } } template <> void OneHot<float, CUDAContext>(const int count, const int depth, const int on_value, const float* x, float* y) { _OneHot<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, depth, on_value, x, y); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.reduce ********************/ template <typename T> __global__ void _Sum(const int count, const int axis_dim, const int inner_dim, const T* x, float* y) { CUDA_KERNEL_LOOP(idx, count) { T sum_val = 0.0; for (int j = 0; j < axis_dim; j++) sum_val += x[(idx / inner_dim * axis_dim + j) * inner_dim + idx % inner_dim]; y[idx] = sum_val; } } template<> void Sum<float, CUDAContext>(const int count, const int axis_dim, const int inner_dim, const float* x, float* y) { _Sum<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, axis_dim, inner_dim, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SumGrad(const int count, const int axis_dim, const int inner_dim, const T coeff, const T* dy, float* dx) { CUDA_KERNEL_LOOP(idx, count) { for (int j = 0; j < axis_dim; j++) dx[(idx / inner_dim * axis_dim + j) * inner_dim + idx % inner_dim] = dy[idx] * coeff; } } template<> void SumGrad<float, CUDAContext>(const int count, const int axis_dim, const int inner_dim, const float coeff, const float* dy, float* dx) { _SumGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, axis_dim, inner_dim, coeff, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.repeat ********************/ template <typename T> __global__ void _Repeat(const int count, const int inner_dim, const int repeats, const int dim, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int d = idx % inner_dim; const int b = (idx / inner_dim / repeats) % dim; const int n = idx / inner_dim / repeats / dim; const int x_idx = (n * dim + b) * inner_dim + d; y[idx] = x[x_idx]; } } template <> void Repeat<float, CUDAContext>(const int count, const int outer_dim, const int dim, const int inner_dim, const int repeats, const float* x, float* y, CUDAContext* context) { _Repeat<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, inner_dim, repeats, dim, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _RepeatGrad(const int count, const int inner_dim, const int repeats, const int dim, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int d = idx % inner_dim; const int b = (idx / inner_dim) % dim; const int n = idx / inner_dim / dim; T gradient = 0; for (int t = 0; t < repeats; t++) gradient += dy[(((n * dim + b) * repeats) + t) * inner_dim + d]; dx[idx] = gradient; } } template <> void RepeatGrad<float, CUDAContext>(const int count, const int outer_dim, const int dim, const int inner_dim, const int repeats, const float* dy, float* dx, CUDAContext* context) { _RepeatGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, inner_dim, repeats, dim, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.slice ********************/ template <typename T> __global__ void _Slice(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int slice_offset, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int tmp = y_slice_dim * inner_dim; const int outer_idx = idx / tmp; const int slice_idx = idx % tmp; const int x_idx = (outer_idx * x_slice_dim + slice_offset) * inner_dim + slice_idx; y[idx] = x[x_idx]; } } template <> void Slice<float, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int slice_offset, const float* x, float* y, CUDAContext* context) { _Slice<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, slice_offset, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SliceGrad(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int slice_offset, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int tmp = y_slice_dim * inner_dim; const int outer_idx = idx / tmp; const int slice_idx = idx % tmp; const int x_idx = (outer_idx * x_slice_dim + slice_offset) * inner_dim + slice_idx; dx[x_idx] = dy[idx]; } } template <> void SliceGrad<float, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int slice_offset, const float* dy, float* dx, CUDAContext* context) { _SliceGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, slice_offset, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.tile ********************/ template <typename T> __global__ void _Tile(const int count, const int ex_inner_dim, const int multiple, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int d = idx % ex_inner_dim; const int n = idx / ex_inner_dim / multiple; const int x_idx = n * ex_inner_dim + d; y[idx] = x[x_idx]; } } template <> void Tile<float, CUDAContext>(const int count, const int outer_dim, const int ex_inner_dim, const int multiple, const float* x, float* y, CUDAContext* context) { _Tile<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, ex_inner_dim, multiple, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _TileGrad(const int count, const int ex_inner_dim, const int multiple, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int d = idx % ex_inner_dim; const int n = idx / ex_inner_dim; T gradient = 0; for (int t = 0; t < multiple; t++) gradient += dy[(n * multiple + t) * ex_inner_dim + d]; dx[idx] = gradient; } } template <> void TileGrad<float, CUDAContext>(const int count, const int outer_dim, const int ex_inner_dim, const int multiple, const float* dy, float* dx, CUDAContext* context) { _TileGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, ex_inner_dim, multiple, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.transpose ********************/ template <typename T> __global__ void _Transpose(const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { int x_idx = 0, y_idx = idx; for (int j = 0; j < ndim; ++j) { int k = order[j]; x_idx += (y_idx / new_steps[j]) * old_steps[k]; y_idx %= new_steps[j]; } y[idx] = x[x_idx]; } } template <> void Transpose<float, CUDAContext>(const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const float* x, float* y) { _Transpose<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, ndim, order, old_steps, new_steps, x, y); CUDA_POST_KERNEL_CHECK; } #ifdef WITH_CUDA_FP16 template <> void Transpose<float16, CUDAContext>(const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const float16* x, float16* y) { _Transpose<half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, ndim, order, old_steps, new_steps, reinterpret_cast<const half*>(x), reinterpret_cast<half*>(y)); CUDA_POST_KERNEL_CHECK; } #endif template <typename T> __global__ void _TransposeGrad(const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { int x_idx = 0, y_idx = idx; for (int j = 0; j < ndim; ++j) { int k = order[j]; x_idx += (y_idx / new_steps[j]) * old_steps[k]; y_idx %= new_steps[j]; } dx[x_idx] = dy[idx]; } } template <> void TransposeGrad<float, CUDAContext>(const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const float* dy, float* dx) { _TransposeGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, ndim, order, old_steps, new_steps, dy, dx); CUDA_POST_KERNEL_CHECK; } #ifdef WITH_CUDA_FP16 template <> void TransposeGrad<float16, CUDAContext>(const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const float16* dy, float16* dx) { _TransposeGrad<half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, ndim, order, old_steps, new_steps, reinterpret_cast<const half*>(dy), reinterpret_cast<half*>(dx)); CUDA_POST_KERNEL_CHECK; } #endif /******************** recurrent.lstm_uint ********************/ template <typename T> __global__ void _LSTMUnitAct(const int count, const int channels, const int g_offset, const int x_offset, const T* x, T* x_act) { CUDA_KERNEL_LOOP(idx, count) { const int ch_4 = idx % x_offset; if (ch_4 < g_offset) x_act[idx] = _SigmoidUnit<float>(x[idx]); else x_act[idx] = std::tanh(x[idx]); } } template <typename T> __global__ void _LSTMUnit(const int count, const int channels, const int o_offset, const int g_offset, const int x_offset, const T* c_1, T* x_act, const T* cont, T* c, T* h) { CUDA_KERNEL_LOOP(idx, count) { const int n = idx / channels; const int ch = idx % channels; T* x_act_ = x_act + n * x_offset; const T i = x_act_[ch]; if (cont != nullptr && cont[n] != T(1)) x_act_[channels
375
Refer the RoIAlign@Caffe2
280
.cu
cu
bsd-2-clause
neopenx/Dragon
1793
<NME> refactor_your_tests.rst <BEF> How to refactor your tests ========================== A good, although uncommon development practice when building features or fixing bugs is refactoring. Refactoring means making small, incremental changes that improve the quality of code. This usually means: * De-duplicating code * Decoupling code * Minimizing the amount of imperative code (e.g. python code) in favor of declarative code (e.g. YAML configuration). * Improving the code's readability (changing names, adding comments) Tests are code too, so it's good practice to refactor your tests to gradually improve :doc:`/glossary/test_quality` as you write new tests or fix existing ones. Of course, you should only refactor *passing* tests and you should always run passing tests after refactoring to ensure that they are still passing. Here is a list of things which commonly need refactoring in tests. De-duplicate duplicated tests ----------------------------- You may find after a while that your test suite has a lot of duplication - for example, tests that do almost the same thing in two or three slightly different ways. See :doc:`/glossary/parameterize_test_cases` for how to remove some of that duplication. Move configuration from engine.py to all.settings ------------------------------------------------- Your execution engine should be kept as short as possible yet still capable. If you have any long lists in your engine.py, moving them into all.settings will help to keep it clean. Change HTML IDs and Classes to make them more readable ------------------------------------------------------ Beware! This might be best left to a developer since it may require code changes as well if the ID is used in many places (code, javascript, CSS, etc.) as well as changing other IDs to accomodate. If you have test steps that look like this:: - Click: btn-rgstr-a1 Because the registration button had the HTML ID 'btn-rgstr-a1' when you wrote the test, it might be worth changing the ID in the test and in the HTML code to make it more readable, e.g. to something like:: - Click: register <MSG> DOCS : Docs updates <DFF> @@ -12,7 +12,7 @@ This usually means: * De-duplicating code * Decoupling code * Minimizing the amount of imperative code (e.g. python code) in favor of declarative code (e.g. YAML configuration). -* Improving the code's readability (changing names, adding comments) +* Improving the code's readability (changing names, adding comments, disambiguating identifiers). Tests are code too, so it's good practice to refactor your tests to gradually improve :doc:`/glossary/test_quality` as you write new tests or fix existing ones. @@ -20,7 +20,8 @@ as you write new tests or fix existing ones. Of course, you should only refactor *passing* tests and you should always run passing tests after refactoring to ensure that they are still passing. -Here is a list of things which commonly need refactoring in tests. +Here is a non-exhaustive list of things which you could work on while writing new tests or changing +old ones that you can do to improve your test quality: De-duplicate duplicated tests @@ -36,7 +37,8 @@ Move configuration from engine.py to all.settings ------------------------------------------------- Your execution engine should be kept as short as possible yet still capable. If you have -any long lists in your engine.py, moving them into all.settings will help to keep it clean. +any long lists or chunks of data in your engine.py, moving them into all.settings will +help to keep it clean. Change HTML IDs and Classes to make them more readable @@ -48,10 +50,39 @@ other IDs to accomodate. If you have test steps that look like this:: - - Click: btn-rgstr-a1 + - Click: btnreg -Because the registration button had the HTML ID 'btn-rgstr-a1' when you wrote the test, +Because the registration button had the HTML ID 'btnreg' when you wrote the test, it might be worth changing the ID in the test and in the HTML code to make it more readable, e.g. to something like:: + - Click: register-button + +Similarly, if there is a button with ID:: + - Click: register + +It *may* be worth renaming it to:: + + - Click: register-button + +If there is *any possibility of confusion* between register "something else" (e.g. a register link). + + + + +Increase the realism of your tests +---------------------------------- + + +Run your tests in a more cost effective way +------------------------------------------- + +If you have an extremely realistic test suite, it may end up being very expensive and +very slow to run it from beginning to end. This is not necessarily a bad thing, although +when it does start happening you need to start prioritizing some test cases over others +and ensure that they are run + + +Bring continuous integration environment closer to production +-------------------------------------------------------------
36
DOCS : Docs updates
5
.rst
rst
agpl-3.0
hitchtest/hitch
1794
<NME> README.md <BEF> shiro-redis ============= ## Introduction shiro only provide the support of ehcache and concurrentHashMap. Here is an implement of redis cache can be used by shiro. Hope it will help you! ## Documentation Official documentation [is located here](http://alexxiyang.github.io/shiro-redis/). <dependency> <groupId>org.crazycake</groupId> <artifactId>shiro-redis</artifactId> <version>2.8.6</version> </dependency> ``` <MSG> Release 2.8.8 1. Fix RedisCache keyprefix cannot be changed defect 2. Set separate expire time of Session and Cache instead of only one expire time of RedisManager <DFF> @@ -16,7 +16,7 @@ You can choose these 2 ways to include shiro-redis into your project <dependency> <groupId>org.crazycake</groupId> <artifactId>shiro-redis</artifactId> - <version>2.8.6</version> + <version>2.8.8</version> </dependency> ```
1
Release 2.8.8 1. Fix RedisCache keyprefix cannot be changed defect 2. Set separate expire time of Session and Cache instead of only one expire time of RedisManager
1
.md
md
mit
alexxiyang/shiro-redis
1795
<NME> README.rst <BEF> Hitch ===== .. image:: https://badges.gitter.im/Join%20Chat.svg :alt: Join the chat at https://gitter.im/hitchtest/hitch :target: https://gitter.im/hitchtest/hitch?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge Hitch is a UNIX-based testing framework for writing integration tests with an emphasis on: * Minimizing and eliminating `brittle tests <https://hitchtest.readthedocs.org/en/latest/glossary/brittle_tests.html>`_ * `Test readability <https://hitchtest.readthedocs.org/en/latest/glossary/test_readability.html>`_ * `Loose coupling <https://hitchtest.readthedocs.org/en/latest/glossary/loose_coupling.html>`_ * `Test realism <https://hitchtest.readthedocs.org/en/latest/glossary/test_realism.html>`_ * Tests that `fail fast <https://hitchtest.readthedocs.org/en/latest/glossary/fail_fast.html>`_ and `fail clearly <https://hitchtest.readthedocs.org/en/latest/glossary/fail_clearly.html>`_ Available plugins ----------------- Hitch comes with a variety of plugins to aid you to realistically testing various kinds of software, components and scenarios, including: * `Python <https://hitchtest.readthedocs.org/en/latest/plugins/hitchpython.html>`_ (includes Django and Celery service definitions) * `Postgresql <https://hitchtest.readthedocs.org/en/latest/plugins/hitchpostgres.html>`_ * `Redis <https://hitchtest.readthedocs.org/en/latest/plugins/hitchredis.html>`_ * `Web apps (using selenium) <https://hitchtest.readthedocs.org/en/latest/plugins/hitchselenium.html>`_ * Command line apps (using pexpect) * `Cron <https://hitchtest.readthedocs.org/en/latest/plugins/hitchcron.html>`_ * MySQL * RabbitMQ * Elastic Search `Plugin documentation <https://hitchtest.readthedocs.org/en/latest/plugins/>`_ Getting started --------------- See the `quickstart tutorial <https://hitchtest.readthedocs.org/en/latest/quickstart/index.html>`_ on how to get started testing an existing project. Also check out `cookiecutter-django <https://github.com/pydanny/cookiecutter-django>`_ if you want to start a new Django project with tests. Status ------ Hitch is currently in beta. It is regression tested on: * Operating Systems : Mac OS X Yosemite, Ubuntu, Debian, Fedora and Arch Linux. * Python versions : 3.5.0, 3.4.3, 3.4.0 and 3.3.0 `(what about python 2?) <https://hitchtest.readthedocs.org/en/latest/faq/what_about_python2.html>`_ It does not currently work on Windows. See `tested on <https://hitchtest.readthedocs.org/en/latest/misc/tested_on.html>`_ for more details on how the framework is tested (with itself, naturally). Contents of this project ------------------------ This project contains: * The code for the bootstrapper script * Documentation for the whole project (`hosted at readthedocs <https://hitchtest.readthedocs.org/en/latest/>`_) * Code for other components is at: https://github.com/hitchtest/ * HitchPostgres_ - Simple wrapper around Postgres. * HitchSelenium_ - Simple wrapper around Selenium. * HitchRedis_ - Simple wrapper around Redis. * HitchDjango_ - Simple wrapper around Django. * HitchCelery_ - Simple wrapper around Celery. More coming soon. .. _HitchCron: https://github.com/hitchtest/hitchcron .. _HitchSelenium: https://github.com/hitchtest/hitchselenium .. _HitchRedis: https://github.com/hitchtest/hitchredis .. _HitchDjango: https://github.com/hitchtest/hitchdjango .. _HitchPostgres: https://github.com/hitchtest/hitchpostgres .. _HitchCelery: https://github.com/hitchtest/hitchcelery .. _pipsi: https://github.com/mitsuhiko/pipsi <MSG> Merge pull request #12 from thedrow/patch-1 Remove links to obsolete projects and link to hitchpython instead <DFF> @@ -94,8 +94,7 @@ together, or not at all. Those are: * HitchPostgres_ - Simple wrapper around Postgres. * HitchSelenium_ - Simple wrapper around Selenium. * HitchRedis_ - Simple wrapper around Redis. -* HitchDjango_ - Simple wrapper around Django. -* HitchCelery_ - Simple wrapper around Celery. +* HitchPython_ - Simple wrapper around python programs including Django and Celery. More coming soon. @@ -116,8 +115,7 @@ See the roadmap_ for planned future features. .. _HitchCron: https://github.com/hitchtest/hitchcron .. _HitchSelenium: https://github.com/hitchtest/hitchselenium .. _HitchRedis: https://github.com/hitchtest/hitchredis -.. _HitchDjango: https://github.com/hitchtest/hitchdjango .. _HitchPostgres: https://github.com/hitchtest/hitchpostgres -.. _HitchCelery: https://github.com/hitchtest/hitchcelery +.. _HitchPython: https://github.com/hitchtest/hitchpython .. _pipsi: https://github.com/mitsuhiko/pipsi
2
Merge pull request #12 from thedrow/patch-1
4
.rst
rst
agpl-3.0
hitchtest/hitch
1796
<NME> README.md <BEF> shiro-redis ============= ## Introduction shiro only provide the support of ehcache and concurrentHashMap. Here is an implement of redis cache can be used by shiro. Hope it will help you! ## Documentation Official documentation [is located here](http://alexxiyang.github.io/shiro-redis/). - redisSessionDAO.valueSerializer > NOTE > Shiro-redis don't support SimpleAuthenticationInfo created by this constructor `org.apache.shiro.authc.SimpleAuthenticationInfo.SimpleAuthenticationInfo(Object principal, Object hashedCredentials, ByteSource credentialsSalt, String realmName)`. > Please use `org.apache.shiro.authc.SimpleAuthenticationInfo.SimpleAuthenticationInfo(Object principal, Object hashedCredentials, String realmName)` instead. # If you found any bugs Please send email to [email protected] <MSG> Change 2.4.8 README.md, remove NOTE. Salt is available now. <DFF> @@ -158,10 +158,6 @@ These 4 Serializers are replaceable: - redisSessionDAO.valueSerializer -> NOTE -> Shiro-redis don't support SimpleAuthenticationInfo created by this constructor `org.apache.shiro.authc.SimpleAuthenticationInfo.SimpleAuthenticationInfo(Object principal, Object hashedCredentials, ByteSource credentialsSalt, String realmName)`. -> Please use `org.apache.shiro.authc.SimpleAuthenticationInfo.SimpleAuthenticationInfo(Object principal, Object hashedCredentials, String realmName)` instead. - # If you found any bugs Please send email to [email protected]
0
Change 2.4.8 README.md, remove NOTE. Salt is available now.
4
.md
md
mit
alexxiyang/shiro-redis
1797
<NME> fail_fast.rst <BEF> ADDFILE <MSG> DOCS : Updates to API documentation and glossary. <DFF> @@ -0,0 +1,7 @@ +Fail Fast +========= + +Fail fast is a testing principle. + +See: `Fail Fast Wikipedia Page <https://en.wikipedia.org/wiki/Fail_fast>`_ +
7
DOCS : Updates to API documentation and glossary.
0
.rst
rst
agpl-3.0
hitchtest/hitch
1798
<NME> RedisSessionDAOTest.java <BEF> package org.crazycake.shiro; import org.apache.shiro.session.Session; import org.apache.shiro.session.UnknownSessionException; import org.crazycake.shiro.exception.SerializationException; import org.junit.Before; import org.junit.Test; import java.io.Serializable; import java.util.Collection; import java.util.Date; import java.util.HashSet; import java.util.Set; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.CoreMatchers.*; public class RedisSessionDAOTest { private IRedisManager redisManager; private StringSerializer keySerializer = new StringSerializer(); private ObjectSerializer valueSerializer = new ObjectSerializer(); @BeforeEach public void setUp() { redisManager = mock(IRedisManager.class); } private RedisSessionDAO mountRedisSessionDAO(Integer expire) { RedisSessionDAO redisSessionDAO = new RedisSessionDAO(); if (expire != null) { redisSessionDAO.setExpire(expire); } redisSessionDAO.setKeyPrefix("student:"); redisSessionDAO.setRedisManager(redisManager); return redisSessionDAO; } @Test public void testUpdate() throws SerializationException { RedisSessionDAO sessionDAO = mountRedisSessionDAO(null); StudentSession session = new StudentSession(99, 2000); sessionDAO.update(session); verify(redisManager).set(keySerializer.serialize("student:99"), valueSerializer.serialize(session), 2); } @Test public void testUpdateByCustomExpire() throws SerializationException { RedisSessionDAO sessionDAO = mountRedisSessionDAO(3); StudentSession session = new StudentSession(98, 2000); sessionDAO.update(session); verify(redisManager).set(keySerializer.serialize("student:98"), valueSerializer.serialize(session), 3); } @Test public void testUpdateByNoExpire() throws SerializationException { RedisSessionDAO sessionDAO = mountRedisSessionDAO(-1); StudentSession session = new StudentSession(97, 2000); sessionDAO.update(session); verify(redisManager).set(keySerializer.serialize("student:97"), valueSerializer.serialize(session), -1); } @Test public void testDelete() throws SerializationException { RedisSessionDAO sessionDAO = mountRedisSessionDAO(null); StudentSession session = new StudentSession(96, 1000); sessionDAO.delete(session); verify(redisManager).del(keySerializer.serialize("student:96")); } @Test public void testGetActiveSessions() throws SerializationException { Set<byte[]> mockKeys = new HashSet<byte[]>(); mockKeys.add(keySerializer.serialize("student:1")); mockKeys.add(keySerializer.serialize("student:2")); when(redisManager.keys(keySerializer.serialize("student:*"))).thenReturn(mockKeys); StudentSession mockSession1 = new StudentSession(1, 2000); StudentSession mockSession2 = new StudentSession(2, 2000); when(redisManager.get(keySerializer.serialize("student:1"))).thenReturn(valueSerializer.serialize(mockSession1)); when(redisManager.get(keySerializer.serialize("student:2"))).thenReturn(valueSerializer.serialize(mockSession2)); RedisSessionDAO sessionDAO = mountRedisSessionDAO(null); assertThat(sessionDAO.getActiveSessions().size(), is(2)); } } class StudentSession implements Session, Serializable { private Integer id; private long timeout; public StudentSession(Integer id, long timeout) { this.id = id; this.timeout = timeout; } @Override public Serializable getId() { return id; } @Override public Date getStartTimestamp() { return null; } @Override public Date getLastAccessTime() { return null; } @Override public long getTimeout() throws InvalidSessionException { return timeout; } @Override public void setTimeout(long l) throws InvalidSessionException { } @Override public String getHost() { return null; } @Override public void touch() throws InvalidSessionException { } @Override public void stop() throws InvalidSessionException { } @Override public Collection<Object> getAttributeKeys() throws InvalidSessionException { return null; } @Override public Object getAttribute(Object o) throws InvalidSessionException { return null; } @Override public void setAttribute(Object o, Object o1) throws InvalidSessionException { } @Override public Object removeAttribute(Object o) throws InvalidSessionException { return null; } } <MSG> - create serializer package - refactor cluster <DFF> @@ -3,6 +3,8 @@ package org.crazycake.shiro; import org.apache.shiro.session.Session; import org.apache.shiro.session.UnknownSessionException; import org.crazycake.shiro.exception.SerializationException; +import org.crazycake.shiro.serializer.ObjectSerializer; +import org.crazycake.shiro.serializer.StringSerializer; import org.junit.Before; import org.junit.Test;
2
- create serializer package - refactor cluster
0
.java
java
mit
alexxiyang/shiro-redis
1799
<NME> FakePrincipalWithDefaultId2.java <BEF> ADDFILE <MSG> - Add principalIdFieldName. Using reflection to get id of redis object instead of using AuthCachePrincipal <DFF> @@ -0,0 +1,14 @@ +package org.crazycake.shiro.model; + +public class FakePrincipalWithDefaultId2 { + + private Integer id; + + public Integer getId() { + return id; + } + + public void setId(Integer id) { + this.id = id; + } +}
14
- Add principalIdFieldName. Using reflection to get id of redis object instead of using AuthCachePrincipal
0
.java
java
mit
alexxiyang/shiro-redis