code
stringlengths
4
1.01M
language
stringclasses
2 values
<?php
/**
 * Returns the latest news or leak articles from WikiLeaks.
 *
 * The selected category determines which listing page is scraped. News
 * pages and leak pages use different markup, so collectData() dispatches
 * to a dedicated loader for each.
 */
class WikiLeaksBridge extends BridgeAbstract {
	const NAME = 'WikiLeaks';
	const URI = 'https://wikileaks.org';
	const DESCRIPTION = 'Returns the latest news or articles from WikiLeaks';
	const MAINTAINER = 'logmanoriginal';
	const PARAMETERS = array(
		array(
			'category' => array(
				'name' => 'Category',
				'type' => 'list',
				'required' => true,
				'title' => 'Select your category',
				'values' => array(
					'News' => '-News-',
					'Leaks' => array(
						'All' => '-Leaks-',
						'Intelligence' => '+-Intelligence-+',
						'Global Economy' => '+-Global-Economy-+',
						'International Politics' => '+-International-Politics-+',
						'Corporations' => '+-Corporations-+',
						'Government' => '+-Government-+',
						'War & Military' => '+-War-Military-+'
					)
				),
				// FIX: the default must be one of the option values declared
				// above. The previous value 'news' never matched any option,
				// so the declared default was silently invalid; '-News-'
				// selects the News category as intended.
				'defaultValue' => '-News-'
			),
			'teaser' => array(
				'name' => 'Show teaser',
				'type' => 'checkbox',
				'required' => false,
				'title' => 'If checked feeds will display the teaser',
				'defaultValue' => true
			)
		)
	);

	/**
	 * Loads the category page and dispatches to the matching item loader.
	 */
	public function collectData(){
		$html = getSimpleHTMLDOM($this->getURI());

		// News are presented differently from leak listings
		switch($this->getInput('category')) {
			case '-News-':
				$this->loadNewsItems($html);
				break;
			default:
				$this->loadLeakItems($html);
		}
	}

	/**
	 * Builds the listing-page URI for the selected category.
	 */
	public function getURI(){
		if(!is_null($this->getInput('category'))) {
			return static::URI . '/' . $this->getInput('category') . '.html';
		}

		return parent::getURI();
	}

	/**
	 * Returns "<Category> - WikiLeaks" by reverse-looking-up the selected
	 * option value, first among the top-level options and then inside the
	 * nested 'Leaks' group.
	 */
	public function getName(){
		if(!is_null($this->getInput('category'))) {
			$category = array_search(
				$this->getInput('category'),
				static::PARAMETERS[0]['category']['values']
			);

			if($category === false) {
				$category = array_search(
					$this->getInput('category'),
					static::PARAMETERS[0]['category']['values']['Leaks']
				);
			}

			return $category . ' - ' . static::NAME;
		}

		return parent::getName();
	}

	/**
	 * Extracts feed items from a news listing page.
	 */
	private function loadNewsItems($html){
		$articles = $html->find('div.news-articles ul li');

		if(is_null($articles) || count($articles) === 0) {
			return;
		}

		foreach($articles as $article) {
			$item = array();
			$item['title'] = $article->find('h3', 0)->plaintext;
			$item['uri'] = static::URI . $article->find('h3 a', 0)->href;
			$item['content'] = $article->find('div.introduction', 0)->plaintext;
			$item['timestamp'] = strtotime($article->find('div.timestamp', 0)->plaintext);
			$this->items[] = $item;
		}
	}

	/**
	 * Extracts feed items from a leak listing page, optionally embedding
	 * the teaser image in the content.
	 */
	private function loadLeakItems($html){
		$articles = $html->find('li.tile');

		if(is_null($articles) || count($articles) === 0) {
			return;
		}

		foreach($articles as $article) {
			$item = array();
			$item['title'] = $article->find('h2', 0)->plaintext;
			$item['uri'] = static::URI . $article->find('a', 0)->href;

			$teaser = static::URI . '/' . $article->find('div.teaser img', 0)->src;

			if($this->getInput('teaser')) {
				$item['content'] = '<img src="'
				. $teaser
				. '" /><p>'
				. $article->find('div.intro', 0)->plaintext
				. '</p>';
			} else {
				$item['content'] = $article->find('div.intro', 0)->plaintext;
			}

			$item['timestamp'] = strtotime($article->find('div.timestamp', 0)->plaintext);
			$item['enclosures'] = array($teaser);
			$this->items[] = $item;
		}
	}
}
Java
/*
 * Copyright (c) 2011 Synaptics Incorporated
 * Copyright (c) 2011 Unixphere
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * SPI physical-layer driver for Synaptics RMI4 touch devices.
 *
 * Supports two wire protocols: "SPI v1" (plain address + data frames) and
 * "SPI v2" (command-byte framing with unified and split reads).  The
 * protocol is detected at probe time by reading the version register.
 * Attention is signalled either via an ATTN GPIO interrupt or, when no
 * GPIO is available, by hrtimer-driven polling.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/sched.h>
#include <linux/gpio.h>
#include <linux/rmi.h>

#define COMMS_DEBUG 0
#define FF_DEBUG 0

/* Register holding the SPI protocol version (0 = v1, 1 = v2). */
#define RMI_PROTOCOL_VERSION_ADDRESS 0xa0fd

/* SPI v2 command bytes (first byte of every v2 frame). */
#define SPI_V2_UNIFIED_READ 0xc0
#define SPI_V2_WRITE 0x40
#define SPI_V2_PREPARE_SPLIT_READ 0xc8
#define SPI_V2_EXECUTE_SPLIT_READ 0xca

/* Default inter-transfer delays, used when platform data supplies none. */
#define RMI_SPI_BLOCK_DELAY_US 65
#define RMI_SPI_BYTE_DELAY_US 65
#define RMI_SPI_WRITE_DELAY_US 0

/* High bit of the v1 address high byte marks the transfer as a read. */
#define RMI_V1_READ_FLAG 0x80

#define RMI_PAGE_SELECT_REGISTER 0x00FF
/* Page component of a 16-bit RMI register address. */
#define RMI_SPI_PAGE(addr) (((addr) >> 8) & 0x80)

#define DEFAULT_POLL_INTERVAL_MS 13

static char *spi_v1_proto_name = "spi";
static char *spi_v2_proto_name = "spiv2";

/*
 * Per-device state for the SPI transport.
 */
struct rmi_spi_data {
	struct mutex page_mutex;	/* serializes page select + transfer */
	int page;			/* currently selected register page */
	int (*set_page) (struct rmi_phys_device *phys, u8 page);
	bool split_read_pending;	/* a v2 split read awaits ATTN */
	int enabled;
	int irq;
	int irq_flags;
	struct rmi_phys_device *phys;
	struct completion irq_comp;	/* signalled by hard IRQ during split read */
	/* Following are used when polling. */
	struct hrtimer poll_timer;
	struct work_struct poll_work;
	int poll_interval;
};

/*
 * Hard IRQ handler: during a pending split read, ATTN means "data ready",
 * so complete the waiter directly; otherwise defer to the threaded handler.
 */
static irqreturn_t rmi_spi_hard_irq(int irq, void *p)
{
	struct rmi_phys_device *phys = p;
	struct rmi_spi_data *data = phys->data;
	struct rmi_device_platform_data *pdata = phys->dev->platform_data;

	if (data->split_read_pending &&
		      gpio_get_value(pdata->attn_gpio) ==
		      pdata->attn_polarity) {
		phys->info.attn_count++;
		complete(&data->irq_comp);
		return IRQ_HANDLED;
	}

	return IRQ_WAKE_THREAD;
}

/*
 * Threaded IRQ handler: forwards an asserted ATTN to the RMI core driver.
 */
static irqreturn_t rmi_spi_irq_thread(int irq, void *p)
{
	struct rmi_phys_device *phys = p;
	struct rmi_device *rmi_dev = phys->rmi_dev;
	struct rmi_driver *driver = rmi_dev->driver;
	struct rmi_device_platform_data *pdata = phys->dev->platform_data;

	if (gpio_get_value(pdata->attn_gpio) == pdata->attn_polarity) {
		phys->info.attn_count++;
		if (driver && driver->irq_handler)
			driver->irq_handler(rmi_dev, irq);
	}

	return IRQ_HANDLED;
}

/* Polling-mode work item: invoke the core IRQ handler (irq number 0). */
static void spi_poll_work(struct work_struct *work)
{
	struct rmi_spi_data *data =
			container_of(work, struct rmi_spi_data, poll_work);
	struct rmi_device *rmi_dev = data->phys->rmi_dev;
	struct rmi_driver *driver = rmi_dev->driver;

	if (driver && driver->irq_handler)
		driver->irq_handler(rmi_dev, 0);
}

/* This is the timer function for polling - it simply has to schedule work
 * and restart the timer. */
static enum hrtimer_restart spi_poll_timer(struct hrtimer *timer)
{
	struct rmi_spi_data *data =
			container_of(timer, struct rmi_spi_data, poll_timer);

	if (!work_pending(&data->poll_work))
		schedule_work(&data->poll_work);
	/* Re-arm manually rather than returning HRTIMER_RESTART.
	 * NOTE(review): poll_interval is passed as the nanoseconds argument
	 * of ktime_set() — confirm its units at the place it is assigned. */
	hrtimer_start(&data->poll_timer, ktime_set(0, data->poll_interval),
		      HRTIMER_MODE_REL);
	return HRTIMER_NORESTART;
}

/*
 * Core full-duplex transfer helper.
 *
 * Sends n_tx bytes from txbuf, then reads n_rx bytes into rxbuf, building
 * an spi_message whose transfer layout depends on the configured delays:
 * per-byte delays force one spi_transfer per byte, otherwise a single
 * transfer covers the whole direction.  Optional platform chip-select
 * assert/deassert hooks and pre/post delays bracket the I/O.
 *
 * Returns 0 on success or a negative error code.
 */
static int rmi_spi_xfer(struct rmi_phys_device *phys,
		    const u8 *txbuf, unsigned n_tx, u8 *rxbuf, unsigned n_rx)
{
	struct spi_device *client = to_spi_device(phys->dev);
	struct rmi_spi_data *v2_data = phys->data;
	struct rmi_device_platform_data *pdata = phys->dev->platform_data;
	int status;
	struct spi_message message;
	struct spi_transfer *xfers;
	int total_bytes = n_tx + n_rx;
	/* NOTE(review): variable-length array on the kernel stack; size is
	 * caller-controlled. */
	u8 local_buf[total_bytes];
	int xfer_count = 0;
	int xfer_index = 0;
	int block_delay = n_rx > 0 ? pdata->spi_data.block_delay_us : 0;
	int byte_delay = n_rx > 1 ? pdata->spi_data.read_delay_us : 0;
	int write_delay = n_tx > 1 ? pdata->spi_data.write_delay_us : 0;
#if FF_DEBUG
	bool bad_data = true;
#endif
#if COMMS_DEBUG || FF_DEBUG
	int i;
#endif

	if (v2_data->split_read_pending) {
		block_delay =
		    n_rx > 0 ? pdata->spi_data.split_read_block_delay_us : 0;
		/* NOTE(review): this tests n_tx where the non-split case
		 * tests n_rx — looks like it should be n_rx; confirm against
		 * the reference driver. */
		byte_delay =
		    n_tx > 1 ? pdata->spi_data.split_read_byte_delay_us : 0;
		write_delay = 0;
	}

	if (n_tx) {
		phys->info.tx_count++;
		phys->info.tx_bytes += n_tx;
		if (write_delay)
			xfer_count += n_tx;	/* one transfer per byte */
		else
			xfer_count += 1;
	}

	if (n_rx) {
		phys->info.rx_count++;
		phys->info.rx_bytes += n_rx;
		if (byte_delay)
			xfer_count += n_rx;	/* one transfer per byte */
		else
			xfer_count += 1;
	}

	xfers = kcalloc(xfer_count,
		    sizeof(struct spi_transfer), GFP_KERNEL);
	if (!xfers)
		return -ENOMEM;

	spi_message_init(&message);

	if (n_tx) {
		if (write_delay) {
			/* Byte-at-a-time writes, each followed by a delay. */
			for (xfer_index = 0; xfer_index < n_tx;
					xfer_index++) {
				memset(&xfers[xfer_index], 0,
				       sizeof(struct spi_transfer));
				xfers[xfer_index].len = 1;
				xfers[xfer_index].delay_usecs = write_delay;
				xfers[xfer_index].tx_buf = txbuf + xfer_index;
				spi_message_add_tail(&xfers[xfer_index],
						     &message);
			}
		} else {
			/* Single transfer from a DMA-safe local copy. */
			memset(&xfers[0], 0, sizeof(struct spi_transfer));
			xfers[0].len = n_tx;
			spi_message_add_tail(&xfers[0], &message);
			memcpy(local_buf, txbuf, n_tx);
			xfers[0].tx_buf = local_buf;
			xfer_index++;
		}
		if (block_delay)
			xfers[xfer_index-1].delay_usecs = block_delay;
	}
	if (n_rx) {
		if (byte_delay) {
			/* Byte-at-a-time reads into local_buf after the
			 * transmitted bytes. */
			int buffer_offset = n_tx;
			for (; xfer_index < xfer_count; xfer_index++) {
				memset(&xfers[xfer_index], 0,
				       sizeof(struct spi_transfer));
				xfers[xfer_index].len = 1;
				xfers[xfer_index].delay_usecs = byte_delay;
				xfers[xfer_index].rx_buf =
				    local_buf + buffer_offset;
				buffer_offset++;
				spi_message_add_tail(&xfers[xfer_index],
						     &message);
			}
		} else {
			memset(&xfers[xfer_index], 0,
			       sizeof(struct spi_transfer));
			xfers[xfer_index].len = n_rx;
			xfers[xfer_index].rx_buf = local_buf + n_tx;
			spi_message_add_tail(&xfers[xfer_index], &message);
			xfer_index++;
		}
	}

#if COMMS_DEBUG
	if (n_tx) {
		dev_dbg(&client->dev, "SPI sends %d bytes: ", n_tx);
		for (i = 0; i < n_tx; i++)
			pr_info("%02X ", txbuf[i]);
		pr_info("\n");
	}
#endif

	/* do the i/o */
	if (pdata->spi_data.cs_assert) {
		status = pdata->spi_data.cs_assert(
			pdata->spi_data.cs_assert_data, true);
		if (status) {
			dev_err(phys->dev, "Failed to assert CS, code %d.\n",
				status);
			/* nonzero means error */
			status = -1;
			goto error_exit;
		} else
			status = 0;
	}

	if (pdata->spi_data.pre_delay_us)
		udelay(pdata->spi_data.pre_delay_us);

	status = spi_sync(client, &message);

	if (pdata->spi_data.post_delay_us)
		udelay(pdata->spi_data.post_delay_us);

	/* NOTE(review): a failing deassert below overwrites the spi_sync
	 * status; success here also overwrites it with 0 — confirm that is
	 * intended before relying on the return value. */
	if (pdata->spi_data.cs_assert) {
		status = pdata->spi_data.cs_assert(
			pdata->spi_data.cs_assert_data, false);
		if (status) {
			dev_err(phys->dev, "Failed to deassert CS. code %d.\n",
				status);
			/* nonzero means error */
			status = -1;
			goto error_exit;
		} else
			status = 0;
	}

	if (status == 0) {
		/* Copy received bytes out, then report the message status. */
		memcpy(rxbuf, local_buf + n_tx, n_rx);
		status = message.status;
	} else {
		phys->info.tx_errs++;
		phys->info.rx_errs++;
		dev_err(phys->dev, "spi_sync failed with error code %d.",
		       status);
	}

#if COMMS_DEBUG
	if (n_rx) {
		dev_dbg(&client->dev, "SPI received %d bytes: ", n_rx);
		for (i = 0; i < n_rx; i++)
			pr_info("%02X ", rxbuf[i]);
		pr_info("\n");
	}
#endif
#if FF_DEBUG
	/* A read of all 0xFF usually means the device is absent. */
	if (n_rx) {
		for (i = 0; i < n_rx; i++) {
			if (rxbuf[i] != 0xFF) {
				bad_data = false;
				break;
			}
		}
		if (bad_data) {
			phys->info.rx_errs++;
			dev_err(phys->dev, "BAD READ %lu out of %lu.\n",
				phys->info.rx_errs, phys->info.rx_count);
		}
	}
#endif

error_exit:
	kfree(xfers);
	return status;
}

/*
 * SPI v2 write: command byte, 16-bit address, length, then payload.
 * Returns len on success or a negative error code.
 */
static int rmi_spi_v2_write_block(struct rmi_phys_device *phys, u16 addr,
				  u8 *buf, int len)
{
	struct rmi_spi_data *data = phys->data;
	u8 txbuf[len + 4];
	int error;

	txbuf[0] = SPI_V2_WRITE;
	txbuf[1] = (addr >> 8) & 0x00FF;
	txbuf[2] = addr & 0x00FF;
	txbuf[3] = len;
	memcpy(&txbuf[4], buf, len);

	mutex_lock(&data->page_mutex);

	if (RMI_SPI_PAGE(addr) != data->page) {
		error = data->set_page(phys, RMI_SPI_PAGE(addr));
		if (error < 0)
			goto exit;
	}

	/* NOTE(review): this transmits 'buf' (raw payload) with length
	 * len + 4, but the v2 header was assembled in 'txbuf' — it looks
	 * like 'txbuf' should be sent here (and len + 4 reads past the end
	 * of 'buf'); confirm against the v2 protocol spec. */
	error = rmi_spi_xfer(phys, buf, len + 4, NULL, 0);
	if (error < 0)
		goto exit;
	error = len;

exit:
	mutex_unlock(&data->page_mutex);
	return error;
}

/* Single-byte v2 write; returns 0 on success. */
static int rmi_spi_v2_write(struct rmi_phys_device *phys, u16 addr, u8 data)
{
	int error = rmi_spi_v2_write_block(phys, addr, &data, 1);

	return (error == 1) ? 0 : error;
}

/*
 * SPI v1 write: 16-bit address (write flag clear) followed by payload.
 * Returns len on success or a negative error code.
 */
static int rmi_spi_v1_write_block(struct rmi_phys_device *phys, u16 addr,
				  u8 *buf, int len)
{
	struct rmi_spi_data *data = phys->data;
	unsigned char txbuf[len + 2];
	int error;

	txbuf[0] = addr >> 8;
	txbuf[1] = addr;
	memcpy(txbuf+2, buf, len);

	mutex_lock(&data->page_mutex);

	if (RMI_SPI_PAGE(addr) != data->page) {
		error = data->set_page(phys, RMI_SPI_PAGE(addr));
		if (error < 0)
			goto exit;
	}

	error = rmi_spi_xfer(phys, txbuf, len + 2, NULL, 0);
	if (error < 0)
		goto exit;
	error = len;

exit:
	mutex_unlock(&data->page_mutex);
	return error;
}

/* Single-byte v1 write; returns 0 on success. */
static int rmi_spi_v1_write(struct rmi_phys_device *phys, u16 addr, u8 data)
{
	int error = rmi_spi_v1_write_block(phys, addr, &data, 1);

	return (error == 1) ? 0 : error;
}

/*
 * SPI v2 split read: prepare the read, wait for ATTN (signalled via
 * irq_comp by the hard IRQ handler), then execute it.  The first returned
 * byte echoes the length and is validated before copying out the payload.
 * Returns len on success or a negative error code.
 */
static int rmi_spi_v2_split_read_block(struct rmi_phys_device *phys, u16 addr,
				       u8 *buf, int len)
{
	struct rmi_spi_data *data = phys->data;
	u8 txbuf[4];
	u8 rxbuf[len + 1]; /* one extra byte for read length */
	int error;

	txbuf[0] = SPI_V2_PREPARE_SPLIT_READ;
	txbuf[1] = (addr >> 8) & 0x00FF;
	txbuf[2] = addr & 0x00ff;
	txbuf[3] = len;

	mutex_lock(&data->page_mutex);

	if (RMI_SPI_PAGE(addr) != data->page) {
		error = data->set_page(phys, RMI_SPI_PAGE(addr));
		if (error < 0)
			goto exit;
	}

	data->split_read_pending = true;

	error = rmi_spi_xfer(phys, txbuf, 4, NULL, 0);
	if (error < 0) {
		data->split_read_pending = false;
		goto exit;
	}

	wait_for_completion(&data->irq_comp);

	txbuf[0] = SPI_V2_EXECUTE_SPLIT_READ;
	txbuf[1] = 0;

	error = rmi_spi_xfer(phys, txbuf, 2, rxbuf, len + 1);
	data->split_read_pending = false;
	if (error < 0)
		goto exit;

	/* first byte is length */
	if (rxbuf[0] != len) {
		error = -EIO;
		goto exit;
	}

	memcpy(buf, rxbuf + 1, len);
	error = len;

exit:
	mutex_unlock(&data->page_mutex);
	return error;
}

/*
 * SPI v2 unified read: single command frame, data clocked back directly.
 * Returns len on success or a negative error code.
 */
static int rmi_spi_v2_read_block(struct rmi_phys_device *phys, u16 addr,
				 u8 *buf, int len)
{
	struct rmi_spi_data *data = phys->data;
	u8 txbuf[4];
	int error;

	txbuf[0] = SPI_V2_UNIFIED_READ;
	txbuf[1] = (addr >> 8) & 0x00FF;
	txbuf[2] = addr & 0x00ff;
	txbuf[3] = len;

	mutex_lock(&data->page_mutex);

	if (RMI_SPI_PAGE(addr) != data->page) {
		error = data->set_page(phys, RMI_SPI_PAGE(addr));
		if (error < 0)
			goto exit;
	}

	error = rmi_spi_xfer(phys, txbuf, 4, buf, len);
	if (error < 0)
		goto exit;
	error = len;

exit:
	mutex_unlock(&data->page_mutex);
	return error;
}

/* Single-byte v2 read; returns 0 on success. */
static int rmi_spi_v2_read(struct rmi_phys_device *phys, u16 addr, u8 *buf)
{
	int error = rmi_spi_v2_read_block(phys, addr, buf, 1);

	return (error == 1) ? 0 : error;
}

/*
 * SPI v1 read: 16-bit address with the read flag set in the high byte.
 * Returns len on success or a negative error code.
 */
static int rmi_spi_v1_read_block(struct rmi_phys_device *phys, u16 addr,
				 u8 *buf, int len)
{
	struct rmi_spi_data *data = phys->data;
	u8 txbuf[2];
	int error;

	txbuf[0] = (addr >> 8) | RMI_V1_READ_FLAG;
	txbuf[1] = addr;

	mutex_lock(&data->page_mutex);

	if (RMI_SPI_PAGE(addr) != data->page) {
		error = data->set_page(phys, RMI_SPI_PAGE(addr));
		if (error < 0)
			goto exit;
	}

	error = rmi_spi_xfer(phys, txbuf, 2, buf, len);
	if (error < 0)
		goto exit;
	error = len;

exit:
	mutex_unlock(&data->page_mutex);
	return error;
}

/* Single-byte v1 read; returns 0 on success. */
static int rmi_spi_v1_read(struct rmi_phys_device *phys, u16 addr, u8 *buf)
{
	int error = rmi_spi_v1_read_block(phys, addr, buf, 1);

	return (error == 1) ? 0 : error;
}

#define RMI_SPI_PAGE_SELECT_WRITE_LENGTH 1

/*
 * Selects a register page using the v1 wire format and caches it in
 * data->page.  Callers hold page_mutex.
 */
static int rmi_spi_v1_set_page(struct rmi_phys_device *phys, u8 page)
{
	struct rmi_spi_data *data = phys->data;
	u8 txbuf[] = {RMI_PAGE_SELECT_REGISTER >> 8,
		RMI_PAGE_SELECT_REGISTER & 0xFF, page};
	int error;

	error = rmi_spi_xfer(phys, txbuf, sizeof(txbuf), NULL, 0);
	if (error < 0) {
		dev_err(phys->dev, "Failed to set page select, code: %d.\n",
			error);
		return error;
	}

	data->page = page;

	return RMI_SPI_PAGE_SELECT_WRITE_LENGTH;
}

/*
 * Selects a register page using the v2 wire format and caches it in
 * data->page.  Callers hold page_mutex.
 */
static int rmi_spi_v2_set_page(struct rmi_phys_device *phys, u8 page)
{
	struct rmi_spi_data *data = phys->data;
	u8 txbuf[] = {SPI_V2_WRITE, RMI_PAGE_SELECT_REGISTER >> 8,
		RMI_PAGE_SELECT_REGISTER & 0xFF,
		RMI_SPI_PAGE_SELECT_WRITE_LENGTH, page};
	int error;

	error = rmi_spi_xfer(phys, txbuf, sizeof(txbuf), NULL, 0);
	if (error < 0) {
		dev_err(phys->dev, "Failed to set page select, code: %d.\n",
			error);
		return error;
	}

	data->page = page;

	return RMI_SPI_PAGE_SELECT_WRITE_LENGTH;
}

/* Requests the threaded ATTN interrupt for this device. */
static int acquire_attn_irq(struct rmi_spi_data *data)
{
	int retval;
	struct rmi_phys_device *rmi_phys = data->phys;

	retval = request_threaded_irq(data->irq, rmi_spi_hard_irq,
			rmi_spi_irq_thread, data->irq_flags,
			dev_name(rmi_phys->dev), rmi_phys);
	if (retval < 0) {
		dev_err(&(rmi_phys->rmi_dev->dev), "request_threaded_irq "
			"failed, code: %d.\n", retval);
	}
	return retval;
}

/*
 * Sets up ATTN interrupt handling and (when CONFIG_RMI4_DEV is enabled)
 * exports the ATTN GPIO to sysfs for debugging.  Export failures are
 * reported but not fatal.
 */
static int setup_attn(struct rmi_spi_data *data)
{
	int retval;
	struct rmi_phys_device *rmi_phys = data->phys;
	struct rmi_device_platform_data *pdata = rmi_phys->dev->platform_data;

	retval = acquire_attn_irq(data);
	if (retval < 0)
		return retval;

#if defined(CONFIG_RMI4_DEV)
	retval = gpio_export(pdata->attn_gpio, false);
	if (retval) {
		dev_warn(&(rmi_phys->rmi_dev->dev),
			 "WARNING: Failed to export ATTN gpio!\n");
		retval = 0;
	} else {
		retval = gpio_export_link(&(rmi_phys->rmi_dev->dev), "attn",
					pdata->attn_gpio);
		if (retval) {
			dev_warn(&(rmi_phys->rmi_dev->dev), "WARNING: "
				"Failed to symlink ATTN gpio!\n");
			retval = 0;
		} else {
			dev_info(&(rmi_phys->rmi_dev->dev),
				 "%s: Exported GPIO %d.", __func__,
				 pdata->attn_gpio);
		}
	}
#endif /* CONFIG_RMI4_DEV */

	return retval;
}

/* Starts polling mode: work item plus a one-second initial hrtimer. */
static int setup_polling(struct rmi_spi_data *data)
{
	INIT_WORK(&data->poll_work, spi_poll_work);
	hrtimer_init(&data->poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	data->poll_timer.function = spi_poll_timer;
	hrtimer_start(&data->poll_timer, ktime_set(1, 0), HRTIMER_MODE_REL);

	return 0;
}

/* Re-enables the transport by re-acquiring the ATTN IRQ. */
static int enable_device(struct rmi_phys_device *phys)
{
	int retval = 0;

	struct rmi_spi_data *data = phys->data;

	if (data->enabled) {
		dev_dbg(phys->dev, "Physical device already enabled.\n");
		return 0;
	}

	retval = acquire_attn_irq(data);
	if (retval)
		goto error_exit;

	data->enabled = true;
	dev_dbg(phys->dev, "Physical device enabled.\n");
	return 0;

error_exit:
	dev_err(phys->dev, "Failed to enable physical device. Code=%d.\n",
		retval);
	return retval;
}

/* Disables the transport and releases the ATTN IRQ. */
static void disable_device(struct rmi_phys_device *phys)
{
	struct rmi_spi_data *data = phys->data;

	if (!data->enabled) {
		dev_warn(phys->dev, "Physical device already disabled.\n");
		return;
	}
	disable_irq(data->irq);
	free_irq(data->irq, data->phys);

	dev_dbg(phys->dev, "Physical device disabled.\n");
	data->enabled = false;
}

#define DUMMY_READ_SLEEP_US 10

/*
 * Probe-time sanity check: reads the first PDT entry and fails with
 * -ENODEV if the bus returns only 0x00 or 0xFF bytes (device absent).
 */
static int rmi_spi_check_device(struct rmi_phys_device *rmi_phys)
{
	u8 buf[6];
	int error;
	int i;

	/* Some SPI subsystems return 0 for the very first read you do.  So
	 * we use this dummy read to get that out of the way.
	 */
	error = rmi_spi_v1_read_block(rmi_phys, PDT_START_SCAN_LOCATION,
				      buf, sizeof(buf));
	if (error < 0) {
		dev_err(rmi_phys->dev, "dummy read failed with %d.\n", error);
		return error;
	}
	udelay(DUMMY_READ_SLEEP_US);

	/* Force page select to 0.
	 */
	error = rmi_spi_v1_set_page(rmi_phys, 0x00);
	if (error < 0)
		return error;

	/* Now read the first PDT entry.  We know where this is, and if the
	 * RMI4 device is out there, these 6 bytes will be something other
	 * than all 0x00 or 0xFF.  We need to check for 0x00 and 0xFF,
	 * because many (maybe all) SPI implementations will return all 0x00
	 * or all 0xFF on read if the device is not connected.
	 */
	error = rmi_spi_v1_read_block(rmi_phys, PDT_START_SCAN_LOCATION,
				      buf, sizeof(buf));
	if (error < 0) {
		dev_err(rmi_phys->dev, "probe read failed with %d.\n", error);
		return error;
	}
	for (i = 0; i < sizeof(buf); i++) {
		if (buf[i] != 0x00 && buf[i] != 0xFF)
			return error;
	}

	dev_err(rmi_phys->dev, "probe read returned invalid block.\n");
	return -ENODEV;
}

/*
 * Probe: validates platform data, configures the SPI client (8-bit,
 * mode 3), allocates transport state with v1 ops, applies default delay
 * values, verifies the device, then upgrades to v2 ops if the version
 * register reports protocol 1.  Finally registers with the RMI core and
 * arms either ATTN IRQ handling or polling.
 */
static int __devinit rmi_spi_probe(struct spi_device *spi)
{
	struct rmi_phys_device *rmi_phys;
	struct rmi_spi_data *data;
	struct rmi_device_platform_data *pdata = spi->dev.platform_data;
	u8 buf[2];
	int retval;

	if (!pdata) {
		dev_err(&spi->dev, "no platform data\n");
		return -EINVAL;
	}

	if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
		return -EINVAL;

	spi->bits_per_word = 8;
	spi->mode = SPI_MODE_3;
	retval = spi_setup(spi);
	if (retval < 0) {
		dev_err(&spi->dev, "spi_setup failed!\n");
		return retval;
	}

	rmi_phys = kzalloc(sizeof(struct rmi_phys_device), GFP_KERNEL);
	if (!rmi_phys)
		return -ENOMEM;

	data = kzalloc(sizeof(struct rmi_spi_data), GFP_KERNEL);
	if (!data) {
		retval = -ENOMEM;
		goto err_phys;
	}

	data->enabled = true;	/* We plan to come up enabled. */
	data->irq = gpio_to_irq(pdata->attn_gpio);
	data->irq_flags = (pdata->attn_polarity == RMI_ATTN_ACTIVE_HIGH) ?
		IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
	data->phys = rmi_phys;

	rmi_phys->data = data;
	rmi_phys->dev = &spi->dev;

	/* Start with v1 ops; may be swapped for v2 below. */
	rmi_phys->write = rmi_spi_v1_write;
	rmi_phys->write_block = rmi_spi_v1_write_block;
	rmi_phys->read = rmi_spi_v1_read;
	rmi_phys->read_block = rmi_spi_v1_read_block;
	rmi_phys->enable_device = enable_device;
	rmi_phys->disable_device = disable_device;
	data->set_page = rmi_spi_v1_set_page;
	rmi_phys->info.proto = spi_v1_proto_name;

	mutex_init(&data->page_mutex);

	dev_set_drvdata(&spi->dev, rmi_phys);

	/* Fill in driver defaults for any delay the platform left at 0. */
	pdata->spi_data.block_delay_us = pdata->spi_data.block_delay_us ?
			pdata->spi_data.block_delay_us : RMI_SPI_BLOCK_DELAY_US;
	pdata->spi_data.read_delay_us = pdata->spi_data.read_delay_us ?
			pdata->spi_data.read_delay_us : RMI_SPI_BYTE_DELAY_US;
	pdata->spi_data.write_delay_us = pdata->spi_data.write_delay_us ?
			pdata->spi_data.write_delay_us : RMI_SPI_BYTE_DELAY_US;
	pdata->spi_data.split_read_block_delay_us =
			pdata->spi_data.split_read_block_delay_us ?
			pdata->spi_data.split_read_block_delay_us :
			RMI_SPI_BLOCK_DELAY_US;
	pdata->spi_data.split_read_byte_delay_us =
			pdata->spi_data.split_read_byte_delay_us ?
			pdata->spi_data.split_read_byte_delay_us :
			RMI_SPI_BYTE_DELAY_US;

	if (pdata->gpio_config) {
		retval = pdata->gpio_config(pdata->gpio_data, true);
		if (retval < 0) {
			dev_err(&spi->dev, "Failed to setup GPIOs, code: %d.\n",
				retval);
			goto err_data;
		}
	}

	retval = rmi_spi_check_device(rmi_phys);
	if (retval < 0)
		goto err_data;

	/* check if this is an SPI v2 device */
	retval = rmi_spi_v1_read_block(rmi_phys, RMI_PROTOCOL_VERSION_ADDRESS,
				      buf, 2);
	if (retval < 0) {
		dev_err(&spi->dev, "failed to get SPI version number!\n");
		goto err_data;
	}
	dev_dbg(&spi->dev, "SPI version is %d", buf[0]);

	if (buf[0] == 1) {
		/* SPIv2 */
		rmi_phys->write		= rmi_spi_v2_write;
		rmi_phys->write_block	= rmi_spi_v2_write_block;
		rmi_phys->read		= rmi_spi_v2_read;
		data->set_page		= rmi_spi_v2_set_page;
		rmi_phys->info.proto	= spi_v2_proto_name;

		/* Split reads need ATTN; fall back to unified reads. */
		if (pdata->attn_gpio > 0) {
			init_completion(&data->irq_comp);
			rmi_phys->read_block = rmi_spi_v2_split_read_block;
		} else {
			dev_warn(&spi->dev, "WARNING: SPI V2 detected, but no "
				"attention GPIO was specified. This is unlikely"
				" to work well.\n");
			rmi_phys->read_block = rmi_spi_v2_read_block;
		}
	} else if (buf[0] != 0) {
		dev_err(&spi->dev, "Unrecognized SPI version %d.\n", buf[0]);
		retval = -ENODEV;
		goto err_data;
	}

	retval = rmi_register_phys_device(rmi_phys);
	if (retval) {
		dev_err(&spi->dev, "failed to register physical driver\n");
		goto err_data;
	}

	if (pdata->attn_gpio > 0) {
		retval = setup_attn(data);
		if (retval < 0)
			goto err_unregister;
	} else {
		retval = setup_polling(data);
		if (retval < 0)
			goto err_unregister;
	}

	dev_info(&spi->dev, "registered RMI SPI driver\n");
	return 0;

err_unregister:
	rmi_unregister_phys_device(rmi_phys);
err_data:
	kfree(data);
err_phys:
	kfree(rmi_phys);
	return retval;
}

/*
 * Remove: unregisters from the RMI core, frees transport state, and
 * deconfigures the GPIOs.
 */
static int __devexit rmi_spi_remove(struct spi_device *spi)
{
	struct rmi_phys_device *phys = dev_get_drvdata(&spi->dev);
	struct rmi_device_platform_data *pd = spi->dev.platform_data;

	rmi_unregister_phys_device(phys);
	kfree(phys->data);
	kfree(phys);

	/* NOTE(review): probe calls gpio_config(pdata->gpio_data, true) but
	 * this passes &spi->dev — looks inconsistent; confirm the expected
	 * first argument of the gpio_config callback. */
	if (pd->gpio_config)
		pd->gpio_config(&spi->dev, false);

	return 0;
}

static const struct spi_device_id rmi_id[] = {
	{ "rmi", 0 },
	{ "rmi_spi", 0 },
	{ }
};
MODULE_DEVICE_TABLE(spi, rmi_id);

static struct spi_driver rmi_spi_driver = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "rmi_spi",
	},
	.id_table	= rmi_id,
	.probe		= rmi_spi_probe,
	.remove		= __devexit_p(rmi_spi_remove),
};

static int __init rmi_spi_init(void)
{
	return spi_register_driver(&rmi_spi_driver);
}

static void __exit rmi_spi_exit(void)
{
	spi_unregister_driver(&rmi_spi_driver);
}

MODULE_AUTHOR("Christopher Heiny <[email protected]>");
MODULE_DESCRIPTION("RMI SPI driver");
MODULE_LICENSE("GPL");

module_init(rmi_spi_init);
module_exit(rmi_spi_exit);
Java
      SUBROUTINE sla_DTPS2C (XI, ETA, RA, DEC, RAZ1, DECZ1,
     :                       RAZ2, DECZ2, N)
*+
*     - - - - - - -
*      D T P S 2 C
*     - - - - - - -
*
*  From the tangent plane coordinates of a star of known RA,Dec,
*  determine the RA,Dec of the tangent point.
*
*  (double precision)
*
*  Given:
*     XI,ETA      d    tangent plane rectangular coordinates
*     RA,DEC      d    spherical coordinates
*
*  Returned:
*     RAZ1,DECZ1  d    spherical coordinates of tangent point, solution 1
*     RAZ2,DECZ2  d    spherical coordinates of tangent point, solution 2
*     N           i    number of solutions:
*                        0 = no solutions returned (note 2)
*                        1 = only the first solution is useful (note 3)
*                        2 = both solutions are useful (note 3)
*
*  Notes:
*
*  1  The RAZ1 and RAZ2 values are returned in the range 0-2pi.
*
*  2  Cases where there is no solution can only arise near the poles.
*     For example, it is clearly impossible for a star at the pole
*     itself to have a non-zero XI value, and hence it is meaningless
*     to ask where the tangent point would have to be to bring about
*     this combination of XI and DEC.
*
*  3  Also near the poles, cases can arise where there are two useful
*     solutions.  The argument N indicates whether the second of the
*     two solutions returned is useful.  N=1 indicates only one useful
*     solution, the usual case;  under these circumstances, the second
*     solution corresponds to the "over-the-pole" case, and this is
*     reflected in the values of RAZ2 and DECZ2 which are returned.
*
*  4  The DECZ1 and DECZ2 values are returned in the range +/-pi, but
*     in the usual, non-pole-crossing, case, the range is +/-pi/2.
*
*  5  This routine is the spherical equivalent of the routine sla_DTPV2C.
*
*  Called:  sla_DRANRM
*
*  P.T.Wallace   Starlink   5 June 1995
*
*  Copyright (C) 1995 Rutherford Appleton Laboratory
*
*  License:
*    This program is free software; you can redistribute it and/or modify
*    it under the terms of the GNU General Public License as published by
*    the Free Software Foundation; either version 2 of the License, or
*    (at your option) any later version.
*
*    This program is distributed in the hope that it will be useful,
*    but WITHOUT ANY WARRANTY; without even the implied warranty of
*    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
*    GNU General Public License for more details.
*
*    You should have received a copy of the GNU General Public License
*    along with this program (see SLA_CONDITIONS); if not, write to the
*    Free Software Foundation, Inc., 59 Temple Place, Suite 330,
*    Boston, MA  02111-1307  USA
*
*-

      IMPLICIT NONE

      DOUBLE PRECISION XI,ETA,RA,DEC,RAZ1,DECZ1,RAZ2,DECZ2
      INTEGER N

      DOUBLE PRECISION X2,Y2,SD,CD,SDF,R2,R,S,C

      DOUBLE PRECISION sla_DRANRM

*  Squared tangent-plane offsets and sin/cos of the star's declination.
      X2=XI*XI
      Y2=ETA*ETA
      SD=SIN(DEC)
      CD=COS(DEC)
      SDF=SD*SQRT(1D0+X2+Y2)
*  Discriminant:  negative means no tangent point is geometrically
*  possible for this XI,ETA,DEC combination (see note 2).
      R2=CD*CD*(1D0+Y2)-SD*SD*X2
      IF (R2.GE.0D0) THEN
         R=SQRT(R2)
         S=SDF-ETA*R
         C=SDF*ETA+R
*     Guard ATAN2 against the 0,0 case when XI is zero at R=0.
         IF (XI.EQ.0D0.AND.R.EQ.0D0) R=1D0
*     First solution.
         RAZ1=sla_DRANRM(RA-ATAN2(XI,R))
         DECZ1=ATAN2(S,C)
*     Second ("over-the-pole") solution from the negative root.
         R=-R
         S=SDF-ETA*R
         C=SDF*ETA+R
         RAZ2=sla_DRANRM(RA-ATAN2(XI,R))
         DECZ2=ATAN2(S,C)
*     Both solutions are useful only when |SDF| >= 1 (see note 3).
         IF (ABS(SDF).LT.1D0) THEN
            N=1
         ELSE
            N=2
         END IF
      ELSE
         N=0
      END IF

      END
Java
/**
 * @file DllLoader.cpp
 * @author Minmin Gong
 *
 * @section DESCRIPTION
 *
 * This source file is part of KFL, a subproject of KlayGE
 * For the latest info, see http://www.klayge.org
 *
 * @section LICENSE
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * You may alternatively use this source under the terms of
 * the KlayGE Proprietary License (KPL). You can obtained such a license
 * from http://www.klayge.org/licensing/.
 */

#include <KFL/KFL.hpp>
#include <KFL/ResIdentifier.hpp>

#ifdef KLAYGE_PLATFORM_WINDOWS
#include <windows.h>
#else
#include <dlfcn.h>
#endif

#include <KFL/DllLoader.hpp>

namespace KlayGE
{
	DllLoader::DllLoader()
		: dll_handle_(nullptr)
	{
	}

	DllLoader::~DllLoader()
	{
		this->Free();
	}

	// Loads the named shared library using the platform's loader.
	// Returns true on success.  Any library previously loaded through
	// this instance is released first, so repeated Load() calls do not
	// leak the earlier handle.
	bool DllLoader::Load(std::string const & dll_name)
	{
		// FIX: release any existing handle before overwriting it;
		// previously a second Load() leaked the first library.
		this->Free();

#ifdef KLAYGE_PLATFORM_WINDOWS
#ifdef KLAYGE_PLATFORM_WINDOWS_DESKTOP
		dll_handle_ = static_cast<void*>(::LoadLibraryExA(dll_name.c_str(), nullptr, 0));
#else
		std::wstring wname;
		Convert(wname, dll_name);
		dll_handle_ = static_cast<void*>(::LoadPackagedLibrary(wname.c_str(), 0));
#endif
#else
		dll_handle_ = ::dlopen(dll_name.c_str(), RTLD_LAZY);
#endif

		return (dll_handle_ != nullptr);
	}

	// Releases the loaded library, if any.  Safe to call repeatedly.
	void DllLoader::Free()
	{
		if (dll_handle_)
		{
#ifdef KLAYGE_PLATFORM_WINDOWS
			::FreeLibrary(static_cast<HMODULE>(dll_handle_));
#else
			::dlclose(dll_handle_);
#endif
			// FIX: clear the handle so that a later Free() (e.g. an
			// explicit Free() followed by the destructor) does not
			// call FreeLibrary/dlclose on a stale handle.
			dll_handle_ = nullptr;
		}
	}

	// Resolves a symbol from the loaded library, or nullptr if absent.
	void* DllLoader::GetProcAddress(std::string const & proc_name)
	{
#ifdef KLAYGE_PLATFORM_WINDOWS
		return reinterpret_cast<void*>(::GetProcAddress(static_cast<HMODULE>(dll_handle_), proc_name.c_str()));
#else
		return ::dlsym(dll_handle_, proc_name.c_str());
#endif
	}
}
Java
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <html> <!-- This file documents the GNU Assembler "as". Copyright (C) 1991-2017 Free Software Foundation, Inc. Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.3 or any later version published by the Free Software Foundation; with no Invariant Sections, with no Front-Cover Texts, and with no Back-Cover Texts. A copy of the license is included in the section entitled "GNU Free Documentation License". --> <!-- Created by GNU Texinfo 5.2, http://www.gnu.org/software/texinfo/ --> <head> <title>Using as: Nios II Directives</title> <meta name="description" content="Using as: Nios II Directives"> <meta name="keywords" content="Using as: Nios II Directives"> <meta name="resource-type" content="document"> <meta name="distribution" content="global"> <meta name="Generator" content="makeinfo"> <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> <link href="index.html#Top" rel="start" title="Top"> <link href="AS-Index.html#AS-Index" rel="index" title="AS Index"> <link href="index.html#SEC_Contents" rel="contents" title="Table of Contents"> <link href="NiosII_002dDependent.html#NiosII_002dDependent" rel="up" title="NiosII-Dependent"> <link href="Nios-II-Opcodes.html#Nios-II-Opcodes" rel="next" title="Nios II Opcodes"> <link href="Nios-II-Relocations.html#Nios-II-Relocations" rel="prev" title="Nios II Relocations"> <style type="text/css"> <!-- a.summary-letter {text-decoration: none} blockquote.smallquotation {font-size: smaller} div.display {margin-left: 3.2em} div.example {margin-left: 3.2em} div.indentedblock {margin-left: 3.2em} div.lisp {margin-left: 3.2em} div.smalldisplay {margin-left: 3.2em} div.smallexample {margin-left: 3.2em} div.smallindentedblock {margin-left: 3.2em; font-size: smaller} div.smalllisp {margin-left: 3.2em} kbd {font-style:oblique} pre.display 
{font-family: inherit} pre.format {font-family: inherit} pre.menu-comment {font-family: serif} pre.menu-preformatted {font-family: serif} pre.smalldisplay {font-family: inherit; font-size: smaller} pre.smallexample {font-size: smaller} pre.smallformat {font-family: inherit; font-size: smaller} pre.smalllisp {font-size: smaller} span.nocodebreak {white-space:nowrap} span.nolinebreak {white-space:nowrap} span.roman {font-family:serif; font-weight:normal} span.sansserif {font-family:sans-serif; font-weight:normal} ul.no-bullet {list-style: none} --> </style> </head> <body lang="en" bgcolor="#FFFFFF" text="#000000" link="#0000FF" vlink="#800080" alink="#FF0000"> <a name="Nios-II-Directives"></a> <div class="header"> <p> Next: <a href="Nios-II-Opcodes.html#Nios-II-Opcodes" accesskey="n" rel="next">Nios II Opcodes</a>, Previous: <a href="Nios-II-Relocations.html#Nios-II-Relocations" accesskey="p" rel="prev">Nios II Relocations</a>, Up: <a href="NiosII_002dDependent.html#NiosII_002dDependent" accesskey="u" rel="up">NiosII-Dependent</a> &nbsp; [<a href="index.html#SEC_Contents" title="Table of contents" rel="contents">Contents</a>][<a href="AS-Index.html#AS-Index" title="Index" rel="index">Index</a>]</p> </div> <hr> <a name="Nios-II-Machine-Directives"></a> <h4 class="subsection">9.31.4 Nios II Machine Directives</h4> <a name="index-machine-directives_002c-Nios-II"></a> <a name="index-Nios-II-machine-directives"></a> <dl compact="compact"> <dd> <a name="index-align-directive_002c-Nios-II"></a> </dd> <dt><code>.align <var>expression</var> [, <var>expression</var>]</code></dt> <dd><p>This is the generic <code>.align</code> directive, however this aligns to a power of two. </p> <a name="index-half-directive_002c-Nios-II"></a> </dd> <dt><code>.half <var>expression</var></code></dt> <dd><p>Create an aligned constant 2 bytes in size. 
</p> <a name="index-word-directive_002c-Nios-II"></a> </dd> <dt><code>.word <var>expression</var></code></dt> <dd><p>Create an aligned constant 4 bytes in size. </p> <a name="index-dword-directive_002c-Nios-II"></a> </dd> <dt><code>.dword <var>expression</var></code></dt> <dd><p>Create an aligned constant 8 bytes in size. </p> <a name="index-2byte-directive_002c-Nios-II"></a> </dd> <dt><code>.2byte <var>expression</var></code></dt> <dd><p>Create an unaligned constant 2 bytes in size. </p> <a name="index-4byte-directive_002c-Nios-II"></a> </dd> <dt><code>.4byte <var>expression</var></code></dt> <dd><p>Create an unaligned constant 4 bytes in size. </p> <a name="index-8byte-directive_002c-Nios-II"></a> </dd> <dt><code>.8byte <var>expression</var></code></dt> <dd><p>Create an unaligned constant 8 bytes in size. </p> <a name="index-16byte-directive_002c-Nios-II"></a> </dd> <dt><code>.16byte <var>expression</var></code></dt> <dd><p>Create an unaligned constant 16 bytes in size. </p> <a name="index-set-noat-directive_002c-Nios-II"></a> </dd> <dt><code>.set noat</code></dt> <dd><p>Allows assembly code to use <code>at</code> register without warning. Macro or relaxation expansions generate warnings. </p> <a name="index-set-at-directive_002c-Nios-II"></a> </dd> <dt><code>.set at</code></dt> <dd><p>Assembly code using <code>at</code> register generates warnings, and macro expansion and relaxation are enabled. </p> <a name="index-set-nobreak-directive_002c-Nios-II"></a> </dd> <dt><code>.set nobreak</code></dt> <dd><p>Allows assembly code to use <code>ba</code> and <code>bt</code> registers without warning. </p> <a name="index-set-break-directive_002c-Nios-II"></a> </dd> <dt><code>.set break</code></dt> <dd><p>Turns warnings back on for using <code>ba</code> and <code>bt</code> registers. </p> <a name="index-set-norelax-directive_002c-Nios-II"></a> </dd> <dt><code>.set norelax</code></dt> <dd><p>Do not replace any branches or calls. 
</p> <a name="index-set-relaxsection-directive_002c-Nios-II"></a> </dd> <dt><code>.set relaxsection</code></dt> <dd><p>Replace identified out-of-range branches with <code>jmp</code> sequences (default). </p> <a name="index-set-relaxall-directive_002c-Nios-II"></a> </dd> <dt><code>.set relaxall</code></dt> <dd><p>Replace all branch and call instructions with <code>jmp</code> and <code>callr</code> sequences. </p> <a name="index-set-directive_002c-Nios-II"></a> </dd> <dt><code>.set &hellip;</code></dt> <dd><p>All other <code>.set</code> are the normal use. </p> </dd> </dl> <hr> <div class="header"> <p> Next: <a href="Nios-II-Opcodes.html#Nios-II-Opcodes" accesskey="n" rel="next">Nios II Opcodes</a>, Previous: <a href="Nios-II-Relocations.html#Nios-II-Relocations" accesskey="p" rel="prev">Nios II Relocations</a>, Up: <a href="NiosII_002dDependent.html#NiosII_002dDependent" accesskey="u" rel="up">NiosII-Dependent</a> &nbsp; [<a href="index.html#SEC_Contents" title="Table of contents" rel="contents">Contents</a>][<a href="AS-Index.html#AS-Index" title="Index" rel="index">Index</a>]</p> </div> </body> </html>
Java
ccflags-y += -I$(srctree)/drivers/misc/mediatek/thermal/fakeHeader/ ccflags-y += -I$(srctree)/drivers/misc/mediatek/base/power/include/ ccflags-y += -I$(srctree)/drivers/misc/mediatek/base/power/spm_v1 ccflags-y += -I$(srctree)/drivers/misc/mediatek/base/power/cpuidle_v1/ ifeq ($(CONFIG_ARCH_MT6580),y) ccflags-y += -I$(srctree)/drivers/misc/mediatek/video/$(MTK_PLATFORM)/videox/ ccflags-y += -I$(srctree)/drivers/misc/mediatek/video/$(MTK_PLATFORM)/dispsys/ ccflags-y += -I$(srctree)/drivers/misc/mediatek/video/include/ ccflags-y += -I$(srctree)/drivers/misc/mediatek/cmdq/v2/ ccflags-y += -I$(srctree)/drivers/misc/mediatek/cmdq/v2/$(MTK_PLATFORM)/ ccflags-y += -I$(srctree)/drivers/misc/mediatek/lcm/inc/ endif obj-y += mt_spm_mtcmos.o obj-y += mt_clkmgr_common.o obj-y += mt_clkmgr.o obj-y += mt_golden_setting.o obj-y += mt_clkbuf_ctl.o obj-y += mt_pm_init.o obj-y += mt_sleep.o obj-y += mt_cpufreq.o obj-y += mt_gpufreq.o obj-y += mt_ptp.o obj-y += mt_dcm.o obj-y += mt-smp.o obj-y += hotplug.o obj-y += mt-headsmp.o obj-y += mt_hotplug.o obj-y += mt_hotplug_strategy_main.o obj-y += mt_hotplug_strategy_core.o obj-y += mt_hotplug_strategy_procfs.o obj-y += mt_hotplug_strategy_cpu.o obj-y += mt_hotplug_strategy_api.o obj-y += mt_hotplug_strategy_algo.o
Java
/* * * This source code is part of * * G R O M A C S * * GROningen MAchine for Chemical Simulations * * VERSION 3.2.0 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others. * Copyright (c) 1991-2000, University of Groningen, The Netherlands. * Copyright (c) 2001-2004, The GROMACS development team, * check out http://www.gromacs.org for more information. * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * If you want to redistribute modifications, please consider that * scientific software is very special. Version control is crucial - * bugs must be traceable. We will be happy to consider code for * inclusion in the official distribution, but derived work must not * be called official GROMACS. Details are found in the README & COPYING * files - if they are missing, get the official version at www.gromacs.org. * * To help us fund GROMACS development, we humbly ask that you cite * the papers on the package - you can find them in the top README file. * * For more info, check our website at http://www.gromacs.org * * And Hey: * GROwing Monsters And Cloning Shrimps */ #ifdef HAVE_CONFIG_H #include <config.h> #endif #include <stdio.h> #include "typedefs.h" #include "vsite.h" #include "macros.h" #include "smalloc.h" #include "nrnb.h" #include "vec.h" #include "mvdata.h" #include "network.h" #include "mshift.h" #include "pbc.h" #include "domdec.h" #include "partdec.h" #include "mtop_util.h" /* Routines to send/recieve coordinates and force * of constructing atoms. 
*/ static void move_construct_x(t_comm_vsites *vsitecomm, rvec x[], t_commrec *cr) { rvec *sendbuf; rvec *recvbuf; int i,ia; sendbuf = vsitecomm->send_buf; recvbuf = vsitecomm->recv_buf; /* Prepare pulse left by copying to send buffer */ for(i=0;i<vsitecomm->left_export_nconstruct;i++) { ia = vsitecomm->left_export_construct[i]; copy_rvec(x[ia],sendbuf[i]); } /* Pulse coordinates left */ gmx_tx_rx_real(cr,GMX_LEFT,(real *)sendbuf,3*vsitecomm->left_export_nconstruct,GMX_RIGHT,(real *)recvbuf,3*vsitecomm->right_import_nconstruct); /* Copy from receive buffer to coordinate array */ for(i=0;i<vsitecomm->right_import_nconstruct;i++) { ia = vsitecomm->right_import_construct[i]; copy_rvec(recvbuf[i],x[ia]); } /* Prepare pulse right by copying to send buffer */ for(i=0;i<vsitecomm->right_export_nconstruct;i++) { ia = vsitecomm->right_export_construct[i]; copy_rvec(x[ia],sendbuf[i]); } /* Pulse coordinates right */ gmx_tx_rx_real(cr,GMX_RIGHT,(real *)sendbuf,3*vsitecomm->right_export_nconstruct,GMX_LEFT,(real *)recvbuf,3*vsitecomm->left_import_nconstruct); /* Copy from receive buffer to coordinate array */ for(i=0;i<vsitecomm->left_import_nconstruct;i++) { ia = vsitecomm->left_import_construct[i]; copy_rvec(recvbuf[i],x[ia]); } } static void move_construct_f(t_comm_vsites *vsitecomm, rvec f[], t_commrec *cr) { rvec *sendbuf; rvec *recvbuf; int i,ia; sendbuf = vsitecomm->send_buf; recvbuf = vsitecomm->recv_buf; /* Prepare pulse right by copying to send buffer */ for(i=0;i<vsitecomm->right_import_nconstruct;i++) { ia = vsitecomm->right_import_construct[i]; copy_rvec(f[ia],sendbuf[i]); clear_rvec(f[ia]); /* Zero it here after moving, just to simplify debug book-keeping... 
*/ } /* Pulse forces right */ gmx_tx_rx_real(cr,GMX_RIGHT,(real *)sendbuf,3*vsitecomm->right_import_nconstruct,GMX_LEFT,(real *)recvbuf,3*vsitecomm->left_export_nconstruct); /* Copy from receive buffer to coordinate array */ for(i=0;i<vsitecomm->left_export_nconstruct;i++) { ia = vsitecomm->left_export_construct[i]; rvec_inc(f[ia],recvbuf[i]); } /* Prepare pulse left by copying to send buffer */ for(i=0;i<vsitecomm->left_import_nconstruct;i++) { ia = vsitecomm->left_import_construct[i]; copy_rvec(f[ia],sendbuf[i]); clear_rvec(f[ia]); /* Zero it here after moving, just to simplify debug book-keeping... */ } /* Pulse coordinates left */ gmx_tx_rx_real(cr,GMX_LEFT,(real *)sendbuf,3*vsitecomm->left_import_nconstruct,GMX_RIGHT,(real *)recvbuf,3*vsitecomm->right_export_nconstruct); /* Copy from receive buffer to coordinate array */ for(i=0;i<vsitecomm->right_export_nconstruct;i++) { ia = vsitecomm->right_export_construct[i]; rvec_inc(f[ia],recvbuf[i]); } /* All forces are now on the home processors */ } static void pd_clear_nonlocal_constructs(t_comm_vsites *vsitecomm, rvec f[]) { int i,ia; for(i=0;i<vsitecomm->left_import_nconstruct;i++) { ia = vsitecomm->left_import_construct[i]; clear_rvec(f[ia]); } for(i=0;i<vsitecomm->right_import_nconstruct;i++) { ia = vsitecomm->right_import_construct[i]; clear_rvec(f[ia]); } } static int pbc_rvec_sub(const t_pbc *pbc,const rvec xi,const rvec xj,rvec dx) { if (pbc) { return pbc_dx_aiuc(pbc,xi,xj,dx); } else { rvec_sub(xi,xj,dx); return CENTRAL; } } /* Vsite construction routines */ static void constr_vsite2(rvec xi,rvec xj,rvec x,real a,t_pbc *pbc) { real b; rvec dx; b=1.0-a; /* 1 flop */ if (pbc) { pbc_dx_aiuc(pbc,xj,xi,dx); x[XX] = xi[XX] + a*dx[XX]; x[YY] = xi[YY] + a*dx[YY]; x[ZZ] = xi[ZZ] + a*dx[ZZ]; } else { x[XX] = b*xi[XX] + a*xj[XX]; x[YY] = b*xi[YY] + a*xj[YY]; x[ZZ] = b*xi[ZZ] + a*xj[ZZ]; /* 9 Flops */ } /* TOTAL: 10 flops */ } static void constr_vsite3(rvec xi,rvec xj,rvec xk,rvec x,real a,real b, t_pbc *pbc) { real c; 
rvec dxj,dxk; c=1.0-a-b; /* 2 flops */ if (pbc) { pbc_dx_aiuc(pbc,xj,xi,dxj); pbc_dx_aiuc(pbc,xk,xi,dxk); x[XX] = xi[XX] + a*dxj[XX] + b*dxk[XX]; x[YY] = xi[YY] + a*dxj[YY] + b*dxk[YY]; x[ZZ] = xi[ZZ] + a*dxj[ZZ] + b*dxk[ZZ]; } else { x[XX] = c*xi[XX] + a*xj[XX] + b*xk[XX]; x[YY] = c*xi[YY] + a*xj[YY] + b*xk[YY]; x[ZZ] = c*xi[ZZ] + a*xj[ZZ] + b*xk[ZZ]; /* 15 Flops */ } /* TOTAL: 17 flops */ } static void constr_vsite3FD(rvec xi,rvec xj,rvec xk,rvec x,real a,real b, t_pbc *pbc) { rvec xij,xjk,temp; real c; pbc_rvec_sub(pbc,xj,xi,xij); pbc_rvec_sub(pbc,xk,xj,xjk); /* 6 flops */ /* temp goes from i to a point on the line jk */ temp[XX] = xij[XX] + a*xjk[XX]; temp[YY] = xij[YY] + a*xjk[YY]; temp[ZZ] = xij[ZZ] + a*xjk[ZZ]; /* 6 flops */ c=b*gmx_invsqrt(iprod(temp,temp)); /* 6 + 10 flops */ x[XX] = xi[XX] + c*temp[XX]; x[YY] = xi[YY] + c*temp[YY]; x[ZZ] = xi[ZZ] + c*temp[ZZ]; /* 6 Flops */ /* TOTAL: 34 flops */ } static void constr_vsite3FAD(rvec xi,rvec xj,rvec xk,rvec x,real a,real b, t_pbc *pbc) { rvec xij,xjk,xp; real a1,b1,c1,invdij; pbc_rvec_sub(pbc,xj,xi,xij); pbc_rvec_sub(pbc,xk,xj,xjk); /* 6 flops */ invdij = gmx_invsqrt(iprod(xij,xij)); c1 = invdij * invdij * iprod(xij,xjk); xp[XX] = xjk[XX] - c1*xij[XX]; xp[YY] = xjk[YY] - c1*xij[YY]; xp[ZZ] = xjk[ZZ] - c1*xij[ZZ]; a1 = a*invdij; b1 = b*gmx_invsqrt(iprod(xp,xp)); /* 45 */ x[XX] = xi[XX] + a1*xij[XX] + b1*xp[XX]; x[YY] = xi[YY] + a1*xij[YY] + b1*xp[YY]; x[ZZ] = xi[ZZ] + a1*xij[ZZ] + b1*xp[ZZ]; /* 12 Flops */ /* TOTAL: 63 flops */ } static void constr_vsite3OUT(rvec xi,rvec xj,rvec xk,rvec x, real a,real b,real c,t_pbc *pbc) { rvec xij,xik,temp; pbc_rvec_sub(pbc,xj,xi,xij); pbc_rvec_sub(pbc,xk,xi,xik); cprod(xij,xik,temp); /* 15 Flops */ x[XX] = xi[XX] + a*xij[XX] + b*xik[XX] + c*temp[XX]; x[YY] = xi[YY] + a*xij[YY] + b*xik[YY] + c*temp[YY]; x[ZZ] = xi[ZZ] + a*xij[ZZ] + b*xik[ZZ] + c*temp[ZZ]; /* 18 Flops */ /* TOTAL: 33 flops */ } static void constr_vsite4FD(rvec xi,rvec xj,rvec xk,rvec xl,rvec x, real a,real 
b,real c,t_pbc *pbc) { rvec xij,xjk,xjl,temp; real d; pbc_rvec_sub(pbc,xj,xi,xij); pbc_rvec_sub(pbc,xk,xj,xjk); pbc_rvec_sub(pbc,xl,xj,xjl); /* 9 flops */ /* temp goes from i to a point on the plane jkl */ temp[XX] = xij[XX] + a*xjk[XX] + b*xjl[XX]; temp[YY] = xij[YY] + a*xjk[YY] + b*xjl[YY]; temp[ZZ] = xij[ZZ] + a*xjk[ZZ] + b*xjl[ZZ]; /* 12 flops */ d=c*gmx_invsqrt(iprod(temp,temp)); /* 6 + 10 flops */ x[XX] = xi[XX] + d*temp[XX]; x[YY] = xi[YY] + d*temp[YY]; x[ZZ] = xi[ZZ] + d*temp[ZZ]; /* 6 Flops */ /* TOTAL: 43 flops */ } static void constr_vsite4FDN(rvec xi,rvec xj,rvec xk,rvec xl,rvec x, real a,real b,real c,t_pbc *pbc) { rvec xij,xik,xil,ra,rb,rja,rjb,rm; real d; pbc_rvec_sub(pbc,xj,xi,xij); pbc_rvec_sub(pbc,xk,xi,xik); pbc_rvec_sub(pbc,xl,xi,xil); /* 9 flops */ ra[XX] = a*xik[XX]; ra[YY] = a*xik[YY]; ra[ZZ] = a*xik[ZZ]; rb[XX] = b*xil[XX]; rb[YY] = b*xil[YY]; rb[ZZ] = b*xil[ZZ]; /* 6 flops */ rvec_sub(ra,xij,rja); rvec_sub(rb,xij,rjb); /* 6 flops */ cprod(rja,rjb,rm); /* 9 flops */ d=c*gmx_invsqrt(norm2(rm)); /* 5+5+1 flops */ x[XX] = xi[XX] + d*rm[XX]; x[YY] = xi[YY] + d*rm[YY]; x[ZZ] = xi[ZZ] + d*rm[ZZ]; /* 6 Flops */ /* TOTAL: 47 flops */ } static int constr_vsiten(t_iatom *ia, t_iparams ip[], rvec *x, t_pbc *pbc) { rvec xs,x1,dx; dvec dsum; int n3,av,ai,i; real a; n3 = 3*ip[ia[0]].vsiten.n; av = ia[1]; ai = ia[2]; copy_rvec(x[ai],x1); clear_dvec(dsum); for(i=3; i<n3; i+=3) { ai = ia[i+2]; a = ip[ia[i]].vsiten.a; if (pbc) { pbc_dx_aiuc(pbc,x[ai],x1,dx); } else { rvec_sub(x[ai],x1,dx); } dsum[XX] += a*dx[XX]; dsum[YY] += a*dx[YY]; dsum[ZZ] += a*dx[ZZ]; /* 9 Flops */ } x[av][XX] = x1[XX] + dsum[XX]; x[av][YY] = x1[YY] + dsum[YY]; x[av][ZZ] = x1[ZZ] + dsum[ZZ]; return n3; } void construct_vsites(FILE *log,gmx_vsite_t *vsite, rvec x[],t_nrnb *nrnb, real dt,rvec *v, t_iparams ip[],t_ilist ilist[], int ePBC,gmx_bool bMolPBC,t_graph *graph, t_commrec *cr,matrix box) { rvec xpbc,xv,vv,dx; real a1,b1,c1,inv_dt; int i,inc,ii,nra,nr,tp,ftype; t_iatom 
avsite,ai,aj,ak,al,pbc_atom; t_iatom *ia; t_pbc pbc,*pbc_null,*pbc_null2; gmx_bool bDomDec; int *vsite_pbc,ishift; rvec reftmp,vtmp,rtmp; bDomDec = cr && DOMAINDECOMP(cr); /* We only need to do pbc when we have inter-cg vsites */ if (ePBC != epbcNONE && (bDomDec || bMolPBC) && vsite->n_intercg_vsite) { /* This is wasting some CPU time as we now do this multiple times * per MD step. But how often do we have vsites with full pbc? */ pbc_null = set_pbc_dd(&pbc,ePBC,cr!=NULL ? cr->dd : NULL,FALSE,box); } else { pbc_null = NULL; } if (cr) { if (bDomDec) { dd_move_x_vsites(cr->dd,box,x); } else if (vsite->bPDvsitecomm) { /* I'm not sure whether the periodicity and shift are guaranteed * to be consistent between different nodes when running e.g. polymers * in parallel. In this special case we thus unshift/shift, * but only when necessary. This is to make sure the coordinates * we move don't end up a box away... */ if (graph) unshift_self(graph,box,x); move_construct_x(vsite->vsitecomm,x,cr); if (graph) shift_self(graph,box,x); } } if (v) { inv_dt = 1.0/dt; } else { inv_dt = 1.0; } pbc_null2 = NULL; for(ftype=0; (ftype<F_NRE); ftype++) { if (interaction_function[ftype].flags & IF_VSITE) { nra = interaction_function[ftype].nratoms; nr = ilist[ftype].nr; ia = ilist[ftype].iatoms; if (pbc_null) { vsite_pbc = vsite->vsite_pbc_loc[ftype-F_VSITE2]; } else { vsite_pbc = NULL; } for(i=0; (i<nr); ) { tp = ia[0]; /* if (ftype != idef->functype[tp]) gmx_incons("Function types for vsites wrong"); */ /* The vsite and constructing atoms */ avsite = ia[1]; ai = ia[2]; aj = ia[3]; /* Constants for constructing vsites */ a1 = ip[tp].vsite.a; /* Check what kind of pbc we need to use */ if (vsite_pbc) { pbc_atom = vsite_pbc[i/(1+nra)]; if (pbc_atom > -2) { if (pbc_atom >= 0) { /* We need to copy the coordinates here, * single for single atom cg's pbc_atom is the vsite itself. 
*/ copy_rvec(x[pbc_atom],xpbc); } pbc_null2 = pbc_null; } else { pbc_null2 = NULL; } } else { pbc_atom = -2; } /* Copy the old position */ copy_rvec(x[avsite],xv); /* Construct the vsite depending on type */ inc = nra+1; switch (ftype) { case F_VSITE2: constr_vsite2(x[ai],x[aj],x[avsite],a1,pbc_null2); break; case F_VSITE3: ak = ia[4]; b1 = ip[tp].vsite.b; constr_vsite3(x[ai],x[aj],x[ak],x[avsite],a1,b1,pbc_null2); break; case F_VSITE3FD: ak = ia[4]; b1 = ip[tp].vsite.b; constr_vsite3FD(x[ai],x[aj],x[ak],x[avsite],a1,b1,pbc_null2); break; case F_VSITE3FAD: ak = ia[4]; b1 = ip[tp].vsite.b; constr_vsite3FAD(x[ai],x[aj],x[ak],x[avsite],a1,b1,pbc_null2); break; case F_VSITE3OUT: ak = ia[4]; b1 = ip[tp].vsite.b; c1 = ip[tp].vsite.c; constr_vsite3OUT(x[ai],x[aj],x[ak],x[avsite],a1,b1,c1,pbc_null2); break; case F_VSITE4FD: ak = ia[4]; al = ia[5]; b1 = ip[tp].vsite.b; c1 = ip[tp].vsite.c; constr_vsite4FD(x[ai],x[aj],x[ak],x[al],x[avsite],a1,b1,c1, pbc_null2); break; case F_VSITE4FDN: ak = ia[4]; al = ia[5]; b1 = ip[tp].vsite.b; c1 = ip[tp].vsite.c; constr_vsite4FDN(x[ai],x[aj],x[ak],x[al],x[avsite],a1,b1,c1, pbc_null2); break; case F_VSITEN: inc = constr_vsiten(ia,ip,x,pbc_null2); break; default: gmx_fatal(FARGS,"No such vsite type %d in %s, line %d", ftype,__FILE__,__LINE__); } if (pbc_atom >= 0) { /* Match the pbc of this vsite to the rest of its charge group */ ishift = pbc_dx_aiuc(pbc_null,x[avsite],xpbc,dx); if (ishift != CENTRAL) rvec_add(xpbc,dx,x[avsite]); } if (v) { /* Calculate velocity of vsite... 
*/ rvec_sub(x[avsite],xv,vv); svmul(inv_dt,vv,v[avsite]); } /* Increment loop variables */ i += inc; ia += inc; } } } } static void spread_vsite2(t_iatom ia[],real a, rvec x[],rvec f[],rvec fshift[], t_pbc *pbc,t_graph *g) { rvec fi,fj,dx; t_iatom av,ai,aj; ivec di; real b; int siv,sij; av = ia[1]; ai = ia[2]; aj = ia[3]; svmul(1-a,f[av],fi); svmul( a,f[av],fj); /* 7 flop */ rvec_inc(f[ai],fi); rvec_inc(f[aj],fj); /* 6 Flops */ if (g) { ivec_sub(SHIFT_IVEC(g,ai),SHIFT_IVEC(g,av),di); siv = IVEC2IS(di); ivec_sub(SHIFT_IVEC(g,ai),SHIFT_IVEC(g,aj),di); sij = IVEC2IS(di); } else if (pbc) { siv = pbc_dx_aiuc(pbc,x[ai],x[av],dx); sij = pbc_dx_aiuc(pbc,x[ai],x[aj],dx); } else { siv = CENTRAL; sij = CENTRAL; } if (fshift && (siv != CENTRAL || sij != CENTRAL)) { rvec_inc(fshift[siv],f[av]); rvec_dec(fshift[CENTRAL],fi); rvec_dec(fshift[sij],fj); } /* TOTAL: 13 flops */ } void construct_vsites_mtop(FILE *log,gmx_vsite_t *vsite, gmx_mtop_t *mtop,rvec x[]) { int as,mb,mol; gmx_molblock_t *molb; gmx_moltype_t *molt; as = 0; for(mb=0; mb<mtop->nmolblock; mb++) { molb = &mtop->molblock[mb]; molt = &mtop->moltype[molb->type]; for(mol=0; mol<molb->nmol; mol++) { construct_vsites(log,vsite,x+as,NULL,0.0,NULL, mtop->ffparams.iparams,molt->ilist, epbcNONE,TRUE,NULL,NULL,NULL); as += molt->atoms.nr; } } } static void spread_vsite3(t_iatom ia[],real a,real b, rvec x[],rvec f[],rvec fshift[], t_pbc *pbc,t_graph *g) { rvec fi,fj,fk,dx; atom_id av,ai,aj,ak; ivec di; int siv,sij,sik; av = ia[1]; ai = ia[2]; aj = ia[3]; ak = ia[4]; svmul(1-a-b,f[av],fi); svmul( a,f[av],fj); svmul( b,f[av],fk); /* 11 flops */ rvec_inc(f[ai],fi); rvec_inc(f[aj],fj); rvec_inc(f[ak],fk); /* 9 Flops */ if (g) { ivec_sub(SHIFT_IVEC(g,ai),SHIFT_IVEC(g,ia[1]),di); siv = IVEC2IS(di); ivec_sub(SHIFT_IVEC(g,ai),SHIFT_IVEC(g,aj),di); sij = IVEC2IS(di); ivec_sub(SHIFT_IVEC(g,ai),SHIFT_IVEC(g,ak),di); sik = IVEC2IS(di); } else if (pbc) { siv = pbc_dx_aiuc(pbc,x[ai],x[av],dx); sij = pbc_dx_aiuc(pbc,x[ai],x[aj],dx); sik = 
pbc_dx_aiuc(pbc,x[ai],x[ak],dx); } else { siv = CENTRAL; sij = CENTRAL; sik = CENTRAL; } if (fshift && (siv!=CENTRAL || sij!=CENTRAL || sik!=CENTRAL)) { rvec_inc(fshift[siv],f[av]); rvec_dec(fshift[CENTRAL],fi); rvec_dec(fshift[sij],fj); rvec_dec(fshift[sik],fk); } /* TOTAL: 20 flops */ } static void spread_vsite3FD(t_iatom ia[],real a,real b, rvec x[],rvec f[],rvec fshift[], t_pbc *pbc,t_graph *g) { real fx,fy,fz,c,invl,fproj,a1; rvec xvi,xij,xjk,xix,fv,temp; t_iatom av,ai,aj,ak; int svi,sji,skj,d; ivec di; av = ia[1]; ai = ia[2]; aj = ia[3]; ak = ia[4]; copy_rvec(f[av],fv); sji = pbc_rvec_sub(pbc,x[aj],x[ai],xij); skj = pbc_rvec_sub(pbc,x[ak],x[aj],xjk); /* 6 flops */ /* xix goes from i to point x on the line jk */ xix[XX]=xij[XX]+a*xjk[XX]; xix[YY]=xij[YY]+a*xjk[YY]; xix[ZZ]=xij[ZZ]+a*xjk[ZZ]; /* 6 flops */ invl=gmx_invsqrt(iprod(xix,xix)); c=b*invl; /* 4 + ?10? flops */ fproj=iprod(xix,fv)*invl*invl; /* = (xix . f)/(xix . xix) */ temp[XX]=c*(fv[XX]-fproj*xix[XX]); temp[YY]=c*(fv[YY]-fproj*xix[YY]); temp[ZZ]=c*(fv[ZZ]-fproj*xix[ZZ]); /* 16 */ /* c is already calculated in constr_vsite3FD storing c somewhere will save 26 flops! 
*/ a1=1-a; f[ai][XX] += fv[XX] - temp[XX]; f[ai][YY] += fv[YY] - temp[YY]; f[ai][ZZ] += fv[ZZ] - temp[ZZ]; f[aj][XX] += a1*temp[XX]; f[aj][YY] += a1*temp[YY]; f[aj][ZZ] += a1*temp[ZZ]; f[ak][XX] += a*temp[XX]; f[ak][YY] += a*temp[YY]; f[ak][ZZ] += a*temp[ZZ]; /* 19 Flops */ if (g) { ivec_sub(SHIFT_IVEC(g,ia[1]),SHIFT_IVEC(g,ai),di); svi = IVEC2IS(di); ivec_sub(SHIFT_IVEC(g,aj),SHIFT_IVEC(g,ai),di); sji = IVEC2IS(di); ivec_sub(SHIFT_IVEC(g,ak),SHIFT_IVEC(g,aj),di); skj = IVEC2IS(di); } else if (pbc) { svi = pbc_rvec_sub(pbc,x[av],x[ai],xvi); } else { svi = CENTRAL; } if (fshift && (svi!=CENTRAL || sji!=CENTRAL || skj!=CENTRAL)) { rvec_dec(fshift[svi],fv); fshift[CENTRAL][XX] += fv[XX] - (1 + a)*temp[XX]; fshift[CENTRAL][YY] += fv[YY] - (1 + a)*temp[YY]; fshift[CENTRAL][ZZ] += fv[ZZ] - (1 + a)*temp[ZZ]; fshift[ sji][XX] += temp[XX]; fshift[ sji][YY] += temp[YY]; fshift[ sji][ZZ] += temp[ZZ]; fshift[ skj][XX] += a*temp[XX]; fshift[ skj][YY] += a*temp[YY]; fshift[ skj][ZZ] += a*temp[ZZ]; } /* TOTAL: 61 flops */ } static void spread_vsite3FAD(t_iatom ia[],real a,real b, rvec x[],rvec f[],rvec fshift[], t_pbc *pbc,t_graph *g) { rvec xvi,xij,xjk,xperp,Fpij,Fppp,fv,f1,f2,f3; real a1,b1,c1,c2,invdij,invdij2,invdp,fproj; t_iatom av,ai,aj,ak; int svi,sji,skj,d; ivec di; av = ia[1]; ai = ia[2]; aj = ia[3]; ak = ia[4]; copy_rvec(f[ia[1]],fv); sji = pbc_rvec_sub(pbc,x[aj],x[ai],xij); skj = pbc_rvec_sub(pbc,x[ak],x[aj],xjk); /* 6 flops */ invdij = gmx_invsqrt(iprod(xij,xij)); invdij2 = invdij * invdij; c1 = iprod(xij,xjk) * invdij2; xperp[XX] = xjk[XX] - c1*xij[XX]; xperp[YY] = xjk[YY] - c1*xij[YY]; xperp[ZZ] = xjk[ZZ] - c1*xij[ZZ]; /* xperp in plane ijk, perp. to ij */ invdp = gmx_invsqrt(iprod(xperp,xperp)); a1 = a*invdij; b1 = b*invdp; /* 45 flops */ /* a1, b1 and c1 are already calculated in constr_vsite3FAD storing them somewhere will save 45 flops! */ fproj=iprod(xij ,fv)*invdij2; svmul(fproj, xij, Fpij); /* proj. 
f on xij */ svmul(iprod(xperp,fv)*invdp*invdp,xperp,Fppp); /* proj. f on xperp */ svmul(b1*fproj, xperp,f3); /* 23 flops */ rvec_sub(fv,Fpij,f1); /* f1 = f - Fpij */ rvec_sub(f1,Fppp,f2); /* f2 = f - Fpij - Fppp */ for (d=0; (d<DIM); d++) { f1[d]*=a1; f2[d]*=b1; } /* 12 flops */ c2=1+c1; f[ai][XX] += fv[XX] - f1[XX] + c1*f2[XX] + f3[XX]; f[ai][YY] += fv[YY] - f1[YY] + c1*f2[YY] + f3[YY]; f[ai][ZZ] += fv[ZZ] - f1[ZZ] + c1*f2[ZZ] + f3[ZZ]; f[aj][XX] += f1[XX] - c2*f2[XX] - f3[XX]; f[aj][YY] += f1[YY] - c2*f2[YY] - f3[YY]; f[aj][ZZ] += f1[ZZ] - c2*f2[ZZ] - f3[ZZ]; f[ak][XX] += f2[XX]; f[ak][YY] += f2[YY]; f[ak][ZZ] += f2[ZZ]; /* 30 Flops */ if (g) { ivec_sub(SHIFT_IVEC(g,ia[1]),SHIFT_IVEC(g,ai),di); svi = IVEC2IS(di); ivec_sub(SHIFT_IVEC(g,aj),SHIFT_IVEC(g,ai),di); sji = IVEC2IS(di); ivec_sub(SHIFT_IVEC(g,ak),SHIFT_IVEC(g,aj),di); skj = IVEC2IS(di); } else if (pbc) { svi = pbc_rvec_sub(pbc,x[av],x[ai],xvi); } else { svi = CENTRAL; } if (fshift && (svi!=CENTRAL || sji!=CENTRAL || skj!=CENTRAL)) { rvec_dec(fshift[svi],fv); fshift[CENTRAL][XX] += fv[XX] - f1[XX] - (1-c1)*f2[XX] + f3[XX]; fshift[CENTRAL][YY] += fv[YY] - f1[YY] - (1-c1)*f2[YY] + f3[YY]; fshift[CENTRAL][ZZ] += fv[ZZ] - f1[ZZ] - (1-c1)*f2[ZZ] + f3[ZZ]; fshift[ sji][XX] += f1[XX] - c1 *f2[XX] - f3[XX]; fshift[ sji][YY] += f1[YY] - c1 *f2[YY] - f3[YY]; fshift[ sji][ZZ] += f1[ZZ] - c1 *f2[ZZ] - f3[ZZ]; fshift[ skj][XX] += f2[XX]; fshift[ skj][YY] += f2[YY]; fshift[ skj][ZZ] += f2[ZZ]; } /* TOTAL: 113 flops */ } static void spread_vsite3OUT(t_iatom ia[],real a,real b,real c, rvec x[],rvec f[],rvec fshift[], t_pbc *pbc,t_graph *g) { rvec xvi,xij,xik,fv,fj,fk; real cfx,cfy,cfz; atom_id av,ai,aj,ak; ivec di; int svi,sji,ski; av = ia[1]; ai = ia[2]; aj = ia[3]; ak = ia[4]; sji = pbc_rvec_sub(pbc,x[aj],x[ai],xij); ski = pbc_rvec_sub(pbc,x[ak],x[ai],xik); /* 6 Flops */ copy_rvec(f[av],fv); cfx = c*fv[XX]; cfy = c*fv[YY]; cfz = c*fv[ZZ]; /* 3 Flops */ fj[XX] = a*fv[XX] - xik[ZZ]*cfy + xik[YY]*cfz; fj[YY] = xik[ZZ]*cfx 
+ a*fv[YY] - xik[XX]*cfz; fj[ZZ] = -xik[YY]*cfx + xik[XX]*cfy + a*fv[ZZ]; fk[XX] = b*fv[XX] + xij[ZZ]*cfy - xij[YY]*cfz; fk[YY] = -xij[ZZ]*cfx + b*fv[YY] + xij[XX]*cfz; fk[ZZ] = xij[YY]*cfx - xij[XX]*cfy + b*fv[ZZ]; /* 30 Flops */ f[ai][XX] += fv[XX] - fj[XX] - fk[XX]; f[ai][YY] += fv[YY] - fj[YY] - fk[YY]; f[ai][ZZ] += fv[ZZ] - fj[ZZ] - fk[ZZ]; rvec_inc(f[aj],fj); rvec_inc(f[ak],fk); /* 15 Flops */ if (g) { ivec_sub(SHIFT_IVEC(g,ia[1]),SHIFT_IVEC(g,ai),di); svi = IVEC2IS(di); ivec_sub(SHIFT_IVEC(g,aj),SHIFT_IVEC(g,ai),di); sji = IVEC2IS(di); ivec_sub(SHIFT_IVEC(g,ak),SHIFT_IVEC(g,ai),di); ski = IVEC2IS(di); } else if (pbc) { svi = pbc_rvec_sub(pbc,x[av],x[ai],xvi); } else { svi = CENTRAL; } if (fshift && (svi!=CENTRAL || sji!=CENTRAL || ski!=CENTRAL)) { rvec_dec(fshift[svi],fv); fshift[CENTRAL][XX] += fv[XX] - fj[XX] - fk[XX]; fshift[CENTRAL][YY] += fv[YY] - fj[YY] - fk[YY]; fshift[CENTRAL][ZZ] += fv[ZZ] - fj[ZZ] - fk[ZZ]; rvec_inc(fshift[sji],fj); rvec_inc(fshift[ski],fk); } /* TOTAL: 54 flops */ } static void spread_vsite4FD(t_iatom ia[],real a,real b,real c, rvec x[],rvec f[],rvec fshift[], t_pbc *pbc,t_graph *g) { real d,invl,fproj,a1; rvec xvi,xij,xjk,xjl,xix,fv,temp; atom_id av,ai,aj,ak,al; ivec di; int svi,sji,skj,slj,m; av = ia[1]; ai = ia[2]; aj = ia[3]; ak = ia[4]; al = ia[5]; sji = pbc_rvec_sub(pbc,x[aj],x[ai],xij); skj = pbc_rvec_sub(pbc,x[ak],x[aj],xjk); slj = pbc_rvec_sub(pbc,x[al],x[aj],xjl); /* 9 flops */ /* xix goes from i to point x on the plane jkl */ for(m=0; m<DIM; m++) xix[m] = xij[m] + a*xjk[m] + b*xjl[m]; /* 12 flops */ invl=gmx_invsqrt(iprod(xix,xix)); d=c*invl; /* 4 + ?10? flops */ copy_rvec(f[av],fv); fproj=iprod(xix,fv)*invl*invl; /* = (xix . f)/(xix . xix) */ for(m=0; m<DIM; m++) temp[m] = d*(fv[m] - fproj*xix[m]); /* 16 */ /* c is already calculated in constr_vsite3FD storing c somewhere will save 35 flops! 
*/ a1 = 1 - a - b; for(m=0; m<DIM; m++) { f[ai][m] += fv[m] - temp[m]; f[aj][m] += a1*temp[m]; f[ak][m] += a*temp[m]; f[al][m] += b*temp[m]; } /* 26 Flops */ if (g) { ivec_sub(SHIFT_IVEC(g,ia[1]),SHIFT_IVEC(g,ai),di); svi = IVEC2IS(di); ivec_sub(SHIFT_IVEC(g,aj),SHIFT_IVEC(g,ai),di); sji = IVEC2IS(di); ivec_sub(SHIFT_IVEC(g,ak),SHIFT_IVEC(g,aj),di); skj = IVEC2IS(di); ivec_sub(SHIFT_IVEC(g,al),SHIFT_IVEC(g,aj),di); slj = IVEC2IS(di); } else if (pbc) { svi = pbc_rvec_sub(pbc,x[av],x[ai],xvi); } else { svi = CENTRAL; } if (fshift && (svi!=CENTRAL || sji!=CENTRAL || skj!=CENTRAL || slj!=CENTRAL)) { rvec_dec(fshift[svi],fv); for(m=0; m<DIM; m++) { fshift[CENTRAL][m] += fv[m] - (1 + a + b)*temp[m]; fshift[ sji][m] += temp[m]; fshift[ skj][m] += a*temp[m]; fshift[ slj][m] += b*temp[m]; } } /* TOTAL: 77 flops */ } static void spread_vsite4FDN(t_iatom ia[],real a,real b,real c, rvec x[],rvec f[],rvec fshift[], t_pbc *pbc,t_graph *g) { rvec xvi,xij,xik,xil,ra,rb,rja,rjb,rab,rm,rt; rvec fv,fj,fk,fl; real invrm,denom; real cfx,cfy,cfz; ivec di; int av,ai,aj,ak,al; int svi,sij,sik,sil; /* DEBUG: check atom indices */ av = ia[1]; ai = ia[2]; aj = ia[3]; ak = ia[4]; al = ia[5]; copy_rvec(f[av],fv); sij = pbc_rvec_sub(pbc,x[aj],x[ai],xij); sik = pbc_rvec_sub(pbc,x[ak],x[ai],xik); sil = pbc_rvec_sub(pbc,x[al],x[ai],xil); /* 9 flops */ ra[XX] = a*xik[XX]; ra[YY] = a*xik[YY]; ra[ZZ] = a*xik[ZZ]; rb[XX] = b*xil[XX]; rb[YY] = b*xil[YY]; rb[ZZ] = b*xil[ZZ]; /* 6 flops */ rvec_sub(ra,xij,rja); rvec_sub(rb,xij,rjb); rvec_sub(rb,ra,rab); /* 9 flops */ cprod(rja,rjb,rm); /* 9 flops */ invrm=gmx_invsqrt(norm2(rm)); denom=invrm*invrm; /* 5+5+2 flops */ cfx = c*invrm*fv[XX]; cfy = c*invrm*fv[YY]; cfz = c*invrm*fv[ZZ]; /* 6 Flops */ cprod(rm,rab,rt); /* 9 flops */ rt[XX] *= denom; rt[YY] *= denom; rt[ZZ] *= denom; /* 3flops */ fj[XX] = ( -rm[XX]*rt[XX]) * cfx + ( rab[ZZ]-rm[YY]*rt[XX]) * cfy + (-rab[YY]-rm[ZZ]*rt[XX]) * cfz; fj[YY] = (-rab[ZZ]-rm[XX]*rt[YY]) * cfx + ( -rm[YY]*rt[YY]) * cfy + ( 
rab[XX]-rm[ZZ]*rt[YY]) * cfz; fj[ZZ] = ( rab[YY]-rm[XX]*rt[ZZ]) * cfx + (-rab[XX]-rm[YY]*rt[ZZ]) * cfy + ( -rm[ZZ]*rt[ZZ]) * cfz; /* 30 flops */ cprod(rjb,rm,rt); /* 9 flops */ rt[XX] *= denom*a; rt[YY] *= denom*a; rt[ZZ] *= denom*a; /* 3flops */ fk[XX] = ( -rm[XX]*rt[XX]) * cfx + (-a*rjb[ZZ]-rm[YY]*rt[XX]) * cfy + ( a*rjb[YY]-rm[ZZ]*rt[XX]) * cfz; fk[YY] = ( a*rjb[ZZ]-rm[XX]*rt[YY]) * cfx + ( -rm[YY]*rt[YY]) * cfy + (-a*rjb[XX]-rm[ZZ]*rt[YY]) * cfz; fk[ZZ] = (-a*rjb[YY]-rm[XX]*rt[ZZ]) * cfx + ( a*rjb[XX]-rm[YY]*rt[ZZ]) * cfy + ( -rm[ZZ]*rt[ZZ]) * cfz; /* 36 flops */ cprod(rm,rja,rt); /* 9 flops */ rt[XX] *= denom*b; rt[YY] *= denom*b; rt[ZZ] *= denom*b; /* 3flops */ fl[XX] = ( -rm[XX]*rt[XX]) * cfx + ( b*rja[ZZ]-rm[YY]*rt[XX]) * cfy + (-b*rja[YY]-rm[ZZ]*rt[XX]) * cfz; fl[YY] = (-b*rja[ZZ]-rm[XX]*rt[YY]) * cfx + ( -rm[YY]*rt[YY]) * cfy + ( b*rja[XX]-rm[ZZ]*rt[YY]) * cfz; fl[ZZ] = ( b*rja[YY]-rm[XX]*rt[ZZ]) * cfx + (-b*rja[XX]-rm[YY]*rt[ZZ]) * cfy + ( -rm[ZZ]*rt[ZZ]) * cfz; /* 36 flops */ f[ai][XX] += fv[XX] - fj[XX] - fk[XX] - fl[XX]; f[ai][YY] += fv[YY] - fj[YY] - fk[YY] - fl[YY]; f[ai][ZZ] += fv[ZZ] - fj[ZZ] - fk[ZZ] - fl[ZZ]; rvec_inc(f[aj],fj); rvec_inc(f[ak],fk); rvec_inc(f[al],fl); /* 21 flops */ if (g) { ivec_sub(SHIFT_IVEC(g,av),SHIFT_IVEC(g,ai),di); svi = IVEC2IS(di); ivec_sub(SHIFT_IVEC(g,aj),SHIFT_IVEC(g,ai),di); sij = IVEC2IS(di); ivec_sub(SHIFT_IVEC(g,ak),SHIFT_IVEC(g,ai),di); sik = IVEC2IS(di); ivec_sub(SHIFT_IVEC(g,al),SHIFT_IVEC(g,ai),di); sil = IVEC2IS(di); } else if (pbc) { svi = pbc_rvec_sub(pbc,x[av],x[ai],xvi); } else { svi = CENTRAL; } if (fshift && (svi!=CENTRAL || sij!=CENTRAL || sik!=CENTRAL || sil!=CENTRAL)) { rvec_dec(fshift[svi],fv); fshift[CENTRAL][XX] += fv[XX] - fj[XX] - fk[XX] - fl[XX]; fshift[CENTRAL][YY] += fv[YY] - fj[YY] - fk[YY] - fl[YY]; fshift[CENTRAL][ZZ] += fv[ZZ] - fj[ZZ] - fk[ZZ] - fl[ZZ]; rvec_inc(fshift[sij],fj); rvec_inc(fshift[sik],fk); rvec_inc(fshift[sil],fl); } /* Total: 207 flops (Yuck!) 
*/ } static int spread_vsiten(t_iatom ia[],t_iparams ip[], rvec x[],rvec f[],rvec fshift[], t_pbc *pbc,t_graph *g) { rvec xv,dx,fi; int n3,av,i,ai; real a; ivec di; int siv; n3 = 3*ip[ia[0]].vsiten.n; av = ia[1]; copy_rvec(x[av],xv); for(i=0; i<n3; i+=3) { ai = ia[i+2]; if (g) { ivec_sub(SHIFT_IVEC(g,ai),SHIFT_IVEC(g,av),di); siv = IVEC2IS(di); } else if (pbc) { siv = pbc_dx_aiuc(pbc,x[ai],xv,dx); } else { siv = CENTRAL; } a = ip[ia[i]].vsiten.a; svmul(a,f[av],fi); rvec_inc(f[ai],fi); if (fshift && siv != CENTRAL) { rvec_inc(fshift[siv],fi); rvec_dec(fshift[CENTRAL],fi); } /* 6 Flops */ } return n3; } void spread_vsite_f(FILE *log,gmx_vsite_t *vsite, rvec x[],rvec f[],rvec *fshift, t_nrnb *nrnb,t_idef *idef, int ePBC,gmx_bool bMolPBC,t_graph *g,matrix box, t_commrec *cr) { real a1,b1,c1; int i,inc,m,nra,nr,tp,ftype; int nd2,nd3,nd3FD,nd3FAD,nd3OUT,nd4FD,nd4FDN,ndN; t_iatom *ia; t_iparams *ip; t_pbc pbc,*pbc_null,*pbc_null2; int *vsite_pbc; /* We only need to do pbc when we have inter-cg vsites */ if ((DOMAINDECOMP(cr) || bMolPBC) && vsite->n_intercg_vsite) { /* This is wasting some CPU time as we now do this multiple times * per MD step. But how often do we have vsites with full pbc? 
*/ pbc_null = set_pbc_dd(&pbc,ePBC,cr->dd,FALSE,box); } else { pbc_null = NULL; } if (DOMAINDECOMP(cr)) { dd_clear_f_vsites(cr->dd,f); } else if (PARTDECOMP(cr) && vsite->vsitecomm != NULL) { pd_clear_nonlocal_constructs(vsite->vsitecomm,f); } ip = idef->iparams; nd2 = 0; nd3 = 0; nd3FD = 0; nd3FAD = 0; nd3OUT = 0; nd4FD = 0; nd4FDN = 0; ndN = 0; /* this loop goes backwards to be able to build * * higher type vsites from lower types */ pbc_null2 = NULL; for(ftype=F_NRE-1; (ftype>=0); ftype--) { if (interaction_function[ftype].flags & IF_VSITE) { nra = interaction_function[ftype].nratoms; nr = idef->il[ftype].nr; ia = idef->il[ftype].iatoms; if (pbc_null) { vsite_pbc = vsite->vsite_pbc_loc[ftype-F_VSITE2]; } else { vsite_pbc = NULL; } for(i=0; (i<nr); ) { /* Check if we need to apply pbc for this vsite */ if (vsite_pbc) { if (vsite_pbc[i/(1+nra)] > -2) pbc_null2 = pbc_null; else pbc_null2 = NULL; } tp = ia[0]; if (ftype != idef->functype[tp]) gmx_incons("Functiontypes for vsites wrong"); /* Constants for constructing */ a1 = ip[tp].vsite.a; /* Construct the vsite depending on type */ inc = nra+1; switch (ftype) { case F_VSITE2: spread_vsite2(ia,a1,x,f,fshift,pbc_null2,g); nd2++; break; case F_VSITE3: b1 = ip[tp].vsite.b; spread_vsite3(ia,a1,b1,x,f,fshift,pbc_null2,g); nd3++; break; case F_VSITE3FD: b1 = ip[tp].vsite.b; spread_vsite3FD(ia,a1,b1,x,f,fshift,pbc_null2,g); nd3FD++; break; case F_VSITE3FAD: b1 = ip[tp].vsite.b; spread_vsite3FAD(ia,a1,b1,x,f,fshift,pbc_null2,g); nd3FAD++; break; case F_VSITE3OUT: b1 = ip[tp].vsite.b; c1 = ip[tp].vsite.c; spread_vsite3OUT(ia,a1,b1,c1,x,f,fshift,pbc_null2,g); nd3OUT++; break; case F_VSITE4FD: b1 = ip[tp].vsite.b; c1 = ip[tp].vsite.c; spread_vsite4FD(ia,a1,b1,c1,x,f,fshift,pbc_null2,g); nd4FD++; break; case F_VSITE4FDN: b1 = ip[tp].vsite.b; c1 = ip[tp].vsite.c; spread_vsite4FDN(ia,a1,b1,c1,x,f,fshift,pbc_null2,g); nd4FDN++; break; case F_VSITEN: inc = spread_vsiten(ia,ip,x,f,fshift,pbc_null2,g); ndN += inc; break; default: 
gmx_fatal(FARGS,"No such vsite type %d in %s, line %d", ftype,__FILE__,__LINE__); } clear_rvec(f[ia[1]]); /* Increment loop variables */ i += inc; ia += inc; } } } inc_nrnb(nrnb,eNR_VSITE2, nd2 ); inc_nrnb(nrnb,eNR_VSITE3, nd3 ); inc_nrnb(nrnb,eNR_VSITE3FD, nd3FD ); inc_nrnb(nrnb,eNR_VSITE3FAD,nd3FAD ); inc_nrnb(nrnb,eNR_VSITE3OUT,nd3OUT ); inc_nrnb(nrnb,eNR_VSITE4FD, nd4FD ); inc_nrnb(nrnb,eNR_VSITE4FDN,nd4FDN ); inc_nrnb(nrnb,eNR_VSITEN, ndN ); if (DOMAINDECOMP(cr)) { dd_move_f_vsites(cr->dd,f,fshift); } else if (vsite->bPDvsitecomm) { /* We only move forces here, and they are independent of shifts */ move_construct_f(vsite->vsitecomm,f,cr); } } static int *atom2cg(t_block *cgs) { int *a2cg,cg,i; snew(a2cg,cgs->index[cgs->nr]); for(cg=0; cg<cgs->nr; cg++) { for(i=cgs->index[cg]; i<cgs->index[cg+1]; i++) a2cg[i] = cg; } return a2cg; } static int count_intercg_vsite(gmx_mtop_t *mtop) { int mb,mt,ftype,nral,i,cg,a; gmx_molblock_t *molb; gmx_moltype_t *molt; int *a2cg; t_ilist *il; t_iatom *ia; int n_intercg_vsite; n_intercg_vsite = 0; for(mb=0; mb<mtop->nmolblock; mb++) { molb = &mtop->molblock[mb]; molt = &mtop->moltype[molb->type]; a2cg = atom2cg(&molt->cgs); for(ftype=0; ftype<F_NRE; ftype++) { if (interaction_function[ftype].flags & IF_VSITE) { nral = NRAL(ftype); il = &molt->ilist[ftype]; ia = il->iatoms; for(i=0; i<il->nr; i+=1+nral) { cg = a2cg[ia[1+i]]; for(a=1; a<nral; a++) { if (a2cg[ia[1+a]] != cg) { n_intercg_vsite += molb->nmol; break; } } } } } sfree(a2cg); } return n_intercg_vsite; } static int **get_vsite_pbc(t_iparams *iparams,t_ilist *ilist, t_atom *atom,t_mdatoms *md, t_block *cgs,int *a2cg) { int ftype,nral,i,j,vsi,vsite,cg_v,cg_c,a,nc3=0; t_ilist *il; t_iatom *ia; int **vsite_pbc,*vsite_pbc_f; char *pbc_set; gmx_bool bViteOnlyCG_and_FirstAtom; /* Make an array that tells if the pbc of an atom is set */ snew(pbc_set,cgs->index[cgs->nr]); /* PBC is set for all non vsites */ for(a=0; a<cgs->index[cgs->nr]; a++) { if ((atom && atom[a].ptype != 
eptVSite) || (md && md->ptype[a] != eptVSite)) { pbc_set[a] = 1; } } snew(vsite_pbc,F_VSITEN-F_VSITE2+1); for(ftype=0; ftype<F_NRE; ftype++) { if (interaction_function[ftype].flags & IF_VSITE) { nral = NRAL(ftype); il = &ilist[ftype]; ia = il->iatoms; snew(vsite_pbc[ftype-F_VSITE2],il->nr/(1+nral)); vsite_pbc_f = vsite_pbc[ftype-F_VSITE2]; i = 0; while (i < il->nr) { vsi = i/(1+nral); vsite = ia[i+1]; cg_v = a2cg[vsite]; /* A value of -2 signals that this vsite and its contructing * atoms are all within the same cg, so no pbc is required. */ vsite_pbc_f[vsi] = -2; /* Check if constructing atoms are outside the vsite's cg */ nc3 = 0; if (ftype == F_VSITEN) { nc3 = 3*iparams[ia[i]].vsiten.n; for(j=0; j<nc3; j+=3) { if (a2cg[ia[i+j+2]] != cg_v) vsite_pbc_f[vsi] = -1; } } else { for(a=1; a<nral; a++) { if (a2cg[ia[i+1+a]] != cg_v) vsite_pbc_f[vsi] = -1; } } if (vsite_pbc_f[vsi] == -1) { /* Check if this is the first processed atom of a vsite only cg */ bViteOnlyCG_and_FirstAtom = TRUE; for(a=cgs->index[cg_v]; a<cgs->index[cg_v+1]; a++) { /* Non-vsites already have pbc set, so simply check for pbc_set */ if (pbc_set[a]) { bViteOnlyCG_and_FirstAtom = FALSE; break; } } if (bViteOnlyCG_and_FirstAtom) { /* First processed atom of a vsite only charge group. * The pbc of the input coordinates to construct_vsites * should be preserved. */ vsite_pbc_f[vsi] = vsite; } else if (cg_v != a2cg[ia[1+i+1]]) { /* This vsite has a different charge group index * than it's first constructing atom * and the charge group has more than one atom, * search for the first normal particle * or vsite that already had its pbc defined. * If nothing is found, use full pbc for this vsite. 
*/ for(a=cgs->index[cg_v]; a<cgs->index[cg_v+1]; a++) { if (a != vsite && pbc_set[a]) { vsite_pbc_f[vsi] = a; if (gmx_debug_at) fprintf(debug,"vsite %d match pbc with atom %d\n", vsite+1,a+1); break; } } if (gmx_debug_at) fprintf(debug,"vsite atom %d cg %d - %d pbc atom %d\n", vsite+1,cgs->index[cg_v]+1,cgs->index[cg_v+1], vsite_pbc_f[vsi]+1); } } if (ftype == F_VSITEN) { /* The other entries in vsite_pbc_f are not used for center vsites */ i += nc3; } else { i += 1+nral; } /* This vsite now has its pbc defined */ pbc_set[vsite] = 1; } } } sfree(pbc_set); return vsite_pbc; } gmx_vsite_t *init_vsite(gmx_mtop_t *mtop,t_commrec *cr) { int nvsite,i; int *a2cg,cg; gmx_vsite_t *vsite; int mt; gmx_moltype_t *molt; /* check if there are vsites */ nvsite = 0; for(i=0; i<F_NRE; i++) { if (interaction_function[i].flags & IF_VSITE) { nvsite += gmx_mtop_ftype_count(mtop,i); } } if (nvsite == 0) { return NULL; } snew(vsite,1); vsite->n_intercg_vsite = count_intercg_vsite(mtop); if (vsite->n_intercg_vsite > 0 && DOMAINDECOMP(cr)) { vsite->nvsite_pbc_molt = mtop->nmoltype; snew(vsite->vsite_pbc_molt,vsite->nvsite_pbc_molt); for(mt=0; mt<mtop->nmoltype; mt++) { molt = &mtop->moltype[mt]; /* Make an atom to charge group index */ a2cg = atom2cg(&molt->cgs); vsite->vsite_pbc_molt[mt] = get_vsite_pbc(mtop->ffparams.iparams, molt->ilist, molt->atoms.atom,NULL, &molt->cgs,a2cg); sfree(a2cg); } snew(vsite->vsite_pbc_loc_nalloc,F_VSITEN-F_VSITE2+1); snew(vsite->vsite_pbc_loc ,F_VSITEN-F_VSITE2+1); } return vsite; } void set_vsite_top(gmx_vsite_t *vsite,gmx_localtop_t *top,t_mdatoms *md, t_commrec *cr) { int *a2cg; /* Make an atom to charge group index */ a2cg = atom2cg(&top->cgs); if (vsite->n_intercg_vsite > 0) { vsite->vsite_pbc_loc = get_vsite_pbc(top->idef.iparams, top->idef.il,NULL,md, &top->cgs,a2cg); if (PARTDECOMP(cr)) { snew(vsite->vsitecomm,1); vsite->bPDvsitecomm = setup_parallel_vsites(&(top->idef),cr,vsite->vsitecomm); } } sfree(a2cg); }
Java
// Copyright 2014 The Chromium Authors. All rights reserved. // Copyright (C) 2016 Apple Inc. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#pragma once

#include "CSSPrimitiveValue.h"
#include <wtf/text/StringView.h>

namespace WebCore {

// Token kinds emitted by the CSS tokenizer. The set mirrors the token
// types defined by the CSS Syntax specification, plus CommentToken and
// an explicit EOFToken sentinel.
enum CSSParserTokenType {
    IdentToken = 0,
    FunctionToken,
    AtKeywordToken,
    HashToken,
    UrlToken,
    BadUrlToken,
    DelimiterToken,
    NumberToken,
    PercentageToken,
    DimensionToken,
    IncludeMatchToken,
    DashMatchToken,
    PrefixMatchToken,
    SuffixMatchToken,
    SubstringMatchToken,
    ColumnToken,
    UnicodeRangeToken,
    WhitespaceToken,
    CDOToken,
    CDCToken,
    ColonToken,
    SemicolonToken,
    CommaToken,
    LeftParenthesisToken,
    RightParenthesisToken,
    LeftBracketToken,
    RightBracketToken,
    LeftBraceToken,
    RightBraceToken,
    StringToken,
    BadStringToken,
    EOFToken,
    CommentToken,
};

// Whether a numeric token carried an explicit leading '+' or '-' sign.
enum NumericSign {
    NoSign,
    PlusSign,
    MinusSign,
};

// Whether a numeric token was lexed as an integer or as a real number.
enum NumericValueType {
    IntegerValueType,
    NumberValueType,
};

// Per CSS Syntax, a hash token is either a valid identifier ("id") or
// an arbitrary ("unrestricted") character sequence.
enum HashTokenType {
    HashTokenId,
    HashTokenUnrestricted,
};

// A single lexed CSS token. The object is deliberately kept small:
// type/flag fields are packed into bitfields, the string value is an
// unpacked StringView (pointer + length + 8-bit flag), and the
// type-specific payload lives in a union. Tokens do not own their
// string data; the backing characters must outlive the token unless
// copyWithUpdatedString() is used.
class CSSParserToken {
    WTF_MAKE_FAST_ALLOCATED;
public:
    // Whether this token opens or closes a block ((), [], {}, functions).
    enum BlockType {
        NotBlock,
        BlockStart,
        BlockEnd,
    };

    CSSParserToken(CSSParserTokenType, BlockType = NotBlock);
    CSSParserToken(CSSParserTokenType, StringView, BlockType = NotBlock);

    CSSParserToken(CSSParserTokenType, UChar); // for DelimiterToken
    CSSParserToken(CSSParserTokenType, double, NumericValueType, NumericSign); // for NumberToken
    CSSParserToken(CSSParserTokenType, UChar32, UChar32); // for UnicodeRangeToken

    CSSParserToken(HashTokenType, StringView);

    bool operator==(const CSSParserToken& other) const;
    bool operator!=(const CSSParserToken& other) const { return !(*this == other); }

    // Converts NumberToken to DimensionToken.
    void convertToDimensionWithUnit(StringView);

    // Converts NumberToken to PercentageToken.
    void convertToPercentage();

    CSSParserTokenType type() const { return static_cast<CSSParserTokenType>(m_type); }

    // Reassembles the token's string payload from the raw pointer/length/
    // is-8-bit fields packed into this object.
    StringView value() const
    {
        if (m_valueIs8Bit)
            return StringView(static_cast<const LChar*>(m_valueDataCharRaw), m_valueLength);
        return StringView(static_cast<const UChar*>(m_valueDataCharRaw), m_valueLength);
    }

    UChar delimiter() const;
    NumericSign numericSign() const;
    NumericValueType numericValueType() const;
    double numericValue() const;
    HashTokenType getHashTokenType() const { ASSERT(m_type == HashToken); return m_hashTokenType; }
    BlockType getBlockType() const { return static_cast<BlockType>(m_blockType); }
    CSSPrimitiveValue::UnitType unitType() const { return static_cast<CSSPrimitiveValue::UnitType>(m_unit); }
    UChar32 unicodeRangeStart() const { ASSERT(m_type == UnicodeRangeToken); return m_unicodeRange.start; }
    UChar32 unicodeRangeEnd() const { ASSERT(m_type == UnicodeRangeToken); return m_unicodeRange.end; }
    CSSValueID id() const;
    CSSValueID functionId() const;

    bool hasStringBacking() const;

    CSSPropertyID parseAsCSSPropertyID() const;

    void serialize(StringBuilder&) const;

    // Returns a copy of this token whose string payload points into the
    // given StringView instead of the original backing store.
    CSSParserToken copyWithUpdatedString(const StringView&) const;

private:
    // Unpacks a StringView into the raw length/is-8-bit/pointer triple so
    // it packs tightly with the rest of the object.
    void initValueFromStringView(StringView string)
    {
        m_valueLength = string.length();
        m_valueIs8Bit = string.is8Bit();
        m_valueDataCharRaw = m_valueIs8Bit ? const_cast<void*>(static_cast<const void*>(string.characters8())) : const_cast<void*>(static_cast<const void*>(string.characters16()));
    }
    unsigned m_type : 6; // CSSParserTokenType
    unsigned m_blockType : 2; // BlockType
    unsigned m_numericValueType : 1; // NumericValueType
    unsigned m_numericSign : 2; // NumericSign
    unsigned m_unit : 7; // CSSPrimitiveValue::UnitType

    bool valueDataCharRawEqual(const CSSParserToken& other) const;

    // m_value... is an unpacked StringView so that we can pack it
    // tightly with the rest of this object for a smaller object size.
    bool m_valueIs8Bit : 1;
    unsigned m_valueLength;
    void* m_valueDataCharRaw; // Either LChar* or UChar*.

    // Type-specific payload; which member is active depends on m_type.
    union {
        UChar m_delimiter;
        HashTokenType m_hashTokenType;
        double m_numericValue;
        mutable int m_id; // lazily-resolved CSSValueID cache for id()/functionId()

        struct {
            UChar32 start;
            UChar32 end;
        } m_unicodeRange;
    };
};

} // namespace WebCore
Java
/**
 * @license Copyright (c) 2003-2016, CKSource - Frederico Knabben. All rights reserved.
 * For licensing, see LICENSE.md or http://ckeditor.com/license
 */

// Italian ('it') localisation for the "magicline" plugin.
// "title" is the tooltip shown on the insert-paragraph line
// ("Insert paragraph here").
CKEDITOR.plugins.setLang( 'magicline', 'it', {
	title: 'Inserisci paragrafo qui'
} );
Java
/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_FEC_RECEIVER_IMPL_H_
#define WEBRTC_MODULES_RTP_RTCP_SOURCE_FEC_RECEIVER_IMPL_H_

// This header is included to get the nested declaration of Packet structure.

#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/rtp_rtcp/include/fec_receiver.h"
#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/forward_error_correction.h"
#include "webrtc/typedefs.h"

namespace webrtc {

class CriticalSectionWrapper;

// Concrete FecReceiver: accepts RED-encapsulated RTP packets, separates
// media from ULPFEC payloads, and feeds them to a ForwardErrorCorrection
// instance to recover lost media packets. Recovered/received media is
// handed back through the RtpData callback. Internal state is guarded by
// crit_sect_; presumably methods may be called from multiple threads —
// TODO(review): confirm threading contract against FecReceiver docs.
class FecReceiverImpl : public FecReceiver {
 public:
  // |callback| receives demultiplexed and recovered media packets.
  // Not owned; must outlive this object.
  FecReceiverImpl(RtpData* callback);
  virtual ~FecReceiverImpl();

  // Parses one incoming RED packet (header + raw bytes); |ulpfec_payload_type|
  // identifies which encapsulated payload type carries FEC data.
  int32_t AddReceivedRedPacket(const RTPHeader& rtp_header,
                               const uint8_t* incoming_rtp_packet,
                               size_t packet_length,
                               uint8_t ulpfec_payload_type) override;

  // Runs FEC decoding over the buffered packets and forwards results to
  // the RtpData callback.
  int32_t ProcessReceivedFec() override;

  FecPacketCounter GetPacketCounter() const override;

 private:
  rtc::scoped_ptr<CriticalSectionWrapper> crit_sect_;
  RtpData* recovered_packet_callback_;  // not owned
  ForwardErrorCorrection* fec_;
  // TODO(holmer): In the current version received_packet_list_ is never more
  // than one packet, since we process FEC every time a new packet
  // arrives. We should remove the list.
  ForwardErrorCorrection::ReceivedPacketList received_packet_list_;
  ForwardErrorCorrection::RecoveredPacketList recovered_packet_list_;
  FecPacketCounter packet_counter_;
};
}  // namespace webrtc

#endif  // WEBRTC_MODULES_RTP_RTCP_SOURCE_FEC_RECEIVER_IMPL_H_
Java
/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * */ #include <linux/module.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/interrupt.h> #include <linux/err.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/ioport.h> #include <linux/pm_runtime.h> #include <linux/device.h> #include <linux/pm_qos_params.h> #include <mach/msm_hsusb_hw.h> #include <mach/msm72k_otg.h> #include <mach/msm_hsusb.h> #include <linux/debugfs.h> #include <linux/uaccess.h> #include <mach/clk.h> #include <mach/msm_xo.h> #define MSM_USB_BASE (dev->regs) #define USB_LINK_RESET_TIMEOUT (msecs_to_jiffies(10)) #define DRIVER_NAME "msm_otg" static void otg_reset(struct otg_transceiver *xceiv, int phy_reset); static void msm_otg_set_vbus_state(int online); static void msm_otg_set_id_state(int online); struct msm_otg *the_msm_otg; static int is_host(void) { struct msm_otg *dev = the_msm_otg; if (dev->pmic_id_notif_supp) return dev->pmic_id_status ? 0 : 1; else if (dev->pdata->otg_mode == OTG_ID) return (OTGSC_ID & readl(USB_OTGSC)) ? 0 : 1; else return !test_bit(ID, &dev->inputs); } static int is_b_sess_vld(void) { struct msm_otg *dev = the_msm_otg; if (dev->pdata->otg_mode == OTG_ID) return (OTGSC_BSV & readl(USB_OTGSC)) ? 
1 : 0; else return test_bit(B_SESS_VLD, &dev->inputs); } static unsigned ulpi_read(struct msm_otg *dev, unsigned reg) { unsigned ret, timeout = 100000; unsigned long flags; spin_lock_irqsave(&dev->lock, flags); /* initiate read operation */ writel(ULPI_RUN | ULPI_READ | ULPI_ADDR(reg), USB_ULPI_VIEWPORT); /* wait for completion */ while ((readl(USB_ULPI_VIEWPORT) & ULPI_RUN) && (--timeout)) cpu_relax(); if (timeout == 0) { pr_err("%s: timeout %08x\n", __func__, readl(USB_ULPI_VIEWPORT)); spin_unlock_irqrestore(&dev->lock, flags); return 0xffffffff; } ret = ULPI_DATA_READ(readl(USB_ULPI_VIEWPORT)); spin_unlock_irqrestore(&dev->lock, flags); return ret; } static int ulpi_write(struct msm_otg *dev, unsigned val, unsigned reg) { unsigned timeout = 10000; unsigned long flags; spin_lock_irqsave(&dev->lock, flags); /* initiate write operation */ writel(ULPI_RUN | ULPI_WRITE | ULPI_ADDR(reg) | ULPI_DATA(val), USB_ULPI_VIEWPORT); /* wait for completion */ while ((readl(USB_ULPI_VIEWPORT) & ULPI_RUN) && (--timeout)) ; if (timeout == 0) { pr_err("%s: timeout\n", __func__); spin_unlock_irqrestore(&dev->lock, flags); return -1; } spin_unlock_irqrestore(&dev->lock, flags); return 0; } static int usb_ulpi_write(struct otg_transceiver *xceiv, u32 val, u32 reg) { struct msm_otg *dev = container_of(xceiv, struct msm_otg, otg); return ulpi_write(dev, val, reg); } static int usb_ulpi_read(struct otg_transceiver *xceiv, u32 reg) { struct msm_otg *dev = container_of(xceiv, struct msm_otg, otg); return ulpi_read(dev, reg); } #ifdef CONFIG_USB_EHCI_MSM static void enable_idgnd(struct msm_otg *dev) { /* Do nothing if instead of ID pin, USER controls mode switch */ if (dev->pdata->otg_mode == OTG_USER_CONTROL) return; ulpi_write(dev, (1<<4), 0x0E); ulpi_write(dev, (1<<4), 0x11); writel(readl(USB_OTGSC) | OTGSC_IDIE, USB_OTGSC); } static void disable_idgnd(struct msm_otg *dev) { /* Do nothing if instead of ID pin, USER controls mode switch */ if (dev->pdata->otg_mode == OTG_USER_CONTROL) 
return; ulpi_write(dev, (1<<4), 0x0F); ulpi_write(dev, (1<<4), 0x12); writel(readl(USB_OTGSC) & ~OTGSC_IDIE, USB_OTGSC); } #endif static void enable_idabc(struct msm_otg *dev) { #ifdef CONFIG_USB_MSM_ACA ulpi_write(dev, (1<<5), 0x0E); ulpi_write(dev, (1<<5), 0x11); #endif } static void disable_idabc(struct msm_otg *dev) { #ifdef CONFIG_USB_MSM_ACA ulpi_write(dev, (1<<5), 0x0F); ulpi_write(dev, (1<<5), 0x12); #endif } static void enable_sess_valid(struct msm_otg *dev) { /* Do nothing if instead of ID pin, USER controls mode switch */ if (dev->pdata->otg_mode == OTG_USER_CONTROL) return; ulpi_write(dev, (1<<2), 0x0E); ulpi_write(dev, (1<<2), 0x11); writel(readl(USB_OTGSC) | OTGSC_BSVIE, USB_OTGSC); } static void disable_sess_valid(struct msm_otg *dev) { /* Do nothing if instead of ID pin, USER controls mode switch */ if (dev->pdata->otg_mode == OTG_USER_CONTROL) return; ulpi_write(dev, (1<<2), 0x0F); ulpi_write(dev, (1<<2), 0x12); writel(readl(USB_OTGSC) & ~OTGSC_BSVIE, USB_OTGSC); } #ifdef CONFIG_USB_MSM_ACA static void set_aca_id_inputs(struct msm_otg *dev) { u8 phy_ints; phy_ints = ulpi_read(dev, 0x13); pr_debug("phy_ints = %x\n", phy_ints); clear_bit(ID_A, &dev->inputs); clear_bit(ID_B, &dev->inputs); clear_bit(ID_C, &dev->inputs); if (phy_id_state_a(phy_ints)) { pr_debug("ID_A set\n"); set_bit(ID_A, &dev->inputs); set_bit(A_BUS_REQ, &dev->inputs); } else if (phy_id_state_b(phy_ints)) { pr_debug("ID_B set\n"); set_bit(ID_B, &dev->inputs); } else if (phy_id_state_c(phy_ints)) { pr_debug("ID_C set\n"); set_bit(ID_C, &dev->inputs); } } #define get_aca_bmaxpower(dev) (dev->b_max_power) #define set_aca_bmaxpower(dev, power) (dev->b_max_power = power) #else #define get_aca_bmaxpower(dev) 0 #define set_aca_bmaxpower(dev, power) #endif static inline void set_pre_emphasis_level(struct msm_otg *dev) { unsigned res = 0; if (!dev->pdata || dev->pdata->pemp_level == PRE_EMPHASIS_DEFAULT) return; res = ulpi_read(dev, ULPI_CONFIG_REG3); res &= ~(ULPI_PRE_EMPHASIS_MASK); if 
(dev->pdata->pemp_level != PRE_EMPHASIS_DISABLE) res |= dev->pdata->pemp_level; ulpi_write(dev, res, ULPI_CONFIG_REG3); } static inline void set_cdr_auto_reset(struct msm_otg *dev) { unsigned res = 0; if (!dev->pdata || dev->pdata->cdr_autoreset == CDR_AUTO_RESET_DEFAULT) return; res = ulpi_read(dev, ULPI_DIGOUT_CTRL); if (dev->pdata->cdr_autoreset == CDR_AUTO_RESET_ENABLE) res &= ~ULPI_CDR_AUTORESET; else res |= ULPI_CDR_AUTORESET; ulpi_write(dev, res, ULPI_DIGOUT_CTRL); } static inline void set_se1_gating(struct msm_otg *dev) { unsigned res = 0; if (!dev->pdata || dev->pdata->se1_gating == SE1_GATING_DEFAULT) return; res = ulpi_read(dev, ULPI_DIGOUT_CTRL); if (dev->pdata->se1_gating == SE1_GATING_ENABLE) res &= ~ULPI_SE1_GATE; else res |= ULPI_SE1_GATE; ulpi_write(dev, res, ULPI_DIGOUT_CTRL); } static inline void set_driver_amplitude(struct msm_otg *dev) { unsigned res = 0; if (!dev->pdata || dev->pdata->drv_ampl == HS_DRV_AMPLITUDE_DEFAULT) return; res = ulpi_read(dev, ULPI_CONFIG_REG2); res &= ~ULPI_DRV_AMPL_MASK; if (dev->pdata->drv_ampl != HS_DRV_AMPLITUDE_ZERO_PERCENT) res |= dev->pdata->drv_ampl; ulpi_write(dev, res, ULPI_CONFIG_REG2); } static const char *state_string(enum usb_otg_state state) { switch (state) { case OTG_STATE_A_IDLE: return "a_idle"; case OTG_STATE_A_WAIT_VRISE: return "a_wait_vrise"; case OTG_STATE_A_WAIT_BCON: return "a_wait_bcon"; case OTG_STATE_A_HOST: return "a_host"; case OTG_STATE_A_SUSPEND: return "a_suspend"; case OTG_STATE_A_PERIPHERAL: return "a_peripheral"; case OTG_STATE_A_WAIT_VFALL: return "a_wait_vfall"; case OTG_STATE_A_VBUS_ERR: return "a_vbus_err"; case OTG_STATE_B_IDLE: return "b_idle"; case OTG_STATE_B_SRP_INIT: return "b_srp_init"; case OTG_STATE_B_PERIPHERAL: return "b_peripheral"; case OTG_STATE_B_WAIT_ACON: return "b_wait_acon"; case OTG_STATE_B_HOST: return "b_host"; default: return "UNDEFINED"; } } static const char *timer_string(int bit) { switch (bit) { case A_WAIT_VRISE: return "a_wait_vrise"; case 
A_WAIT_VFALL: return "a_wait_vfall"; case B_SRP_FAIL: return "b_srp_fail"; case A_WAIT_BCON: return "a_wait_bcon"; case A_AIDL_BDIS: return "a_aidl_bdis"; case A_BIDL_ADIS: return "a_bidl_adis"; case B_ASE0_BRST: return "b_ase0_brst"; default: return "UNDEFINED"; } } /* Prevent idle power collapse(pc) while operating in peripheral mode */ static void otg_pm_qos_update_latency(struct msm_otg *dev, int vote) { struct msm_otg_platform_data *pdata = dev->pdata; u32 swfi_latency = 0; if (pdata) swfi_latency = pdata->swfi_latency + 1; if (vote) pm_qos_update_request(pdata->pm_qos_req_dma, swfi_latency); else pm_qos_update_request(pdata->pm_qos_req_dma, PM_QOS_DEFAULT_VALUE); } /* If USB Core is running its protocol engine based on PCLK, * PCLK must be running at >60Mhz for correct HSUSB operation and * USB core cannot tolerate frequency changes on PCLK. For such * USB cores, vote for maximum clk frequency on pclk source */ static void msm_otg_vote_for_pclk_source(struct msm_otg *dev, int vote) { if (!pclk_requires_voting(&dev->otg)) return; if (dev->pdata->usb_in_sps) { if (vote) clk_set_min_rate(dev->dfab_clk, 64000000); else clk_set_min_rate(dev->dfab_clk, 0); return; } if (vote) clk_enable(dev->pdata->ebi1_clk); else clk_disable(dev->pdata->ebi1_clk); } /* Controller gives interrupt for every 1 mesc if 1MSIE is set in OTGSC. * This interrupt can be used as a timer source and OTG timers can be * implemented. But hrtimers on MSM hardware can give atleast 1/32 KHZ * precision. This precision is more than enough for OTG timers. */ static enum hrtimer_restart msm_otg_timer_func(struct hrtimer *_timer) { struct msm_otg *dev = container_of(_timer, struct msm_otg, timer); /* Phy lockup issues are observed when VBUS Valid interrupt is * enabled. Hence set A_VBUS_VLD upon timer exipration. 
*/ if (dev->active_tmout == A_WAIT_VRISE) set_bit(A_VBUS_VLD, &dev->inputs); else set_bit(dev->active_tmout, &dev->tmouts); pr_debug("expired %s timer\n", timer_string(dev->active_tmout)); queue_work(dev->wq, &dev->sm_work); return HRTIMER_NORESTART; } static void msm_otg_del_timer(struct msm_otg *dev) { int bit = dev->active_tmout; pr_debug("deleting %s timer. remaining %lld msec \n", timer_string(bit), div_s64(ktime_to_us(hrtimer_get_remaining(&dev->timer)), 1000)); hrtimer_cancel(&dev->timer); clear_bit(bit, &dev->tmouts); } static void msm_otg_start_timer(struct msm_otg *dev, int time, int bit) { clear_bit(bit, &dev->tmouts); dev->active_tmout = bit; pr_debug("starting %s timer\n", timer_string(bit)); hrtimer_start(&dev->timer, ktime_set(time / 1000, (time % 1000) * 1000000), HRTIMER_MODE_REL); } /* No two otg timers run in parallel. So one hrtimer is sufficient */ static void msm_otg_init_timer(struct msm_otg *dev) { hrtimer_init(&dev->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); dev->timer.function = msm_otg_timer_func; } static const char *event_string(enum usb_otg_event event) { switch (event) { case OTG_EVENT_DEV_CONN_TMOUT: return "DEV_CONN_TMOUT"; case OTG_EVENT_NO_RESP_FOR_HNP_ENABLE: return "NO_RESP_FOR_HNP_ENABLE"; case OTG_EVENT_HUB_NOT_SUPPORTED: return "HUB_NOT_SUPPORTED"; case OTG_EVENT_DEV_NOT_SUPPORTED: return "DEV_NOT_SUPPORTED,"; case OTG_EVENT_HNP_FAILED: return "HNP_FAILED"; case OTG_EVENT_NO_RESP_FOR_SRP: return "NO_RESP_FOR_SRP"; default: return "UNDEFINED"; } } static int msm_otg_send_event(struct otg_transceiver *xceiv, enum usb_otg_event event) { char module_name[16]; char udev_event[128]; char *envp[] = { module_name, udev_event, NULL }; int ret; pr_debug("sending %s event\n", event_string(event)); snprintf(module_name, 16, "MODULE=%s", DRIVER_NAME); snprintf(udev_event, 128, "EVENT=%s", event_string(event)); ret = kobject_uevent_env(&xceiv->dev->kobj, KOBJ_CHANGE, envp); if (ret < 0) pr_info("uevent sending failed with ret = %d\n", 
ret); return ret; } static int msm_otg_start_hnp(struct otg_transceiver *xceiv) { struct msm_otg *dev = container_of(xceiv, struct msm_otg, otg); enum usb_otg_state state; unsigned long flags; spin_lock_irqsave(&dev->lock, flags); state = dev->otg.state; spin_unlock_irqrestore(&dev->lock, flags); if (state != OTG_STATE_A_HOST) { pr_err("HNP can not be initiated in %s state\n", state_string(state)); return -EINVAL; } pr_debug("A-Host: HNP initiated\n"); clear_bit(A_BUS_REQ, &dev->inputs); wake_lock(&dev->wlock); queue_work(dev->wq, &dev->sm_work); return 0; } static int msm_otg_start_srp(struct otg_transceiver *xceiv) { struct msm_otg *dev = container_of(xceiv, struct msm_otg, otg); u32 val; int ret = 0; enum usb_otg_state state; unsigned long flags; spin_lock_irqsave(&dev->lock, flags); state = dev->otg.state; spin_unlock_irqrestore(&dev->lock, flags); if (state != OTG_STATE_B_IDLE) { pr_err("SRP can not be initiated in %s state\n", state_string(state)); ret = -EINVAL; goto out; } if ((jiffies - dev->b_last_se0_sess) < msecs_to_jiffies(TB_SRP_INIT)) { pr_debug("initial conditions of SRP are not met. 
Try again" "after some time\n"); ret = -EAGAIN; goto out; } /* Harware auto assist data pulsing: Data pulse is given * for 7msec; wait for vbus */ val = readl(USB_OTGSC); writel((val & ~OTGSC_INTR_STS_MASK) | OTGSC_HADP, USB_OTGSC); /* VBUS plusing is obsoleted in OTG 2.0 supplement */ out: return ret; } static int msm_otg_set_power(struct otg_transceiver *xceiv, unsigned mA) { static enum chg_type curr_chg = USB_CHG_TYPE__INVALID; struct msm_otg *dev = container_of(xceiv, struct msm_otg, otg); struct msm_otg_platform_data *pdata = dev->pdata; enum chg_type new_chg = atomic_read(&dev->chg_type); unsigned charge = mA; /* Call chg_connected only if the charger has changed */ if (new_chg != curr_chg && pdata->chg_connected) { curr_chg = new_chg; pdata->chg_connected(new_chg); } /* Always use USB_IDCHG_MAX for charging in ID_B and ID_C */ if (test_bit(ID_C, &dev->inputs) || test_bit(ID_B, &dev->inputs)) charge = USB_IDCHG_MAX; pr_debug("Charging with %dmA current\n", charge); /* Call vbus_draw only if the charger is of known type */ if (pdata->chg_vbus_draw && new_chg != USB_CHG_TYPE__INVALID) pdata->chg_vbus_draw(charge); if (new_chg == USB_CHG_TYPE__WALLCHARGER) { wake_lock(&dev->wlock); queue_work(dev->wq, &dev->sm_work); } return 0; } static int msm_otg_set_clk(struct otg_transceiver *xceiv, int on) { struct msm_otg *dev = container_of(xceiv, struct msm_otg, otg); if (!dev || (dev != the_msm_otg)) return -ENODEV; if (on) /* enable clocks */ clk_enable(dev->hs_clk); else clk_disable(dev->hs_clk); return 0; } static void msm_otg_start_peripheral(struct otg_transceiver *xceiv, int on) { struct msm_otg *dev = container_of(xceiv, struct msm_otg, otg); struct msm_otg_platform_data *pdata = dev->pdata; if (!xceiv->gadget) return; if (on) { if (pdata->setup_gpio) pdata->setup_gpio(USB_SWITCH_PERIPHERAL); /* vote for minimum dma_latency to prevent idle * power collapse(pc) while running in peripheral mode. 
*/ otg_pm_qos_update_latency(dev, 1); /* increment the clk reference count so that * it would be still on when disabled from * low power mode routine */ if (dev->pdata->pclk_required_during_lpm) clk_enable(dev->hs_pclk); usb_gadget_vbus_connect(xceiv->gadget); } else { atomic_set(&dev->chg_type, USB_CHG_TYPE__INVALID); usb_gadget_vbus_disconnect(xceiv->gadget); /* decrement the clk reference count so that * it would be off when disabled from * low power mode routine */ if (dev->pdata->pclk_required_during_lpm) clk_disable(dev->hs_pclk); otg_pm_qos_update_latency(dev, 0); if (pdata->setup_gpio) pdata->setup_gpio(USB_SWITCH_DISABLE); } } static void msm_otg_start_host(struct otg_transceiver *xceiv, int on) { struct msm_otg *dev = container_of(xceiv, struct msm_otg, otg); struct msm_otg_platform_data *pdata = dev->pdata; if (!xceiv->host) return; if (dev->start_host) { /* Some targets, e.g. ST1.5, use GPIO to choose b/w connector */ if (on && pdata->setup_gpio) pdata->setup_gpio(USB_SWITCH_HOST); /* increment or decrement the clk reference count * to avoid usb h/w lockup issues when low power * mode is initiated and vbus is on. */ if (dev->pdata->pclk_required_during_lpm) { if (on) clk_enable(dev->hs_pclk); else clk_disable(dev->hs_pclk); } dev->start_host(xceiv->host, on); if (!on && pdata->setup_gpio) pdata->setup_gpio(USB_SWITCH_DISABLE); } } static int msm_otg_suspend(struct msm_otg *dev) { unsigned long timeout; int vbus = 0; unsigned ret; enum chg_type chg_type = atomic_read(&dev->chg_type); unsigned long flags; disable_irq(dev->irq); if (atomic_read(&dev->in_lpm)) goto out; #ifdef CONFIG_USB_MSM_ACA /* * ACA interrupts are disabled before entering into LPM. * If LPM is allowed in host mode with accessory charger * connected or only accessory charger is connected, * there is a chance that charger is removed and we will * not know about it. * * REVISIT * * Allowing LPM in case of gadget bus suspend is tricky. * Bus suspend can happen in two states. * 1. 
ID_float: Allowing LPM has pros and cons. If LPM is allowed * and accessory charger is connected, we miss ID_float --> ID_C * transition where we could draw large amount of current * compared to the suspend current. * 2. ID_C: We can not allow LPM. If accessory charger is removed * we should not draw more than what host could supply which will * be less compared to accessory charger. * * For simplicity, LPM is not allowed in bus suspend. */ if ((test_bit(ID, &dev->inputs) && test_bit(B_SESS_VLD, &dev->inputs) && chg_type != USB_CHG_TYPE__WALLCHARGER) || test_bit(ID_A, &dev->inputs)) goto out; /* Disable ID_abc interrupts else it causes spurious interrupt */ disable_idabc(dev); #endif ulpi_read(dev, 0x14);/* clear PHY interrupt latch register */ /* * Turn on PHY comparators if, * 1. USB wall charger is connected (bus suspend is not supported) * 2. Host bus suspend * 3. host is supported, but, id is not routed to pmic * 4. peripheral is supported, but, vbus is not routed to pmic */ if ((dev->otg.gadget && chg_type == USB_CHG_TYPE__WALLCHARGER) || (dev->otg.host && is_host()) || (dev->otg.host && !dev->pmic_id_notif_supp) || (dev->otg.gadget && !dev->pmic_vbus_notif_supp)) { ulpi_write(dev, 0x01, 0x30); } ulpi_write(dev, 0x08, 0x09);/* turn off PLL on integrated phy */ timeout = jiffies + msecs_to_jiffies(500); disable_phy_clk(); while (!is_phy_clk_disabled()) { if (time_after(jiffies, timeout)) { pr_err("%s: Unable to suspend phy\n", __func__); /* * Start otg state machine in default state upon * phy suspend failure*/ spin_lock_irqsave(&dev->lock, flags); dev->otg.state = OTG_STATE_UNDEFINED; spin_unlock_irqrestore(&dev->lock, flags); queue_work(dev->wq, &dev->sm_work); goto out; } msleep(1); /* check if there are any pending interrupts*/ if (((readl(USB_OTGSC) & OTGSC_INTR_MASK) >> 8) & readl(USB_OTGSC)) { enable_idabc(dev); goto out; } } writel(readl(USB_USBCMD) | ASYNC_INTR_CTRL | ULPI_STP_CTRL, USB_USBCMD); if (dev->hs_pclk) clk_disable(dev->hs_pclk); if 
(dev->hs_cclk) clk_disable(dev->hs_cclk); /* usb phy no more require TCXO clock, hence vote for TCXO disable*/ ret = msm_xo_mode_vote(dev->xo_handle, MSM_XO_MODE_OFF); if (ret) pr_err("%s failed to devote for" "TCXO D1 buffer%d\n", __func__, ret); if (device_may_wakeup(dev->otg.dev)) { enable_irq_wake(dev->irq); if (dev->vbus_on_irq) enable_irq_wake(dev->vbus_on_irq); } msm_otg_vote_for_pclk_source(dev, 0); atomic_set(&dev->in_lpm, 1); if (!vbus && dev->pmic_vbus_notif_supp) { pr_debug("phy can power collapse: (%d)\n", can_phy_power_collapse(dev)); if (can_phy_power_collapse(dev) && dev->pdata->ldo_enable) { pr_debug("disabling the regulators\n"); dev->pdata->ldo_enable(0); } } /* phy can interrupts when vddcx is at 0.75, so irrespective * of pmic notification support, configure vddcx @0.75 */ if (dev->pdata->config_vddcx) dev->pdata->config_vddcx(0); pr_info("%s: usb in low power mode\n", __func__); out: enable_irq(dev->irq); return 0; } static int msm_otg_resume(struct msm_otg *dev) { unsigned temp; unsigned ret; if (!atomic_read(&dev->in_lpm)) return 0; /* vote for vddcx, as PHY cannot tolerate vddcx below 1.0V */ if (dev->pdata->config_vddcx) { ret = dev->pdata->config_vddcx(1); if (ret) { pr_err("%s: unable to enable vddcx digital core:%d\n", __func__, ret); } } if (dev->pdata->ldo_set_voltage) dev->pdata->ldo_set_voltage(3400); /* Vote for TCXO when waking up the phy */ ret = msm_xo_mode_vote(dev->xo_handle, MSM_XO_MODE_ON); if (ret) pr_err("%s failed to vote for" "TCXO D1 buffer%d\n", __func__, ret); msm_otg_vote_for_pclk_source(dev, 1); if (dev->hs_pclk) clk_enable(dev->hs_pclk); if (dev->hs_cclk) clk_enable(dev->hs_cclk); temp = readl(USB_USBCMD); temp &= ~ASYNC_INTR_CTRL; temp &= ~ULPI_STP_CTRL; writel(temp, USB_USBCMD); if (device_may_wakeup(dev->otg.dev)) { disable_irq_wake(dev->irq); if (dev->vbus_on_irq) disable_irq_wake(dev->vbus_on_irq); } atomic_set(&dev->in_lpm, 0); pr_info("%s: usb exited from low power mode\n", __func__); return 0; } static void 
msm_otg_get_resume(struct msm_otg *dev)
{
/* Resume helper: takes a runtime-PM reference (no implicit resume) and
 * resumes via runtime PM when available, else calls msm_otg_resume()
 * directly.  Paired with msm_otg_put_suspend() below.
 */
#ifdef CONFIG_PM_RUNTIME
	pm_runtime_get_noresume(dev->otg.dev);
	pm_runtime_resume(dev->otg.dev);
#else
	msm_otg_resume(dev);
#endif
}

/* Drop the runtime-PM reference (suspending synchronously), or suspend
 * directly when runtime PM is not compiled in.
 */
static void msm_otg_put_suspend(struct msm_otg *dev)
{
#ifdef CONFIG_PM_RUNTIME
	pm_runtime_put_sync(dev->otg.dev);
#else
	msm_otg_suspend(dev);
#endif
}

/*
 * msm_otg_resume_w - deferred resume work, scheduled from the IRQ
 * handler when an interrupt arrives while the controller is in LPM
 * (the hard IRQ cannot do the clock/vote work itself).
 */
static void msm_otg_resume_w(struct work_struct *w)
{
	struct msm_otg *dev = container_of(w, struct msm_otg,
			otg_resume_work);
	unsigned long timeout;

	msm_otg_get_resume(dev);

	if (!is_phy_clk_disabled())
		goto phy_resumed;

	/* poll up to ~100us for the PHY clock to come back */
	timeout = jiffies + usecs_to_jiffies(100);
	enable_phy_clk();
	while (is_phy_clk_disabled()) {
		if (time_after(jiffies, timeout)) {
			pr_err("%s: Unable to wakeup phy\n", __func__);
			/* Reset both phy and link */
			otg_reset(&dev->otg, 1);
			break;
		}
		udelay(10);
	}

phy_resumed:
	/* Enable Idabc interrupts as these were disabled before entering LPM */
	enable_idabc(dev);

	/* If resume signalling finishes before lpm exit, PCD is not set in
	 * USBSTS register. Drive resume signal to the downstream device now
	 * so that host driver can process the upcoming port change interrupt.*/
	if (is_host() || test_bit(ID_A, &dev->inputs))
		writel(readl(USB_PORTSC) | PORTSC_FPR, USB_PORTSC);

	/* Enable irq which was disabled before scheduling this work.
	 * But don't release wake_lock, as we got async interrupt and
	 * there will be some work pending for OTG state machine.
	 */
	enable_irq(dev->irq);
}

/*
 * msm_otg_set_suspend - otg_transceiver suspend/resume hook.
 *
 * @suspend non-zero: depending on the current OTG state either suspends
 * immediately, kicks the state machine, or starts protocol timers.
 * @suspend zero: handles remote wakeup / resume and (further down, in
 * the next chunk) wakes the PHY.  Returns 0, or -ENODEV when the
 * transceiver is not the registered singleton.
 */
static int msm_otg_set_suspend(struct otg_transceiver *xceiv, int suspend)
{
	struct msm_otg *dev = container_of(xceiv, struct msm_otg, otg);
	enum usb_otg_state state;
	unsigned long flags;

	if (!dev || (dev != the_msm_otg))
		return -ENODEV;

	/* snapshot state under the lock; decisions below use the copy */
	spin_lock_irqsave(&dev->lock, flags);
	state = dev->otg.state;
	spin_unlock_irqrestore(&dev->lock, flags);

	pr_debug("suspend request in state: %s\n", state_string(state));

	if (suspend) {
		switch (state) {
#ifndef CONFIG_MSM_OTG_ENABLE_A_WAIT_BCON_TIMEOUT
		case OTG_STATE_A_WAIT_BCON:
			/* no b-conn timeout configured: just go to LPM */
			msm_otg_put_suspend(dev);
			break;
#endif
		case OTG_STATE_A_HOST:
			clear_bit(A_BUS_REQ, &dev->inputs);
			wake_lock(&dev->wlock);
			queue_work(dev->wq, &dev->sm_work);
			break;
		case OTG_STATE_B_PERIPHERAL:
			if (xceiv->gadget->b_hnp_enable) {
				/* host suspended the bus with HNP enabled */
				set_bit(A_BUS_SUSPEND, &dev->inputs);
				set_bit(B_BUS_REQ, &dev->inputs);
				wake_lock(&dev->wlock);
				queue_work(dev->wq, &dev->sm_work);
			}
			break;
		case OTG_STATE_A_PERIPHERAL:
			msm_otg_start_timer(dev, TA_BIDL_ADIS,
					A_BIDL_ADIS);
			break;
		default:
			break;
		}
	} else {
		unsigned long timeout;

		switch (state) {
		case OTG_STATE_A_PERIPHERAL:
			/* A-peripheral observed activity on bus.
			 * clear A_BIDL_ADIS timer.
			 */
			msm_otg_del_timer(dev);
			break;
		case OTG_STATE_A_SUSPEND:
			/* Remote wakeup or resume */
			set_bit(A_BUS_REQ, &dev->inputs);
			spin_lock_irqsave(&dev->lock, flags);
			dev->otg.state = OTG_STATE_A_HOST;
			spin_unlock_irqrestore(&dev->lock, flags);
			/* ACA A-plug: top up charge current if below the
			 * minimum ID charger budget.
			 */
			if (test_bit(ID_A, &dev->inputs) &&
				(get_aca_bmaxpower(dev) < USB_IDCHG_MIN))
				msm_otg_set_power(xceiv,
					USB_IDCHG_MIN - get_aca_bmaxpower(dev));
			break;
		default:
			break;
		}

		/* nothing to do if hardware already matches the request */
		if (suspend == atomic_read(&dev->in_lpm))
			return 0;

		disable_irq(dev->irq);
		if (dev->pmic_vbus_notif_supp)
			if (can_phy_power_collapse(dev) &&
					dev->pdata->ldo_enable)
				dev->pdata->ldo_enable(1);
		msm_otg_get_resume(dev);
		if (!is_phy_clk_disabled())
			goto out;
		/* poll up to ~100us for the PHY clock, else hard-reset */
		timeout = jiffies + usecs_to_jiffies(100);
		enable_phy_clk();
		while (is_phy_clk_disabled()) {
			if (time_after(jiffies, timeout)) {
				pr_err("%s: Unable to wakeup phy\n", __func__);
				/* Reset both phy and link */
				otg_reset(&dev->otg, 1);
				break;
			}
			udelay(10);
		}
out:
		enable_idabc(dev);
		enable_irq(dev->irq);
	}

	return 0;
}

/*
 * msm_otg_set_peripheral - register/unregister the gadget driver with
 * the transceiver.  A NULL @gadget unregisters: stops the peripheral,
 * disables session-valid (and ID) interrupts when no host is bound.
 * Registration kicks the OTG state machine.  Returns 0 or -ENODEV.
 */
static int msm_otg_set_peripheral(struct otg_transceiver *xceiv,
			struct usb_gadget *gadget)
{
	struct msm_otg *dev = container_of(xceiv, struct msm_otg, otg);

	if (!dev || (dev != the_msm_otg))
		return -ENODEV;

	if (!gadget) {
		msm_otg_start_peripheral(xceiv, 0);
		dev->otg.gadget = 0;
		disable_sess_valid(dev);
		if (!dev->otg.host)
			disable_idabc(dev);
		return 0;
	}
	dev->otg.gadget = gadget;
	pr_info("peripheral driver registered w/ tranceiver\n");

	wake_lock(&dev->wlock);
	queue_work(dev->wq, &dev->sm_work);
	return 0;
}

#ifdef CONFIG_USB_EHCI_MSM
/*
 * usbdev_notify - USB core notifier: tracks add/remove of the device
 * on the root port to drive the B_CONN input and the ACA bMaxPower
 * bookkeeping.  Always returns NOTIFY_OK.
 */
static int usbdev_notify(struct notifier_block *self,
			unsigned long action, void *device)
{
	enum usb_otg_state state;
	struct msm_otg *dev = container_of(self, struct msm_otg, usbdev_nb);
	struct usb_device *udev = device;
	int work = 1;

	/* Interested in only devices directly connected
	 * to root hub directly.
	 */
	if (!udev->parent || udev->parent->parent)
		goto out;

	spin_lock_irq(&dev->lock);
	state = dev->otg.state;
	spin_unlock_irq(&dev->lock);

	switch (state) {
	case OTG_STATE_A_WAIT_BCON:
		if (action == USB_DEVICE_ADD) {
			pr_debug("B_CONN set\n");
			set_bit(B_CONN, &dev->inputs);
			if (udev->actconfig) {
				/* bMaxPower is in 2mA units */
				set_aca_bmaxpower(dev,
					udev->actconfig->desc.bMaxPower * 2);
				goto do_work;
			}
			if (udev->portnum == udev->bus->otg_port)
				set_aca_bmaxpower(dev, USB_IB_UNCFG);
			else
				set_aca_bmaxpower(dev, 100);
		}
		break;
	case OTG_STATE_A_HOST:
		if (action == USB_DEVICE_REMOVE) {
			pr_debug("B_CONN clear\n");
			clear_bit(B_CONN, &dev->inputs);
			set_aca_bmaxpower(dev, 0);
		}
		break;
	default:
		work = 0;
		break;
	}
do_work:
	if (work) {
		wake_lock(&dev->wlock);
		queue_work(dev->wq, &dev->sm_work);
	}
out:
	return NOTIFY_OK;
}

/*
 * msm_otg_set_host - register/unregister the host (HCD) with the
 * transceiver.  NULL @host unregisters: stops the host, removes the
 * usbdev notifier, and disables ID interrupts (and ID/ABC when no
 * gadget either).  Returns 0 or -ENODEV.
 */
static int msm_otg_set_host(struct otg_transceiver *xceiv,
			struct usb_bus *host)
{
	struct msm_otg *dev = container_of(xceiv, struct msm_otg, otg);

	if (!dev || (dev != the_msm_otg))
		return -ENODEV;

	if (!dev->start_host)
		return -ENODEV;

	if (!host) {
		msm_otg_start_host(xceiv, REQUEST_STOP);
		usb_unregister_notify(&dev->usbdev_nb);
		dev->otg.host = 0;
		dev->start_host = 0;
		disable_idgnd(dev);
		if (!dev->otg.gadget)
			disable_idabc(dev);
		return 0;
	}
#ifdef CONFIG_USB_OTG
	host->otg_port = 1;
#endif
	dev->usbdev_nb.notifier_call = usbdev_notify;
	usb_register_notify(&dev->usbdev_nb);
	dev->otg.host = host;
	pr_info("host driver registered w/ tranceiver\n");

#ifndef CONFIG_USB_MSM_72K
	wake_lock(&dev->wlock);
	queue_work(dev->wq, &dev->sm_work);
#endif
	return 0;
}
#endif

/*
 * msm_otg_set_id_state - PMIC-driven ID pin notification.
 * @id non-zero means ID floating (B-device); zero means ID grounded
 * (A-device) and also asserts a_bus_req so VBUS comes up without user
 * intervention.  Kicks the state machine on any change.
 */
void msm_otg_set_id_state(int id)
{
	struct msm_otg *dev = the_msm_otg;

	if (id == dev->pmic_id_status)
		return;

	wake_lock(&dev->wlock);
	if (id) {
		set_bit(ID, &dev->inputs);
		dev->pmic_id_status = 1;
	} else {
		clear_bit(ID, &dev->inputs);
		set_bit(A_BUS_REQ, &dev->inputs);
		dev->pmic_id_status = 0;
	}
	queue_work(dev->wq, &dev->sm_work);
}

/*
 * msm_otg_set_vbus_state - PMIC VBUS notification.  Only acts when the
 * controller is in LPM and VBUS went high (otherwise the controller's
 * own BSV interrupt handles it).
 */
void msm_otg_set_vbus_state(int online)
{
	struct msm_otg *dev = the_msm_otg;

	if (!atomic_read(&dev->in_lpm) || !online)
		return;
	wake_lock(&dev->wlock);
	set_bit(B_SESS_VLD, &dev->inputs);
	queue_work(dev->wq, &dev->sm_work);
}

/*
 * msm_otg_irq - main OTG interrupt handler.
 *
 * In LPM: defers everything to otg_resume_work (registers are not
 * accessible).  Otherwise decodes OTGSC (ID, BSV, DPIS) and USBSTS
 * (port-change) events into state-machine input bits and schedules
 * sm_work.  Returns IRQ_NONE for interrupts it does not consume
 * (including port-change, which the HCD acks).
 */
static irqreturn_t msm_otg_irq(int irq, void *data)
{
	struct msm_otg *dev = data;
	u32 otgsc, sts, pc, sts_mask;
	irqreturn_t ret = IRQ_HANDLED;
	int work = 0;
	enum usb_otg_state state;

	if (atomic_read(&dev->in_lpm)) {
		/* can't touch the hardware yet; resume via workqueue */
		disable_irq_nosync(dev->irq);
		wake_lock(&dev->wlock);
		queue_work(dev->wq, &dev->otg_resume_work);
		goto out;
	}

	/* Return immediately if instead of ID pin, USER controls mode switch */
	if (dev->pdata->otg_mode == OTG_USER_CONTROL)
		return IRQ_NONE;

	otgsc = readl(USB_OTGSC);
	sts = readl(USB_USBSTS);

	/* interrupt-enable bits sit 8 above the status bits in OTGSC */
	sts_mask = (otgsc & OTGSC_INTR_MASK) >> 8;

	if (!((otgsc & sts_mask) || (sts & STS_PCI))) {
		ret = IRQ_NONE;
		goto out;
	}

	spin_lock(&dev->lock);
	state = dev->otg.state;
	spin_unlock(&dev->lock);

	pr_debug("IRQ state: %s\n", state_string(state));
	pr_debug("otgsc = %x\n", otgsc);

	if ((otgsc & OTGSC_IDIE) && (otgsc & OTGSC_IDIS)) {
		if (otgsc & OTGSC_ID) {
			pr_debug("Id set\n");
			set_bit(ID, &dev->inputs);
		} else {
			pr_debug("Id clear\n");
			/* Assert a_bus_req to supply power on
			 * VBUS when Micro/Mini-A cable is connected
			 * with out user intervention.
			 */
			set_bit(A_BUS_REQ, &dev->inputs);
			clear_bit(ID, &dev->inputs);
		}
		writel(otgsc, USB_OTGSC);	/* ack by writing back */
		work = 1;
	} else if (otgsc & OTGSC_BSVIS) {
		writel(otgsc, USB_OTGSC);
		/* BSV interrupt comes when operating as an A-device
		 * (VBUS on/off).
		 * But, handle BSV when charger is removed from ACA in ID_A
		 */
		if ((state >= OTG_STATE_A_IDLE) &&
			!test_bit(ID_A, &dev->inputs))
			goto out;
		if (otgsc & OTGSC_BSV) {
			pr_debug("BSV set\n");
			set_bit(B_SESS_VLD, &dev->inputs);
		} else {
			pr_debug("BSV clear\n");
			clear_bit(B_SESS_VLD, &dev->inputs);
		}
		work = 1;
	} else if (otgsc & OTGSC_DPIS) {
		/* data-pulse detected: a B-device is requesting a session */
		pr_debug("DPIS detected\n");
		writel(otgsc, USB_OTGSC);
		set_bit(A_SRP_DET, &dev->inputs);
		set_bit(A_BUS_REQ, &dev->inputs);
		work = 1;
	} else if (sts & STS_PCI) {
		pc = readl(USB_PORTSC);
		pr_debug("portsc = %x\n", pc);
		ret = IRQ_NONE;
		/* HCD Acks PCI interrupt. We use this to switch
		 * between different OTG states.
		 */
		work = 1;
		switch (state) {
		case OTG_STATE_A_SUSPEND:
			if (dev->otg.host->b_hnp_enable &&
					(pc & PORTSC_CSC) &&
					!(pc & PORTSC_CCS)) {
				pr_debug("B_CONN clear\n");
				clear_bit(B_CONN, &dev->inputs);
			}
			break;
		case OTG_STATE_B_WAIT_ACON:
			if ((pc & PORTSC_CSC) && (pc & PORTSC_CCS)) {
				pr_debug("A_CONN set\n");
				set_bit(A_CONN, &dev->inputs);
				/* Clear ASE0_BRST timer */
				msm_otg_del_timer(dev);
			}
			break;
		case OTG_STATE_B_HOST:
			if ((pc & PORTSC_CSC) && !(pc & PORTSC_CCS)) {
				pr_debug("A_CONN clear\n");
				clear_bit(A_CONN, &dev->inputs);
			}
			break;
		default:
			work = 0;
			break;
		}
	}
	if (work) {
#ifdef CONFIG_USB_MSM_ACA
		/* With ACA, ID can change bcoz of BSVIS as well, so update */
		if ((otgsc & OTGSC_IDIS) || (otgsc & OTGSC_BSVIS))
			set_aca_id_inputs(dev);
#endif
		wake_lock(&dev->wlock);
		queue_work(dev->wq, &dev->sm_work);
	}
out:
	return ret;
}

#define ULPI_VERIFY_MAX_LOOP_COUNT 5
#define PHY_CALIB_RETRY_COUNT 10

/*
 * phy_clk_reset - pulse the PHY reset clock (assert, 1ms, deassert,
 * 1ms).  The polarity is inverted on some boards via
 * phy_reset_sig_inverted.
 */
static void phy_clk_reset(struct msm_otg *dev)
{
	unsigned rc;
	enum clk_reset_action assert = CLK_RESET_ASSERT;

	if (dev->pdata->phy_reset_sig_inverted)
		assert = CLK_RESET_DEASSERT;

	rc = clk_reset(dev->phy_reset_clk, assert);
	if (rc) {
		pr_err("%s: phy clk assert failed\n", __func__);
		return;
	}

	msleep(1);

	rc = clk_reset(dev->phy_reset_clk, !assert);
	if (rc) {
		pr_err("%s: phy clk deassert failed\n", __func__);
		return;
	}

	msleep(1);
}

/*
 * ulpi_read_with_reset - ULPI read with retry; a read of 0xffffffff is
 * treated as a failed/wedged PHY and triggers phy_clk_reset() before
 * retrying.  Returns the register value, or (unsigned)-1 after
 * ULPI_VERIFY_MAX_LOOP_COUNT failures.
 */
static unsigned ulpi_read_with_reset(struct msm_otg *dev, unsigned reg)
{
	int temp;
	unsigned res;

	for (temp = 0; temp < ULPI_VERIFY_MAX_LOOP_COUNT; temp++) {
		res = ulpi_read(dev, reg);
		if (res != 0xffffffff)
			return res;

		phy_clk_reset(dev);
	}

	pr_err("%s: ulpi read failed for %d times\n",
			__func__, ULPI_VERIFY_MAX_LOOP_COUNT);

	return -1;
}

/*
 * ulpi_write_with_reset - ULPI write with the same reset-and-retry
 * policy as ulpi_read_with_reset().  Returns 0 on success, -1 on
 * exhausted retries.
 */
static int ulpi_write_with_reset(struct msm_otg *dev,
		unsigned val, unsigned reg)
{
	int temp, res;

	for (temp = 0; temp < ULPI_VERIFY_MAX_LOOP_COUNT; temp++) {
		res = ulpi_write(dev, val, reg);
		if (!res)
			return 0;
		phy_clk_reset(dev);
	}
	pr_err("%s: ulpi write failed for %d times\n",
			__func__, ULPI_VERIFY_MAX_LOOP_COUNT);

	return -1;
}

/* some of the older targets does not turn off the PLL
 * if onclock bit is set and clocksuspendM bit is on,
 * hence clear them too and initiate the suspend mode
 * by clearing SupendM bit.
 */
static inline int turn_off_phy_pll(struct msm_otg *dev)
{
	unsigned res;

	res = ulpi_read_with_reset(dev, ULPI_CONFIG_REG1);
	if (res == 0xffffffff)
		return -ETIMEDOUT;

	res = ulpi_write_with_reset(dev,
		res & ~(ULPI_ONCLOCK), ULPI_CONFIG_REG1);
	if (res)
		return -ETIMEDOUT;

	res = ulpi_write_with_reset(dev,
		ULPI_CLOCK_SUSPENDM, ULPI_IFC_CTRL_CLR);
	if (res)
		return -ETIMEDOUT;

	/*Clear SuspendM bit to initiate suspend mode */
	res = ulpi_write_with_reset(dev,
		ULPI_SUSPENDM, ULPI_FUNC_CTRL_CLR);
	if (res)
		return -ETIMEDOUT;

	return res;
}

/*
 * check_phy_caliberation - read the ULPI debug register and report
 * whether the PHY calibration looks good: 0 when the CALIB status bit
 * is clear and the calibration value is non-zero, otherwise -1
 * (or -ETIMEDOUT when the read itself fails).
 */
static inline int check_phy_caliberation(struct msm_otg *dev)
{
	unsigned res;

	res = ulpi_read_with_reset(dev, ULPI_DEBUG);
	if (res == 0xffffffff)
		return -ETIMEDOUT;

	if (!(res & ULPI_CALIB_STS) && ULPI_CALIB_VAL(res))
		return 0;

	return -1;
}

/*
 * msm_otg_phy_caliberate - repeatedly power-cycle the PHY PLL and
 * re-check calibration, up to PHY_CALIB_RETRY_COUNT attempts.
 * Returns 0 on success, a negative error otherwise.
 */
static int msm_otg_phy_caliberate(struct msm_otg *dev)
{
	int i = 0;
	unsigned long res;

	do {
		res = turn_off_phy_pll(dev);
		if (res)
			return -ETIMEDOUT;

		/* bring phy out of suspend */
		phy_clk_reset(dev);

		res = check_phy_caliberation(dev);
		if (!res)
			return res;
		i++;
	} while (i < PHY_CALIB_RETRY_COUNT);

	return res;
}

/*
 * msm_otg_phy_reset - full PHY reset: HS clock assert/deassert around
 * a PHY clock pulse, ULPI PHY selection, calibration, then a link
 * controller reset polled to completion.  Returns 0 on success,
 * negative/-1 on failure.
 */
static int msm_otg_phy_reset(struct msm_otg *dev)
{
	unsigned rc;
	unsigned temp;
	unsigned long timeout;

	rc = clk_reset(dev->hs_clk, CLK_RESET_ASSERT);
	if (rc) {
		pr_err("%s: usb hs clk assert failed\n", __func__);
		return -1;
	}

	phy_clk_reset(dev);

	rc = clk_reset(dev->hs_clk, CLK_RESET_DEASSERT);
	if (rc) {
		pr_err("%s: usb hs clk deassert failed\n", __func__);
		return -1;
	}

	/* select ULPI phy */
	temp = (readl(USB_PORTSC) & ~PORTSC_PTS);
	writel(temp | PORTSC_PTS_ULPI, USB_PORTSC);

	rc = msm_otg_phy_caliberate(dev);
	if (rc)
		return rc;

	/* TBD: There are two link resets. One is below and other one
	 * is done immediately after this function. See if we can
	 * eliminate one of these.
	 */
	writel(USBCMD_RESET, USB_USBCMD);
	timeout = jiffies + USB_LINK_RESET_TIMEOUT;
	do {
		if (time_after(jiffies, timeout)) {
			pr_err("msm_otg: usb link reset timeout\n");
			break;
		}
		msleep(1);
	} while (readl(USB_USBCMD) & USBCMD_RESET);

	if (readl(USB_USBCMD) & USBCMD_RESET) {
		pr_err("%s: usb core reset failed\n", __func__);
		return -1;
	}

	return 0;
}

/*
 * otg_reset - reset the link controller and, when @phy_reset is set,
 * the PHY as well.  Reprograms PHY tuning (pre-emphasis, CDR, drive
 * amplitude, SE1 gating), AHB modes, and USBMODE (host vs device per
 * the ID input), then re-arms the session-valid / ID interrupts and
 * recovers any ID/BSV edges missed during the 100ms quiet window.
 */
static void otg_reset(struct otg_transceiver *xceiv, int phy_reset)
{
	struct msm_otg *dev = container_of(xceiv, struct msm_otg, otg);
	unsigned long timeout;
	u32 mode, work = 0;

	clk_enable(dev->hs_clk);

	if (!phy_reset)
		goto reset_link;

	if (dev->pdata->phy_reset)
		dev->pdata->phy_reset(dev->regs);
	else
		msm_otg_phy_reset(dev);

	/*disable all phy interrupts*/
	ulpi_write(dev, 0xFF, 0x0F);
	ulpi_write(dev, 0xFF, 0x12);
	msleep(100);

reset_link:
	writel(USBCMD_RESET, USB_USBCMD);
	timeout = jiffies + USB_LINK_RESET_TIMEOUT;
	do {
		if (time_after(jiffies, timeout)) {
			pr_err("msm_otg: usb link reset timeout\n");
			break;
		}
		msleep(1);
	} while (readl(USB_USBCMD) & USBCMD_RESET);

	/* select ULPI phy */
	writel(0x80000000, USB_PORTSC);

	set_pre_emphasis_level(dev);
	set_cdr_auto_reset(dev);
	set_driver_amplitude(dev);
	set_se1_gating(dev);

	writel(0x0, USB_AHB_BURST);
	writel(0x00, USB_AHB_MODE);
	clk_disable(dev->hs_clk);

	if ((xceiv->gadget && xceiv->gadget->is_a_peripheral) ||
			test_bit(ID, &dev->inputs))
		mode = USBMODE_SDIS | USBMODE_DEVICE;
	else
		mode = USBMODE_SDIS | USBMODE_HOST;
	writel(mode, USB_USBMODE);

	if (dev->otg.gadget) {
		enable_sess_valid(dev);
		/* Due to the above 100ms delay, interrupts from PHY are
		 * sometimes missed during fast plug-in/plug-out of cable.
		 * Check for such cases here.
		 */
		if (is_b_sess_vld() && !test_bit(B_SESS_VLD, &dev->inputs)) {
			pr_debug("%s: handle missing BSV event\n", __func__);
			set_bit(B_SESS_VLD, &dev->inputs);
			work = 1;
		} else if (!is_b_sess_vld() &&
				test_bit(B_SESS_VLD, &dev->inputs)) {
			pr_debug("%s: handle missing !BSV event\n", __func__);
			clear_bit(B_SESS_VLD, &dev->inputs);
			work = 1;
		}
	}

#ifdef CONFIG_USB_EHCI_MSM
	if (dev->otg.host && !dev->pmic_id_notif_supp) {
		enable_idgnd(dev);
		/* Handle missing ID_GND interrupts during fast PIPO */
		if (is_host() && test_bit(ID, &dev->inputs)) {
			pr_debug("%s: handle missing ID_GND event\n",
					__func__);
			clear_bit(ID, &dev->inputs);
			work = 1;
		} else if (!is_host() && !test_bit(ID, &dev->inputs)) {
			pr_debug("%s: handle missing !ID_GND event\n",
					__func__);
			set_bit(ID, &dev->inputs);
			work = 1;
		}
	} else {
		disable_idgnd(dev);
	}
#endif

	enable_idabc(dev);

	if (work) {
		wake_lock(&dev->wlock);
		queue_work(dev->wq, &dev->sm_work);
	}
}

/*
 * msm_otg_sm_work - the OTG state machine.
 *
 * Runs on dev->wq; the sole writer of dev->otg.state (written under
 * dev->lock).  Inputs are the dev->inputs bits (ID, BSV, A_CONN, ...)
 * set by the IRQ handler, notifiers, timers and sysfs; timeouts arrive
 * via dev->tmouts.  Setting work=1 re-queues itself for another pass.
 * Releases the wakelock only when no IRQ/timer work is pending.
 */
static void msm_otg_sm_work(struct work_struct *w)
{
	struct msm_otg *dev = container_of(w, struct msm_otg, sm_work);
	enum chg_type chg_type = atomic_read(&dev->chg_type);
	int ret;
	int work = 0;
	enum usb_otg_state state;

	/* state machine must run with the hardware awake */
	if (atomic_read(&dev->in_lpm))
		msm_otg_set_suspend(&dev->otg, 0);

	spin_lock_irq(&dev->lock);
	state = dev->otg.state;
	spin_unlock_irq(&dev->lock);

	switch (state) {
	case OTG_STATE_UNDEFINED:
		/* Reset both phy and link */
		otg_reset(&dev->otg, 1);

#ifdef CONFIG_USB_MSM_ACA
		set_aca_id_inputs(dev);
#endif
		/* seed the initial ID/BSV inputs from platform mode or
		 * from the live PHY state
		 */
		if (dev->pdata->otg_mode == OTG_USER_CONTROL) {
			if ((dev->pdata->usb_mode == USB_PERIPHERAL_MODE) ||
					!dev->otg.host) {
				set_bit(ID, &dev->inputs);
				set_bit(B_SESS_VLD, &dev->inputs);
			}
		} else {
			if (!dev->otg.host || !is_host())
				set_bit(ID, &dev->inputs);

			if (dev->otg.gadget && is_b_sess_vld())
				set_bit(B_SESS_VLD, &dev->inputs);
		}

		spin_lock_irq(&dev->lock);
		if ((test_bit(ID, &dev->inputs)) &&
				!test_bit(ID_A, &dev->inputs)) {
			dev->otg.state = OTG_STATE_B_IDLE;
		} else {
			set_bit(A_BUS_REQ, &dev->inputs);
			dev->otg.state = OTG_STATE_A_IDLE;
		}
		spin_unlock_irq(&dev->lock);

		work = 1;
		break;
	case OTG_STATE_B_IDLE:
		dev->otg.default_a = 0;
		if (!test_bit(ID, &dev->inputs) ||
				test_bit(ID_A, &dev->inputs)) {
			/* ID grounded (or ACA A-plug): become A-device */
			pr_debug("!id || id_A\n");
			clear_bit(B_BUS_REQ, &dev->inputs);
			otg_reset(&dev->otg, 0);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_IDLE;
			spin_unlock_irq(&dev->lock);
			msm_otg_set_power(&dev->otg, 0);
			work = 1;
		} else if (test_bit(B_SESS_VLD, &dev->inputs) &&
				!test_bit(ID_B, &dev->inputs)) {
			pr_debug("b_sess_vld\n");
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_B_PERIPHERAL;
			spin_unlock_irq(&dev->lock);
			msm_otg_set_power(&dev->otg, 0);
			msm_otg_start_peripheral(&dev->otg, 1);
		} else if (test_bit(B_BUS_REQ, &dev->inputs)) {
			pr_debug("b_sess_end && b_bus_req\n");
			ret = msm_otg_start_srp(&dev->otg);
			if (ret < 0) {
				/* notify user space */
				clear_bit(B_BUS_REQ, &dev->inputs);
				work = 1;
				break;
			}
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_B_SRP_INIT;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_timer(dev, TB_SRP_FAIL, B_SRP_FAIL);
			break;
		} else if (test_bit(ID_B, &dev->inputs)) {
			/* ACA B-plug: charge at the ID charger maximum */
			atomic_set(&dev->chg_type, USB_CHG_TYPE__SDP);
			msm_otg_set_power(&dev->otg, USB_IDCHG_MAX);
		} else {
			msm_otg_set_power(&dev->otg, 0);
			pr_debug("entering into lpm\n");
			msm_otg_put_suspend(dev);

			if (dev->pdata->ldo_set_voltage)
				dev->pdata->ldo_set_voltage(3075);
		}
		break;
	case OTG_STATE_B_SRP_INIT:
		if (!test_bit(ID, &dev->inputs) ||
				test_bit(ID_A, &dev->inputs) ||
				test_bit(ID_C, &dev->inputs) ||
				(test_bit(B_SESS_VLD, &dev->inputs) &&
				!test_bit(ID_B, &dev->inputs))) {
			pr_debug("!id || id_a/c || b_sess_vld+!id_b\n");
			msm_otg_del_timer(dev);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_B_IDLE;
			spin_unlock_irq(&dev->lock);
			work = 1;
		} else if (test_bit(B_SRP_FAIL, &dev->tmouts)) {
			/* SRP went unanswered within TB_SRP_FAIL */
			pr_debug("b_srp_fail\n");
			/* notify user space */
			msm_otg_send_event(&dev->otg,
				OTG_EVENT_NO_RESP_FOR_SRP);
			clear_bit(B_BUS_REQ, &dev->inputs);
			clear_bit(B_SRP_FAIL, &dev->tmouts);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_B_IDLE;
			spin_unlock_irq(&dev->lock);
			dev->b_last_se0_sess = jiffies;
			work = 1;
		}
		break;
	case OTG_STATE_B_PERIPHERAL:
		if (!test_bit(ID, &dev->inputs) ||
				test_bit(ID_A, &dev->inputs) ||
				test_bit(ID_B, &dev->inputs) ||
				!test_bit(B_SESS_VLD, &dev->inputs)) {
			pr_debug("!id || id_a/b || !b_sess_vld\n");
			clear_bit(B_BUS_REQ, &dev->inputs);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_B_IDLE;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_peripheral(&dev->otg, 0);
			dev->b_last_se0_sess = jiffies;

			/* Workaround: Reset phy after session */
			otg_reset(&dev->otg, 1);

			/* come back later to put hardware in
			 * lpm. This removes addition checks in
			 * suspend routine for missing BSV
			 */
			work = 1;
		} else if (test_bit(B_BUS_REQ, &dev->inputs) &&
				dev->otg.gadget->b_hnp_enable &&
				test_bit(A_BUS_SUSPEND, &dev->inputs)) {
			/* HNP: take over the host role from the A-device */
			pr_debug("b_bus_req && b_hnp_en && a_bus_suspend\n");
			msm_otg_start_timer(dev, TB_ASE0_BRST, B_ASE0_BRST);
			msm_otg_start_peripheral(&dev->otg, 0);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_B_WAIT_ACON;
			spin_unlock_irq(&dev->lock);
			/* start HCD even before A-device enable
			 * pull-up to meet HNP timings.
			 */
			dev->otg.host->is_b_host = 1;
			msm_otg_start_host(&dev->otg, REQUEST_START);
		} else if (test_bit(ID_C, &dev->inputs)) {
			atomic_set(&dev->chg_type, USB_CHG_TYPE__SDP);
			msm_otg_set_power(&dev->otg, USB_IDCHG_MAX);
		} else if (chg_type == USB_CHG_TYPE__WALLCHARGER) {
#ifdef CONFIG_USB_MSM_ACA
			del_timer_sync(&dev->id_timer);
#endif
			/* Workaround: Reset PHY in SE1 state */
			otg_reset(&dev->otg, 1);
			pr_debug("entering into lpm with wall-charger\n");
			msm_otg_put_suspend(dev);
			/* Allow idle power collapse */
			otg_pm_qos_update_latency(dev, 0);
		}
		break;
	case OTG_STATE_B_WAIT_ACON:
		if (!test_bit(ID, &dev->inputs) ||
				test_bit(ID_A, &dev->inputs) ||
				test_bit(ID_B, &dev->inputs) ||
				!test_bit(B_SESS_VLD, &dev->inputs)) {
			pr_debug("!id || id_a/b || !b_sess_vld\n");
			msm_otg_del_timer(dev);
			/* A-device is physically disconnected during
			 * HNP. Remove HCD.
			 */
			msm_otg_start_host(&dev->otg, REQUEST_STOP);
			dev->otg.host->is_b_host = 0;

			clear_bit(B_BUS_REQ, &dev->inputs);
			clear_bit(A_BUS_SUSPEND, &dev->inputs);
			dev->b_last_se0_sess = jiffies;
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_B_IDLE;
			spin_unlock_irq(&dev->lock);

			/* Workaround: Reset phy after session */
			otg_reset(&dev->otg, 1);
			work = 1;
		} else if (test_bit(A_CONN, &dev->inputs)) {
			pr_debug("a_conn\n");
			clear_bit(A_BUS_SUSPEND, &dev->inputs);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_B_HOST;
			spin_unlock_irq(&dev->lock);
			if (test_bit(ID_C, &dev->inputs)) {
				atomic_set(&dev->chg_type, USB_CHG_TYPE__SDP);
				msm_otg_set_power(&dev->otg, USB_IDCHG_MAX);
			}
		} else if (test_bit(B_ASE0_BRST, &dev->tmouts)) {
			/* TODO: A-device may send reset after
			 * enabling HNP; a_bus_resume case is
			 * not handled for now.
			 */
			pr_debug("b_ase0_brst_tmout\n");
			msm_otg_send_event(&dev->otg,
				OTG_EVENT_HNP_FAILED);
			msm_otg_start_host(&dev->otg, REQUEST_STOP);
			dev->otg.host->is_b_host = 0;
			clear_bit(B_ASE0_BRST, &dev->tmouts);
			clear_bit(A_BUS_SUSPEND, &dev->inputs);
			clear_bit(B_BUS_REQ, &dev->inputs);

			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_B_PERIPHERAL;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_peripheral(&dev->otg, 1);
		} else if (test_bit(ID_C, &dev->inputs)) {
			atomic_set(&dev->chg_type, USB_CHG_TYPE__SDP);
			msm_otg_set_power(&dev->otg, USB_IDCHG_MAX);
		}
		break;
	case OTG_STATE_B_HOST:
		/* B_BUS_REQ is not exposed to user space. So
		 * it must be A_CONN for now.
		 */
		if (!test_bit(B_BUS_REQ, &dev->inputs) ||
				!test_bit(A_CONN, &dev->inputs)) {
			pr_debug("!b_bus_req || !a_conn\n");
			clear_bit(A_CONN, &dev->inputs);
			clear_bit(B_BUS_REQ, &dev->inputs);

			msm_otg_start_host(&dev->otg, REQUEST_STOP);
			dev->otg.host->is_b_host = 0;

			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_B_IDLE;
			spin_unlock_irq(&dev->lock);
			/* Workaround: Reset phy after session */
			otg_reset(&dev->otg, 1);
			work = 1;
		} else if (test_bit(ID_C, &dev->inputs)) {
			atomic_set(&dev->chg_type, USB_CHG_TYPE__SDP);
			msm_otg_set_power(&dev->otg, USB_IDCHG_MAX);
		}
		break;
	case OTG_STATE_A_IDLE:
		dev->otg.default_a = 1;
		if (test_bit(ID, &dev->inputs) &&
				!test_bit(ID_A, &dev->inputs)) {
			pr_debug("id && !id_a\n");
			dev->otg.default_a = 0;
			otg_reset(&dev->otg, 0);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_B_IDLE;
			spin_unlock_irq(&dev->lock);
			msm_otg_set_power(&dev->otg, 0);
			work = 1;
		} else if (!test_bit(A_BUS_DROP, &dev->inputs) &&
				(test_bit(A_SRP_DET, &dev->inputs) ||
				 test_bit(A_BUS_REQ, &dev->inputs))) {
			pr_debug("!a_bus_drop && (a_srp_det || a_bus_req)\n");

			clear_bit(A_SRP_DET, &dev->inputs);
			/* Disable SRP detection */
			writel((readl(USB_OTGSC) & ~OTGSC_INTR_STS_MASK) &
					~OTGSC_DPIE, USB_OTGSC);

			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_WAIT_VRISE;
			spin_unlock_irq(&dev->lock);
			/* ACA: ID_A: Stop charging untill enumeration */
			if (test_bit(ID_A, &dev->inputs))
				msm_otg_set_power(&dev->otg, 0);
			else
				dev->pdata->vbus_power(USB_PHY_INTEGRATED, 1);
			msm_otg_start_timer(dev, TA_WAIT_VRISE, A_WAIT_VRISE);
			/* no need to schedule work now */
		} else {
			pr_debug("No session requested\n");

			/* A-device is not providing power on VBUS.
			 * Enable SRP detection.
			 */
			writel((readl(USB_OTGSC) & ~OTGSC_INTR_STS_MASK) |
					OTGSC_DPIE, USB_OTGSC);
			msm_otg_put_suspend(dev);
		}
		break;
	case OTG_STATE_A_WAIT_VRISE:
		if ((test_bit(ID, &dev->inputs) &&
				!test_bit(ID_A, &dev->inputs)) ||
				test_bit(A_BUS_DROP, &dev->inputs) ||
				test_bit(A_WAIT_VRISE, &dev->tmouts)) {
			pr_debug("id || a_bus_drop || a_wait_vrise_tmout\n");
			clear_bit(A_BUS_REQ, &dev->inputs);
			msm_otg_del_timer(dev);
			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_WAIT_VFALL;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_timer(dev, TA_WAIT_VFALL, A_WAIT_VFALL);
		} else if (test_bit(A_VBUS_VLD, &dev->inputs)) {
			pr_debug("a_vbus_vld\n");
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_WAIT_BCON;
			spin_unlock_irq(&dev->lock);
			if (TA_WAIT_BCON > 0)
				msm_otg_start_timer(dev, TA_WAIT_BCON,
					A_WAIT_BCON);
			/* Start HCD to detect peripherals. */
			msm_otg_start_host(&dev->otg, REQUEST_START);
		}
		break;
	case OTG_STATE_A_WAIT_BCON:
		if ((test_bit(ID, &dev->inputs) &&
				!test_bit(ID_A, &dev->inputs)) ||
				test_bit(A_BUS_DROP, &dev->inputs) ||
				test_bit(A_WAIT_BCON, &dev->tmouts)) {
			/* NOTE(review): concatenated strings lack a space,
			 * message prints "...a_bus_drop ||a_wait_bcon_tmout".
			 */
			pr_debug("id_f/b/c || a_bus_drop ||"
					"a_wait_bcon_tmout\n");
			if (test_bit(A_WAIT_BCON, &dev->tmouts))
				msm_otg_send_event(&dev->otg,
					OTG_EVENT_DEV_CONN_TMOUT);
			msm_otg_del_timer(dev);
			clear_bit(A_BUS_REQ, &dev->inputs);
			msm_otg_start_host(&dev->otg, REQUEST_STOP);
			/* Reset both phy and link */
			otg_reset(&dev->otg, 1);
			/* ACA: ID_A with NO accessory, just the A plug is
			 * attached to ACA: Use IDCHG_MAX for charging
			 */
			if (test_bit(ID_A, &dev->inputs))
				msm_otg_set_power(&dev->otg, USB_IDCHG_MAX);
			else
				dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_WAIT_VFALL;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_timer(dev, TA_WAIT_VFALL, A_WAIT_VFALL);
		} else if (test_bit(B_CONN, &dev->inputs)) {
			pr_debug("b_conn\n");
			msm_otg_del_timer(dev);
			/* HCD is added already. just move to
			 * A_HOST state.
			 */
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_HOST;
			spin_unlock_irq(&dev->lock);
			if (test_bit(ID_A, &dev->inputs)) {
				atomic_set(&dev->chg_type, USB_CHG_TYPE__SDP);
				msm_otg_set_power(&dev->otg,
					USB_IDCHG_MIN - get_aca_bmaxpower(dev));
			}
		} else if (!test_bit(A_VBUS_VLD, &dev->inputs)) {
			pr_debug("!a_vbus_vld\n");
			msm_otg_del_timer(dev);
			msm_otg_start_host(&dev->otg, REQUEST_STOP);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_VBUS_ERR;
			spin_unlock_irq(&dev->lock);
			/* Reset both phy and link */
			otg_reset(&dev->otg, 1);
		} else if (test_bit(ID_A, &dev->inputs)) {
			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
		} else if (!test_bit(ID, &dev->inputs)) {
			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 1);
		}
		break;
	case OTG_STATE_A_HOST:
		if ((test_bit(ID, &dev->inputs) &&
				!test_bit(ID_A, &dev->inputs)) ||
				test_bit(A_BUS_DROP, &dev->inputs)) {
			pr_debug("id_f/b/c || a_bus_drop\n");
			clear_bit(B_CONN, &dev->inputs);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_WAIT_VFALL;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_host(&dev->otg, REQUEST_STOP);
			/* Reset both phy and link */
			otg_reset(&dev->otg, 1);
			if (!test_bit(ID_A, &dev->inputs))
				dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
			msm_otg_start_timer(dev, TA_WAIT_VFALL, A_WAIT_VFALL);
			msm_otg_set_power(&dev->otg, 0);
		} else if (!test_bit(A_VBUS_VLD, &dev->inputs)) {
			pr_debug("!a_vbus_vld\n");
			clear_bit(B_CONN, &dev->inputs);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_VBUS_ERR;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_host(&dev->otg, REQUEST_STOP);
			/* Reset both phy and link */
			otg_reset(&dev->otg, 1);
			/* no work */
		} else if (!test_bit(A_BUS_REQ, &dev->inputs)) {
			/* a_bus_req is de-asserted when root hub is
			 * suspended or HNP is in progress.
			 */
			pr_debug("!a_bus_req\n");
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_SUSPEND;
			spin_unlock_irq(&dev->lock);
			if (dev->otg.host->b_hnp_enable) {
				msm_otg_start_timer(dev, TA_AIDL_BDIS,
						A_AIDL_BDIS);
			} else {
				/* No HNP. Root hub suspended */
				msm_otg_put_suspend(dev);
			}
			if (test_bit(ID_A, &dev->inputs))
				msm_otg_set_power(&dev->otg,
					USB_IDCHG_MIN - USB_IB_UNCFG);
		} else if (!test_bit(B_CONN, &dev->inputs)) {
			pr_debug("!b_conn\n");
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_WAIT_BCON;
			spin_unlock_irq(&dev->lock);
			if (TA_WAIT_BCON > 0)
				msm_otg_start_timer(dev, TA_WAIT_BCON,
					A_WAIT_BCON);
		} else if (test_bit(ID_A, &dev->inputs)) {
			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
			msm_otg_set_power(&dev->otg,
					USB_IDCHG_MIN - get_aca_bmaxpower(dev));
		} else if (!test_bit(ID, &dev->inputs)) {
			atomic_set(&dev->chg_type, USB_CHG_TYPE__INVALID);
			msm_otg_set_power(&dev->otg, 0);
			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 1);
		}
		break;
	case OTG_STATE_A_SUSPEND:
		if ((test_bit(ID, &dev->inputs) &&
				!test_bit(ID_A, &dev->inputs)) ||
				test_bit(A_BUS_DROP, &dev->inputs) ||
				test_bit(A_AIDL_BDIS, &dev->tmouts)) {
			/* NOTE(review): same missing-space concatenation as
			 * the A_WAIT_BCON message above.
			 */
			pr_debug("id_f/b/c || a_bus_drop ||"
					"a_aidl_bdis_tmout\n");
			if (test_bit(A_AIDL_BDIS, &dev->tmouts))
				msm_otg_send_event(&dev->otg,
					OTG_EVENT_HNP_FAILED);
			msm_otg_del_timer(dev);
			clear_bit(B_CONN, &dev->inputs);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_WAIT_VFALL;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_host(&dev->otg, REQUEST_STOP);
			/* NOTE(review): vbus_power(…, 0) is called here
			 * unconditionally AND again below when !ID_A —
			 * the second call looks redundant; confirm intent.
			 */
			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
			/* Reset both phy and link */
			otg_reset(&dev->otg, 1);
			if (!test_bit(ID_A, &dev->inputs))
				dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
			msm_otg_start_timer(dev, TA_WAIT_VFALL, A_WAIT_VFALL);
			msm_otg_set_power(&dev->otg, 0);
		} else if (!test_bit(A_VBUS_VLD, &dev->inputs)) {
			pr_debug("!a_vbus_vld\n");
			msm_otg_del_timer(dev);
			clear_bit(B_CONN, &dev->inputs);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_VBUS_ERR;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_host(&dev->otg, REQUEST_STOP);
			/* Reset both phy and link */
			otg_reset(&dev->otg, 1);
		} else if (!test_bit(B_CONN, &dev->inputs) &&
				dev->otg.host->b_hnp_enable) {
			pr_debug("!b_conn && b_hnp_enable");
			/* Clear AIDL_BDIS timer */
			msm_otg_del_timer(dev);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_PERIPHERAL;
			spin_unlock_irq(&dev->lock);

			msm_otg_start_host(&dev->otg, REQUEST_HNP_SUSPEND);

			/* We may come here even when B-dev is physically
			 * disconnected during HNP. We go back to host
			 * role if bus is idle for BIDL_ADIS time.
			 */
			dev->otg.gadget->is_a_peripheral = 1;
			msm_otg_start_peripheral(&dev->otg, 1);
			/* If ID_A: we can charge in a_peripheral as well */
			if (test_bit(ID_A, &dev->inputs)) {
				atomic_set(&dev->chg_type, USB_CHG_TYPE__SDP);
				msm_otg_set_power(&dev->otg,
					USB_IDCHG_MIN - USB_IB_UNCFG);
			}
		} else if (!test_bit(B_CONN, &dev->inputs) &&
				!dev->otg.host->b_hnp_enable) {
			pr_debug("!b_conn && !b_hnp_enable");
			/* bus request is dropped during suspend.
			 * acquire again for next device.
			 */
			set_bit(A_BUS_REQ, &dev->inputs);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_WAIT_BCON;
			spin_unlock_irq(&dev->lock);
			if (TA_WAIT_BCON > 0)
				msm_otg_start_timer(dev, TA_WAIT_BCON,
					A_WAIT_BCON);
			msm_otg_set_power(&dev->otg, 0);
		} else if (test_bit(ID_A, &dev->inputs)) {
			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
			atomic_set(&dev->chg_type, USB_CHG_TYPE__SDP);
			msm_otg_set_power(&dev->otg,
					USB_IDCHG_MIN - USB_IB_UNCFG);
		} else if (!test_bit(ID, &dev->inputs)) {
			msm_otg_set_power(&dev->otg, 0);
			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 1);
		}
		break;
	case OTG_STATE_A_PERIPHERAL:
		if ((test_bit(ID, &dev->inputs) &&
				!test_bit(ID_A, &dev->inputs)) ||
				test_bit(A_BUS_DROP, &dev->inputs)) {
			pr_debug("id _f/b/c || a_bus_drop\n");
			/* Clear BIDL_ADIS timer */
			msm_otg_del_timer(dev);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_WAIT_VFALL;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_peripheral(&dev->otg, 0);
			dev->otg.gadget->is_a_peripheral = 0;
			/* HCD was suspended before. Stop it now */
			msm_otg_start_host(&dev->otg, REQUEST_STOP);

			/* Reset both phy and link */
			otg_reset(&dev->otg, 1);
			if (!test_bit(ID_A, &dev->inputs))
				dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
			msm_otg_start_timer(dev, TA_WAIT_VFALL, A_WAIT_VFALL);
			msm_otg_set_power(&dev->otg, 0);
		} else if (!test_bit(A_VBUS_VLD, &dev->inputs)) {
			pr_debug("!a_vbus_vld\n");
			/* Clear BIDL_ADIS timer */
			msm_otg_del_timer(dev);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_VBUS_ERR;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_peripheral(&dev->otg, 0);
			dev->otg.gadget->is_a_peripheral = 0;
			/* HCD was suspended before. Stop it now */
			msm_otg_start_host(&dev->otg, REQUEST_STOP);
		} else if (test_bit(A_BIDL_ADIS, &dev->tmouts)) {
			/* bus stayed idle for BIDL_ADIS: resume host role */
			pr_debug("a_bidl_adis_tmout\n");
			msm_otg_start_peripheral(&dev->otg, 0);
			dev->otg.gadget->is_a_peripheral = 0;

			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_WAIT_BCON;
			spin_unlock_irq(&dev->lock);
			set_bit(A_BUS_REQ, &dev->inputs);
			msm_otg_start_host(&dev->otg, REQUEST_HNP_RESUME);
			if (TA_WAIT_BCON > 0)
				msm_otg_start_timer(dev, TA_WAIT_BCON,
					A_WAIT_BCON);
			msm_otg_set_power(&dev->otg, 0);
		} else if (test_bit(ID_A, &dev->inputs)) {
			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
			atomic_set(&dev->chg_type, USB_CHG_TYPE__SDP);
			msm_otg_set_power(&dev->otg,
					USB_IDCHG_MIN - USB_IB_UNCFG);
		} else if (!test_bit(ID, &dev->inputs)) {
			msm_otg_set_power(&dev->otg, 0);
			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 1);
		}
		break;
	case OTG_STATE_A_WAIT_VFALL:
		if (test_bit(A_WAIT_VFALL, &dev->tmouts)) {
			clear_bit(A_VBUS_VLD, &dev->inputs);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_IDLE;
			spin_unlock_irq(&dev->lock);
			work = 1;
		}
		break;
	case OTG_STATE_A_VBUS_ERR:
		if ((test_bit(ID, &dev->inputs) &&
				!test_bit(ID_A, &dev->inputs)) ||
				test_bit(A_BUS_DROP, &dev->inputs) ||
				test_bit(A_CLR_ERR, &dev->inputs)) {
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_WAIT_VFALL;
			spin_unlock_irq(&dev->lock);
			if (!test_bit(ID_A, &dev->inputs))
				dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
			msm_otg_start_timer(dev, TA_WAIT_VFALL, A_WAIT_VFALL);
			msm_otg_set_power(&dev->otg, 0);
		}
		break;
	default:
		pr_err("invalid OTG state\n");
	}

	if (work)
		queue_work(dev->wq, &dev->sm_work);

#ifdef CONFIG_USB_MSM_ACA
	/* Start id_polling if (ID_FLOAT&BSV) || ID_A/B/C */
	if ((test_bit(ID, &dev->inputs) &&
			test_bit(B_SESS_VLD, &dev->inputs) &&
			chg_type != USB_CHG_TYPE__WALLCHARGER) ||
			test_bit(ID_A, &dev->inputs)) {
		mod_timer(&dev->id_timer, jiffies +
				msecs_to_jiffies(OTG_ID_POLL_MS));
		return;
	}
	del_timer(&dev->id_timer);
#endif
	/* IRQ/sysfs may queue work. Check work_pending. otherwise
	 * we might endup releasing wakelock after it is acquired
	 * in IRQ/sysfs.
	 */
	if (!work_pending(&dev->sm_work) && !hrtimer_active(&dev->timer) &&
			!work_pending(&dev->otg_resume_work))
		wake_unlock(&dev->wlock);
}

#ifdef CONFIG_USB_MSM_ACA
/*
 * msm_otg_id_func - ACA ID polling timer callback.  Reads the PHY ID
 * interrupt-latch register (ULPI 0x13) and, when the ID_A/B/C state
 * changed, refreshes the inputs and kicks the state machine; otherwise
 * re-arms the poll timer.  ID_GND is left for the IRQ path.
 */
static void msm_otg_id_func(unsigned long _dev)
{
	struct msm_otg *dev = (struct msm_otg *) _dev;
	u8 phy_ints;

	if (atomic_read(&dev->in_lpm))
		msm_otg_set_suspend(&dev->otg, 0);

	phy_ints = ulpi_read(dev, 0x13);

	/* If id_gnd happened then stop and let isr take care of this */
	if (phy_id_state_gnd(phy_ints))
		goto out;

	if ((test_bit(ID_A, &dev->inputs) == phy_id_state_a(phy_ints)) &&
	    (test_bit(ID_B, &dev->inputs) == phy_id_state_b(phy_ints)) &&
	    (test_bit(ID_C, &dev->inputs) == phy_id_state_c(phy_ints))) {
		mod_timer(&dev->id_timer,
				jiffies + msecs_to_jiffies(OTG_ID_POLL_MS));
		goto out;
	} else {
		set_aca_id_inputs(dev);
	}
	wake_lock(&dev->wlock);
	queue_work(dev->wq, &dev->sm_work);
out:
	return;
}
#endif

#ifdef CONFIG_USB_OTG
/*
 * set_pwr_down - sysfs store: toggle the A_BUS_DROP input (power the
 * bus down/up).  Only valid while operating as an A-device.
 */
static ssize_t set_pwr_down(struct device *_dev,
		struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct msm_otg *dev = the_msm_otg;
	int value;
	enum usb_otg_state state;

	spin_lock_irq(&dev->lock);
	state = dev->otg.state;
	spin_unlock_irq(&dev->lock);

	/* Applicable for only A-Device */
	if (state <= OTG_STATE_A_IDLE)
		return -EINVAL;

	sscanf(buf, "%d", &value);

	if (test_bit(A_BUS_DROP,
&dev->inputs) != !!value) { change_bit(A_BUS_DROP, &dev->inputs); wake_lock(&dev->wlock); queue_work(dev->wq, &dev->sm_work); } return count; } static DEVICE_ATTR(pwr_down, S_IRUGO | S_IWUSR, NULL, set_pwr_down); static ssize_t set_srp_req(struct device *_dev, struct device_attribute *attr, const char *buf, size_t count) { struct msm_otg *dev = the_msm_otg; enum usb_otg_state state; spin_lock_irq(&dev->lock); state = dev->otg.state; spin_unlock_irq(&dev->lock); if (state != OTG_STATE_B_IDLE) return -EINVAL; set_bit(B_BUS_REQ, &dev->inputs); wake_lock(&dev->wlock); queue_work(dev->wq, &dev->sm_work); return count; } static DEVICE_ATTR(srp_req, S_IRUGO | S_IWUSR, NULL, set_srp_req); static ssize_t set_clr_err(struct device *_dev, struct device_attribute *attr, const char *buf, size_t count) { struct msm_otg *dev = the_msm_otg; enum usb_otg_state state; spin_lock_irq(&dev->lock); state = dev->otg.state; spin_unlock_irq(&dev->lock); if (state == OTG_STATE_A_VBUS_ERR) { set_bit(A_CLR_ERR, &dev->inputs); wake_lock(&dev->wlock); queue_work(dev->wq, &dev->sm_work); } return count; } static DEVICE_ATTR(clr_err, S_IRUGO | S_IWUSR, NULL, set_clr_err); static struct attribute *msm_otg_attrs[] = { &dev_attr_pwr_down.attr, &dev_attr_srp_req.attr, &dev_attr_clr_err.attr, NULL, }; static struct attribute_group msm_otg_attr_grp = { .attrs = msm_otg_attrs, }; #endif #ifdef CONFIG_DEBUG_FS static int otg_open(struct inode *inode, struct file *file) { file->private_data = inode->i_private; return 0; } static ssize_t otg_mode_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct msm_otg *dev = file->private_data; int ret = count; int work = 0; unsigned long flags; spin_lock_irqsave(&dev->lock, flags); if (!memcmp(buf, "none", count - 1)) { clear_bit(B_SESS_VLD, &dev->inputs); set_bit(ID, &dev->inputs); work = 1; } else if (!memcmp(buf, "peripheral", count - 1)) { set_bit(B_SESS_VLD, &dev->inputs); set_bit(ID, &dev->inputs); work = 1; } else if 
(!memcmp(buf, "host", count - 1)) { clear_bit(B_SESS_VLD, &dev->inputs); clear_bit(ID, &dev->inputs); set_bit(A_BUS_REQ, &dev->inputs); work = 1; } else { pr_info("%s: unknown mode specified\n", __func__); ret = -EINVAL; } spin_unlock_irqrestore(&dev->lock, flags); if (work) { wake_lock(&dev->wlock); queue_work(dev->wq, &dev->sm_work); } return ret; } const struct file_operations otgfs_fops = { .open = otg_open, .write = otg_mode_write, }; struct dentry *otg_debug_root; struct dentry *otg_debug_mode; #endif static int otg_debugfs_init(struct msm_otg *dev) { #ifdef CONFIG_DEBUG_FS otg_debug_root = debugfs_create_dir("otg", NULL); if (!otg_debug_root) return -ENOENT; otg_debug_mode = debugfs_create_file("mode", 0222, otg_debug_root, dev, &otgfs_fops); if (!otg_debug_mode) { debugfs_remove(otg_debug_root); otg_debug_root = NULL; return -ENOENT; } #endif return 0; } static void otg_debugfs_cleanup(void) { #ifdef CONFIG_DEBUG_FS debugfs_remove(otg_debug_mode); debugfs_remove(otg_debug_root); #endif } struct otg_io_access_ops msm_otg_io_ops = { .read = usb_ulpi_read, .write = usb_ulpi_write, }; static int __init msm_otg_probe(struct platform_device *pdev) { int ret = 0; struct resource *res; struct msm_otg *dev; dev = kzalloc(sizeof(struct msm_otg), GFP_KERNEL); if (!dev) return -ENOMEM; dev->otg.dev = &pdev->dev; dev->pdata = pdev->dev.platform_data; if (!dev->pdata) { ret = -ENODEV; goto free_dev; } #ifdef CONFIG_USB_EHCI_MSM if (!dev->pdata->vbus_power) { ret = -ENODEV; goto free_dev; } #endif if (dev->pdata->rpc_connect) { ret = dev->pdata->rpc_connect(1); pr_debug("%s: rpc_connect(%d)\n", __func__, ret); if (ret) { pr_err("%s: rpc connect failed\n", __func__); ret = -ENODEV; goto free_dev; } } dev->hs_clk = clk_get(&pdev->dev, "usb_hs_clk"); if (IS_ERR(dev->hs_clk)) { pr_err("%s: failed to get usb_hs_clk\n", __func__); ret = PTR_ERR(dev->hs_clk); goto rpc_fail; } clk_set_rate(dev->hs_clk, 60000000); if (dev->pdata->usb_in_sps) { dev->dfab_clk = clk_get(0, 
"dfab_clk"); if (IS_ERR(dev->dfab_clk)) { pr_err("%s: failed to get dfab clk\n", __func__); ret = PTR_ERR(dev->dfab_clk); goto put_hs_clk; } } /* If USB Core is running its protocol engine based on PCLK, * PCLK must be running at >60Mhz for correct HSUSB operation and * USB core cannot tolerate frequency changes on PCLK. For such * USB cores, vote for maximum clk frequency on pclk source */ dev->pdata->pm_qos_req_dma = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); if (pclk_requires_voting(&dev->otg) && !dev->pdata->usb_in_sps) { dev->pdata->ebi1_clk = clk_get(NULL, "ebi1_usb_clk"); if (IS_ERR(dev->pdata->ebi1_clk)) { ret = PTR_ERR(dev->pdata->ebi1_clk); goto put_dfab_clk; } clk_set_rate(dev->pdata->ebi1_clk, INT_MAX); msm_otg_vote_for_pclk_source(dev, 1); } if (!dev->pdata->pclk_is_hw_gated) { dev->hs_pclk = clk_get(&pdev->dev, "usb_hs_pclk"); if (IS_ERR(dev->hs_pclk)) { pr_err("%s: failed to get usb_hs_pclk\n", __func__); ret = PTR_ERR(dev->hs_pclk); goto put_ebi_clk; } clk_enable(dev->hs_pclk); } if (dev->pdata->core_clk) { dev->hs_cclk = clk_get(&pdev->dev, "usb_hs_core_clk"); if (IS_ERR(dev->hs_cclk)) { pr_err("%s: failed to get usb_hs_core_clk\n", __func__); ret = PTR_ERR(dev->hs_cclk); goto put_hs_pclk; } clk_enable(dev->hs_cclk); } if (!dev->pdata->phy_reset) { dev->phy_reset_clk = clk_get(&pdev->dev, "usb_phy_clk"); if (IS_ERR(dev->phy_reset_clk)) { pr_err("%s: failed to get usb_phy_clk\n", __func__); ret = PTR_ERR(dev->phy_reset_clk); goto put_hs_cclk; } } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { pr_err("%s: failed to get platform resource mem\n", __func__); ret = -ENODEV; goto put_phy_clk; } dev->regs = ioremap(res->start, resource_size(res)); if (!dev->regs) { pr_err("%s: ioremap failed\n", __func__); ret = -ENOMEM; goto put_phy_clk; } dev->irq = platform_get_irq(pdev, 0); if (!dev->irq) { pr_err("%s: platform_get_irq failed\n", __func__); ret = -ENODEV; goto free_regs; } dev->xo_handle = 
msm_xo_get(MSM_XO_TCXO_D1, "usb"); if (IS_ERR(dev->xo_handle)) { pr_err(" %s not able to get the handle" "to vote for TCXO D1 buffer\n", __func__); ret = PTR_ERR(dev->xo_handle); goto free_regs; } ret = msm_xo_mode_vote(dev->xo_handle, MSM_XO_MODE_ON); if (ret) { pr_err("%s failed to vote for TCXO" "D1 buffer%d\n", __func__, ret); goto free_xo_handle; } msm_otg_init_timer(dev); INIT_WORK(&dev->sm_work, msm_otg_sm_work); INIT_WORK(&dev->otg_resume_work, msm_otg_resume_w); spin_lock_init(&dev->lock); wake_lock_init(&dev->wlock, WAKE_LOCK_SUSPEND, "msm_otg"); dev->wq = create_singlethread_workqueue("k_otg"); if (!dev->wq) { ret = -ENOMEM; goto free_wlock; } if (dev->pdata->init_gpio) { ret = dev->pdata->init_gpio(1); if (ret) { pr_err("%s: gpio init failed with err:%d\n", __func__, ret); goto free_wq; } } /* To reduce phy power consumption and to avoid external LDO * on the board, PMIC comparators can be used to detect VBUS * session change. */ if (dev->pdata->pmic_vbus_notif_init) { ret = dev->pdata->pmic_vbus_notif_init (&msm_otg_set_vbus_state, 1); if (!ret) { dev->pmic_vbus_notif_supp = 1; } else if (ret != -ENOTSUPP) { pr_err("%s: pmic_vbus_notif_init() failed, err:%d\n", __func__, ret); goto free_gpio; } } if (dev->pdata->pmic_id_notif_init) { ret = dev->pdata->pmic_id_notif_init(&msm_otg_set_id_state, 1); if (!ret) { dev->pmic_id_notif_supp = 1; /* * As a part of usb initialization checks the id * by that time if pmic doesn't generate ID interrupt, * then it assumes that micro-A cable is connected * (as default pmic_id_status is 0, which indicates * as micro-A cable) and moving to Host mode and * ignoring the BSession Valid interrupts. 
* For now assigning default id_status as 1 * (which indicates as micro-B) */ dev->pmic_id_status = 1; } else if (ret != -ENOTSUPP) { pr_err("%s: pmic_id_ notif_init failed err:%d", __func__, ret); goto free_pmic_vbus_notif; } } if (dev->pdata->pmic_vbus_irq) dev->vbus_on_irq = dev->pdata->pmic_vbus_irq; /* vote for vddcx, as PHY cannot tolerate vddcx below 1.0V */ if (dev->pdata->init_vddcx) { ret = dev->pdata->init_vddcx(1); if (ret) { pr_err("%s: unable to enable vddcx digital core:%d\n", __func__, ret); goto free_pmic_id_notif; } } if (dev->pdata->ldo_init) { ret = dev->pdata->ldo_init(1); if (ret) { pr_err("%s: ldo_init failed with err:%d\n", __func__, ret); goto free_config_vddcx; } } if (dev->pdata->ldo_enable) { ret = dev->pdata->ldo_enable(1); if (ret) { pr_err("%s: ldo_enable failed with err:%d\n", __func__, ret); goto free_ldo_init; } } /* ACk all pending interrupts and clear interrupt enable registers */ writel((readl(USB_OTGSC) & ~OTGSC_INTR_MASK), USB_OTGSC); writel(readl(USB_USBSTS), USB_USBSTS); writel(0, USB_USBINTR); ret = request_irq(dev->irq, msm_otg_irq, IRQF_SHARED, "msm_otg", dev); if (ret) { pr_err("%s: request irq failed\n", __func__); goto free_ldo_enable; } the_msm_otg = dev; dev->otg.set_peripheral = msm_otg_set_peripheral; #ifdef CONFIG_USB_EHCI_MSM dev->otg.set_host = msm_otg_set_host; #endif dev->otg.set_suspend = msm_otg_set_suspend; dev->otg.start_hnp = msm_otg_start_hnp; dev->otg.send_event = msm_otg_send_event; dev->otg.set_power = msm_otg_set_power; dev->set_clk = msm_otg_set_clk; dev->reset = otg_reset; dev->otg.io_ops = &msm_otg_io_ops; if (otg_set_transceiver(&dev->otg)) { WARN_ON(1); goto free_otg_irq; } #ifdef CONFIG_USB_MSM_ACA /* Link doesnt support id_a/b/c interrupts, hence polling * needs to be done to support ACA charger */ init_timer(&dev->id_timer); dev->id_timer.function = msm_otg_id_func; dev->id_timer.data = (unsigned long) dev; #endif atomic_set(&dev->chg_type, USB_CHG_TYPE__INVALID); if (dev->pdata->chg_init && 
dev->pdata->chg_init(1)) pr_err("%s: chg_init failed\n", __func__); device_init_wakeup(&pdev->dev, 1); ret = pm_runtime_set_active(&pdev->dev); if (ret < 0) pr_err("%s: pm_runtime: Fail to set active\n", __func__); ret = 0; pm_runtime_enable(&pdev->dev); pm_runtime_get(&pdev->dev); ret = otg_debugfs_init(dev); if (ret) { pr_err("%s: otg_debugfs_init failed\n", __func__); goto chg_deinit; } #ifdef CONFIG_USB_OTG ret = sysfs_create_group(&pdev->dev.kobj, &msm_otg_attr_grp); if (ret < 0) { pr_err("%s: Failed to create the sysfs entry\n", __func__); otg_debugfs_cleanup(); goto chg_deinit; } #endif return 0; chg_deinit: if (dev->pdata->chg_init) dev->pdata->chg_init(0); free_otg_irq: free_irq(dev->irq, dev); free_ldo_enable: if (dev->pdata->ldo_enable) dev->pdata->ldo_enable(0); if (dev->pdata->setup_gpio) dev->pdata->setup_gpio(USB_SWITCH_DISABLE); free_ldo_init: if (dev->pdata->ldo_init) dev->pdata->ldo_init(0); free_config_vddcx: if (dev->pdata->init_vddcx) dev->pdata->init_vddcx(0); free_pmic_id_notif: if (dev->pdata->pmic_id_notif_init && dev->pmic_id_notif_supp) dev->pdata->pmic_id_notif_init(&msm_otg_set_id_state, 0); free_pmic_vbus_notif: if (dev->pdata->pmic_vbus_notif_init && dev->pmic_vbus_notif_supp) dev->pdata->pmic_vbus_notif_init(&msm_otg_set_vbus_state, 0); free_gpio: if (dev->pdata->init_gpio) dev->pdata->init_gpio(0); free_wq: destroy_workqueue(dev->wq); free_wlock: wake_lock_destroy(&dev->wlock); free_xo_handle: msm_xo_put(dev->xo_handle); free_regs: iounmap(dev->regs); put_phy_clk: if (dev->phy_reset_clk) clk_put(dev->phy_reset_clk); put_hs_cclk: if (dev->hs_cclk) { clk_disable(dev->hs_cclk); clk_put(dev->hs_cclk); } put_hs_pclk: if (dev->hs_pclk) { clk_disable(dev->hs_pclk); clk_put(dev->hs_pclk); } put_ebi_clk: clk_put(dev->pdata->ebi1_clk); put_dfab_clk: if (dev->dfab_clk) { clk_set_min_rate(dev->dfab_clk, 0); clk_put(dev->dfab_clk); } put_hs_clk: if (dev->hs_clk) clk_put(dev->hs_clk); rpc_fail: if (dev->pdata->rpc_connect) 
dev->pdata->rpc_connect(0); free_dev: kfree(dev); return ret; } static int __exit msm_otg_remove(struct platform_device *pdev) { struct msm_otg *dev = the_msm_otg; otg_debugfs_cleanup(); #ifdef CONFIG_USB_OTG sysfs_remove_group(&pdev->dev.kobj, &msm_otg_attr_grp); #endif destroy_workqueue(dev->wq); wake_lock_destroy(&dev->wlock); if (dev->pdata->setup_gpio) dev->pdata->setup_gpio(USB_SWITCH_DISABLE); if (dev->pdata->init_vddcx) dev->pdata->init_vddcx(0); if (dev->pdata->ldo_enable) dev->pdata->ldo_enable(0); if (dev->pdata->ldo_init) dev->pdata->ldo_init(0); if (dev->pmic_vbus_notif_supp) dev->pdata->pmic_vbus_notif_init(&msm_otg_set_vbus_state, 0); if (dev->pmic_id_notif_supp) dev->pdata->pmic_id_notif_init(&msm_otg_set_id_state, 0); #ifdef CONFIG_USB_MSM_ACA del_timer_sync(&dev->id_timer); #endif if (dev->pdata->chg_init) dev->pdata->chg_init(0); free_irq(dev->irq, pdev); iounmap(dev->regs); if (dev->hs_cclk) { clk_disable(dev->hs_cclk); clk_put(dev->hs_cclk); } if (dev->hs_pclk) { clk_disable(dev->hs_pclk); clk_put(dev->hs_pclk); } if (dev->hs_clk) clk_put(dev->hs_clk); if (dev->phy_reset_clk) clk_put(dev->phy_reset_clk); if (dev->pdata->rpc_connect) dev->pdata->rpc_connect(0); msm_xo_put(dev->xo_handle); pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); kfree(dev); pm_qos_remove_request(dev->pdata->pm_qos_req_dma); clk_put(dev->pdata->ebi1_clk); return 0; } static int msm_otg_runtime_suspend(struct device *dev) { struct msm_otg *otg = the_msm_otg; dev_dbg(dev, "pm_runtime: suspending...\n"); msm_otg_suspend(otg); return 0; } static int msm_otg_runtime_resume(struct device *dev) { struct msm_otg *otg = the_msm_otg; dev_dbg(dev, "pm_runtime: resuming...\n"); msm_otg_resume(otg); return 0; } static int msm_otg_runtime_idle(struct device *dev) { dev_dbg(dev, "pm_runtime: idling...\n"); return 0; } static struct dev_pm_ops msm_otg_dev_pm_ops = { .runtime_suspend = msm_otg_runtime_suspend, .runtime_resume = msm_otg_runtime_resume, .runtime_idle = 
msm_otg_runtime_idle, }; static struct platform_driver msm_otg_driver = { .remove = __exit_p(msm_otg_remove), .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, .pm = &msm_otg_dev_pm_ops, }, }; static int __init msm_otg_init(void) { return platform_driver_probe(&msm_otg_driver, msm_otg_probe); } static void __exit msm_otg_exit(void) { platform_driver_unregister(&msm_otg_driver); } module_init(msm_otg_init); module_exit(msm_otg_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("MSM usb transceiver driver"); MODULE_VERSION("1.00");
Java
/* Copyright (C) 2006 - 2012 ScriptDev2 <http://www.scriptdev2.com/>
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* ScriptData
SDName: Instance_Hellfire_Ramparts
SD%Complete: 50
SDComment:
SDCategory: Hellfire Ramparts
EndScriptData */

#include "precompiled.h"
#include "hellfire_ramparts.h"

// Instance script for Hellfire Ramparts. Tracks the paired end-bosses
// Vazruden (TYPE_VAZRUDEN, m_auiEncounter[0]) and Nazan (TYPE_NAZAN,
// m_auiEncounter[1]); the reward chest spawns only when both are DONE.
instance_ramparts::instance_ramparts(Map* pMap) : ScriptedInstance(pMap),
    m_uiSentryCounter(0)
{
    Initialize();
}

void instance_ramparts::Initialize()
{
    // Reset both encounter slots to NOT_STARTED (0)
    memset(&m_auiEncounter, 0, sizeof(m_auiEncounter));
}

void instance_ramparts::OnCreatureCreate(Creature* pCreature)
{
    switch (pCreature->GetEntry())
    {
        case NPC_VAZRUDEN_HERALD:
        case NPC_VAZRUDEN:
            // Single-instance NPCs: store by entry for later lookup
            m_mNpcEntryGuidStore[pCreature->GetEntry()] = pCreature->GetObjectGuid();
            break;
        case NPC_HELLFIRE_SENTRY:
            // Multiple sentries exist; keep the whole list for respawn on wipe
            m_lSentryGUIDs.push_back(pCreature->GetObjectGuid());
            break;
    }
}

void instance_ramparts::OnObjectCreate(GameObject* pGo)
{
    switch (pGo->GetEntry())
    {
        case GO_FEL_IRON_CHEST:                     // normal difficulty reward
        case GO_FEL_IRON_CHEST_H:                   // heroic difficulty reward
            m_mGoEntryGuidStore[pGo->GetEntry()] = pGo->GetObjectGuid();
            break;
    }
}

void instance_ramparts::SetData(uint32 uiType, uint32 uiData)
{
    debug_log("SD2: Instance Ramparts: SetData received for type %u with data %u",uiType,uiData);

    switch (uiType)
    {
        case TYPE_VAZRUDEN:
            if (uiData == DONE && m_auiEncounter[1] == DONE)
            {
                DoRespawnGameObject(instance->IsRegularDifficulty() ? GO_FEL_IRON_CHEST : GO_FEL_IRON_CHEST_H, HOUR);
                // Consistency fix: the TYPE_NAZAN branch below also clears
                // GO_FLAG_NO_INTERACT after spawning the chest. Without the
                // same call here, the chest remained non-interactable whenever
                // Vazruden was the last of the pair to be reported DONE.
                DoToggleGameObjectFlags(instance->IsRegularDifficulty() ? GO_FEL_IRON_CHEST : GO_FEL_IRON_CHEST_H, GO_FLAG_NO_INTERACT, false);
            }
            if (uiData == FAIL && m_auiEncounter[0] != FAIL)
                DoFailVazruden();
            m_auiEncounter[0] = uiData;
            break;
        case TYPE_NAZAN:
            // SPECIAL is sent once per dead sentry; Nazan only becomes
            // attackable (slot set to SPECIAL) after both sentries are down.
            if (uiData == SPECIAL)
            {
                ++m_uiSentryCounter;

                if (m_uiSentryCounter == 2)
                    m_auiEncounter[1] = uiData;

                return;
            }
            if (uiData == DONE && m_auiEncounter[0] == DONE)
            {
                DoRespawnGameObject(instance->IsRegularDifficulty() ? GO_FEL_IRON_CHEST : GO_FEL_IRON_CHEST_H, HOUR);
                DoToggleGameObjectFlags(instance->IsRegularDifficulty() ? GO_FEL_IRON_CHEST : GO_FEL_IRON_CHEST_H, GO_FLAG_NO_INTERACT, false);
            }
            if (uiData == FAIL && m_auiEncounter[1] != FAIL)
                DoFailVazruden();
            m_auiEncounter[1] = uiData;
            break;
    }
}

uint32 instance_ramparts::GetData(uint32 uiType)
{
    if (uiType == TYPE_VAZRUDEN)
        return m_auiEncounter[0];
    if (uiType == TYPE_NAZAN)
        return m_auiEncounter[1];

    return 0;
}

// Shared wipe handler: marks both encounters FAIL and restores the event
// to its pre-pull state (sentries respawned, herald reset, dragon despawned).
void instance_ramparts::DoFailVazruden()
{
    // Store FAIL for both types
    m_auiEncounter[0] = FAIL;
    m_auiEncounter[1] = FAIL;

    // Restore Sentries (counter and respawn them)
    m_uiSentryCounter = 0;
    for (GuidList::const_iterator itr = m_lSentryGUIDs.begin(); itr != m_lSentryGUIDs.end(); ++itr)
    {
        if (Creature* pSentry = instance->GetCreature(*itr))
            pSentry->Respawn();
    }

    // Respawn or Reset Vazruden the herald
    if (Creature* pVazruden = GetSingleCreatureFromStorage(NPC_VAZRUDEN_HERALD))
    {
        if (!pVazruden->isAlive())
            pVazruden->Respawn();
        else
        {
            if (ScriptedAI* pVazrudenAI = dynamic_cast<ScriptedAI*> (pVazruden->AI()))
                pVazrudenAI->Reset();
        }
    }

    // Despawn Vazruden
    if (Creature* pVazruden = GetSingleCreatureFromStorage(NPC_VAZRUDEN))
        pVazruden->ForcedDespawn();
}

InstanceData* GetInstanceData_instance_ramparts(Map* pMap)
{
    return new instance_ramparts(pMap);
}

void AddSC_instance_ramparts()
{
    Script* pNewScript;

    pNewScript = new Script;
    pNewScript->Name = "instance_ramparts";
    pNewScript->GetInstanceData = &GetInstanceData_instance_ramparts;
    pNewScript->RegisterSelf();
}
Java
// { dg-require-namedlocale "fr_FR.ISO8859-15" } // 2001-07-17 Benjamin Kosnik <[email protected]> // Copyright (C) 2001-2021 Free Software Foundation, Inc. // // This file is part of the GNU ISO C++ Library. This library is free // software; you can redistribute it and/or modify it under the // terms of the GNU General Public License as published by the // Free Software Foundation; either version 3, or (at your option) // any later version. // This library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License along // with this library; see the file COPYING3. If not see // <http://www.gnu.org/licenses/>. // 22.2.7.1.1 messages members #include <locale> #include <testsuite_hooks.h> void test02() { using namespace std; typedef std::messages<char>::catalog catalog; typedef std::messages<char>::string_type string_type; // This is defined through CXXFLAGS in scripts/testsuite_flags[.in]. const char* dir = LOCALEDIR; // basic construction locale loc_fr = locale(ISO_8859(15,fr_FR)); VERIFY( locale::classic() != loc_fr ); // cache the messages facets const messages<char>& mssg_fr = use_facet<messages<char> >(loc_fr); // catalog open(const string&, const locale&) const; // string_type get(catalog, int, int, const string_type& ) const; // void close(catalog) const; // Check French (fr_FR) locale. catalog cat_fr = mssg_fr.open("libstdc++", loc_fr, dir); string s01 = mssg_fr.get(cat_fr, 0, 0, "please"); string s02 = mssg_fr.get(cat_fr, 0, 0, "thank you"); VERIFY ( s01 == "s'il vous plaît" ); VERIFY ( s02 == "merci" ); mssg_fr.close(cat_fr); } int main() { test02(); return 0; }
Java
<?php
/*
  V4.81 3 May 2006  (c) 2000-2006 John Lim. All rights reserved.
  Released under both BSD license and Lesser GPL library license.
  Whenever there is any discrepancy between the two licenses,
  the BSD license will take precedence.
  Set tabs to 4 for best viewing.

  Latest version is available at http://adodb.sourceforge.net

  Sybase driver contributed by Toni ([email protected])

  - MSSQL date patch applied.

  Date patch by Toni 15 Feb 2002

  Maintenance notes (review):
  - The ereg()/ereg-style date parsers in ADORecordSet_array_sybase were
    ported to preg_match(); ereg() was removed in PHP 7.0 and the old code
    fataled there. The accepted input formats are unchanged.
  - RowLock() previously tested $this->_hastrans, a property that is never
    defined anywhere in ADOdb (so the check was always false and BeginTrans()
    was called unconditionally). It now consults the transCnt counter that
    BeginTrans()/CommitTrans()/RollbackTrans() maintain.
  - PHP4-style constructors are kept deliberately for consistency with the
    rest of this ADOdb release; note they are removed in PHP 8.
*/

// security - hide paths
if (!defined('ADODB_DIR')) die();

/**
 * ADOdb connection driver for Sybase via the legacy sybase_* extension.
 */
class ADODB_sybase extends ADOConnection {
	var $databaseType = "sybase";
	var $dataProvider = 'sybase';
	var $replaceQuote = "''"; // string to use to replace quotes
	var $fmtDate = "'Y-m-d'";
	var $fmtTimeStamp = "'Y-m-d H:i:s'";
	var $hasInsertID = true;
	var $hasAffectedRows = true;
	var $metaTablesSQL="select name from sysobjects where type='U' or type='V'";
	// see http://sybooks.sybase.com/onlinebooks/group-aw/awg0800e/dbrfen8/@ebt-link;pt=5981;uf=0?target=0;window=new;showtoc=true;book=dbrfen8
	var $metaColumnsSQL = "SELECT c.column_name, c.column_type, c.width FROM syscolumn c, systable t WHERE t.table_name='%s' AND c.table_id=t.table_id AND t.table_type='BASE'";
	/*
	"select c.name,t.name,c.length from syscolumns c join systypes t on t.xusertype=c.xusertype join sysobjects o on o.id=c.id where o.name='%s'";
	*/
	var $concat_operator = '+';
	var $arrayClass = 'ADORecordSet_array_sybase';
	var $sysDate = 'GetDate()';
	var $leftOuter = '*=';
	var $rightOuter = '=*';

	function ADODB_sybase()
	{
	}

	// might require begintrans -- committrans
	function _insertid()
	{
		return $this->GetOne('select @@identity');
	}

	// might require begintrans -- committrans
	function _affectedrows()
	{
		return $this->GetOne('select @@rowcount');
	}

	function BeginTrans()
	{
		if ($this->transOff) return true;
		$this->transCnt += 1;

		$this->Execute('BEGIN TRAN');
		return true;
	}

	function CommitTrans($ok=true)
	{
		if ($this->transOff) return true;

		if (!$ok) return $this->RollbackTrans();

		$this->transCnt -= 1;
		$this->Execute('COMMIT TRAN');
		return true;
	}

	function RollbackTrans()
	{
		if ($this->transOff) return true;
		$this->transCnt -= 1;
		$this->Execute('ROLLBACK TRAN');
		return true;
	}

	// http://www.isug.com/Sybase_FAQ/ASE/section6.1.html#6.1.4
	// Lock the rows matched by $where in $tables with HOLDLOCK, opening a
	// transaction first if none is active yet.
	// Fix: the original tested the undefined property $this->_hastrans,
	// which is always falsy, so a fresh transaction was started even when
	// one was already open; use the transaction counter instead.
	function RowLock($tables,$where,$flds='top 1 null as ignore')
	{
		if (!$this->transCnt) $this->BeginTrans();
		$tables = str_replace(',',' HOLDLOCK,',$tables);
		return $this->GetOne("select $flds from $tables HOLDLOCK where $where");
	}

	function SelectDB($dbName)
	{
		$this->database = $dbName;
		$this->databaseName = $dbName; # obsolete, retained for compat with older adodb versions
		if ($this->_connectionID) {
			return @sybase_select_db($dbName);
		}
		else return false;
	}

	/*	Returns: the last error message from previous database operation
		Note: This function is NOT available for Microsoft SQL Server.	*/
	function ErrorMsg()
	{
		if ($this->_logsql) return $this->_errorMsg;
		if (function_exists('sybase_get_last_message'))
			$this->_errorMsg = sybase_get_last_message();
		else
			// $php_errormsg requires track_errors; removed in PHP 8 — this
			// branch only applies on platforms without sybase_get_last_message()
			$this->_errorMsg = isset($php_errormsg) ? $php_errormsg
				: 'SYBASE error messages not supported on this platform';
		return $this->_errorMsg;
	}

	// returns true or false
	function _connect($argHostname, $argUsername, $argPassword, $argDatabasename)
	{
		if (!function_exists('sybase_connect')) return null;

		$this->_connectionID = sybase_connect($argHostname,$argUsername,$argPassword);
		if ($this->_connectionID === false) return false;
		if ($argDatabasename) return $this->SelectDB($argDatabasename);
		return true;
	}

	// returns true or false
	function _pconnect($argHostname, $argUsername, $argPassword, $argDatabasename)
	{
		if (!function_exists('sybase_connect')) return null;

		$this->_connectionID = sybase_pconnect($argHostname,$argUsername,$argPassword);
		if ($this->_connectionID === false) return false;
		if ($argDatabasename) return $this->SelectDB($argDatabasename);
		return true;
	}

	// returns query ID if successful, otherwise false
	function _query($sql,$inputarr)
	{
	global $ADODB_COUNTRECS;

		if ($ADODB_COUNTRECS == false && ADODB_PHPVER >= 0x4300)
			return sybase_unbuffered_query($sql,$this->_connectionID);
		else
			return sybase_query($sql,$this->_connectionID);
	}

	// See http://www.isug.com/Sybase_FAQ/ASE/section6.2.html#6.2.12
	// Emulates LIMIT/OFFSET via "set rowcount"; cached queries fall back to
	// loading the whole recordset because rowcount is not cached.
	function &SelectLimit($sql,$nrows=-1,$offset=-1,$inputarr=false,$secs2cache=0)
	{
		if ($secs2cache > 0) {// we do not cache rowcount, so we have to load entire recordset
			$rs =& ADOConnection::SelectLimit($sql,$nrows,$offset,$inputarr,$secs2cache);
			return $rs;
		}

		$nrows = (integer) $nrows;
		$offset = (integer) $offset;

		$cnt = ($nrows >= 0) ? $nrows : 999999999;
		if ($offset > 0 && $cnt) $cnt += $offset;

		$this->Execute("set rowcount $cnt");
		$rs =& ADOConnection::SelectLimit($sql,$nrows,$offset,$inputarr,0);
		$this->Execute("set rowcount 0");

		return $rs;
	}

	// returns true or false
	function _close()
	{
		return @sybase_close($this->_connectionID);
	}

	function UnixDate($v)
	{
		return ADORecordSet_array_sybase::UnixDate($v);
	}

	function UnixTimeStamp($v)
	{
		return ADORecordSet_array_sybase::UnixTimeStamp($v);
	}

	# Added 2003-10-05 by Chris Phillipson
	# Used ASA SQL Reference Manual -- http://sybooks.sybase.com/onlinebooks/group-aw/awg0800e/dbrfen8/@ebt-link;pt=16756?target=%25N%15_12018_START_RESTART_N%25
	# to convert similar Microsoft SQL*Server (mssql) API into Sybase compatible version
	// Format date column in sql string given an input format that understands Y M D
	function SQLDate($fmt, $col=false)
	{
		if (!$col) $col = $this->sysTimeStamp;
		$s = '';

		$len = strlen($fmt);
		for ($i=0; $i < $len; $i++) {
			if ($s) $s .= '+';
			$ch = $fmt[$i];
			switch($ch) {
			case 'Y':
			case 'y':
				$s .= "datename(yy,$col)";
				break;
			case 'M':
				$s .= "convert(char(3),$col,0)";
				break;
			case 'm':
				$s .= "replace(str(month($col),2),' ','0')";
				break;
			case 'Q':
			case 'q':
				$s .= "datename(qq,$col)";
				break;
			case 'D':
			case 'd':
				$s .= "replace(str(datepart(dd,$col),2),' ','0')";
				break;
			case 'h':
				$s .= "substring(convert(char(14),$col,0),13,2)";
				break;
			case 'H':
				$s .= "replace(str(datepart(hh,$col),2),' ','0')";
				break;
			case 'i':
				$s .= "replace(str(datepart(mi,$col),2),' ','0')";
				break;
			case 's':
				$s .= "replace(str(datepart(ss,$col),2),' ','0')";
				break;
			case 'a':
			case 'A':
				$s .= "substring(convert(char(19),$col,0),18,2)";
				break;
			default:
				// Backslash escapes the next format character literally
				if ($ch == '\\') {
					$i++;
					$ch = substr($fmt,$i,1);
				}
				$s .= $this->qstr($ch);
				break;
			}
		}
		return $s;
	}

	# Added 2003-10-07 by Chris Phillipson
	# Used ASA SQL Reference Manual -- http://sybooks.sybase.com/onlinebooks/group-aw/awg0800e/dbrfen8/@ebt-link;pt=5981;uf=0?target=0;window=new;showtoc=true;book=dbrfen8
	# to convert similar Microsoft SQL*Server (mssql) API into Sybase compatible version
	function MetaPrimaryKeys($table)
	{
		$sql = "SELECT c.column_name " .
			"FROM syscolumn c, systable t " .
			"WHERE t.table_name='$table' AND c.table_id=t.table_id " .
			"AND t.table_type='BASE' " .
			"AND c.pkey = 'Y' " .
			"ORDER BY c.column_id";

		$a = $this->GetCol($sql);
		if ($a && sizeof($a)>0) return $a;
		return false;
	}
}

/*--------------------------------------------------------------------------------------
	Class Name: Recordset
--------------------------------------------------------------------------------------*/
global $ADODB_sybase_mths;
$ADODB_sybase_mths = array(
	'JAN'=>1,'FEB'=>2,'MAR'=>3,'APR'=>4,'MAY'=>5,'JUN'=>6,
	'JUL'=>7,'AUG'=>8,'SEP'=>9,'OCT'=>10,'NOV'=>11,'DEC'=>12);

class ADORecordset_sybase extends ADORecordSet {

	var $databaseType = "sybase";
	var $canSeek = true;
	// _mths works only in non-localised system
	var $_mths = array('JAN'=>1,'FEB'=>2,'MAR'=>3,'APR'=>4,'MAY'=>5,'JUN'=>6,'JUL'=>7,'AUG'=>8,'SEP'=>9,'OCT'=>10,'NOV'=>11,'DEC'=>12);

	function ADORecordset_sybase($id,$mode=false)
	{
		if ($mode === false) {
			global $ADODB_FETCH_MODE;
			$mode = $ADODB_FETCH_MODE;
		}
		// Note: the default here is ASSOC, unlike most other ADOdb drivers
		if (!$mode) $this->fetchMode = ADODB_FETCH_ASSOC;
		else $this->fetchMode = $mode;
		$this->ADORecordSet($id,$mode);
	}

	/*	Returns: an object containing field information.
		Get column information in the Recordset object. fetchField() can be used in order to obtain information about
		fields in a certain query result. If the field offset isn't specified, the next field that wasn't yet retrieved by
		fetchField() is retrieved.	*/
	function &FetchField($fieldOffset = -1)
	{
		if ($fieldOffset != -1) {
			$o = @sybase_fetch_field($this->_queryID, $fieldOffset);
		}
		else if ($fieldOffset == -1) {	/*	The $fieldOffset argument is not provided thus its -1 	*/
			$o = @sybase_fetch_field($this->_queryID);
		}
		// older versions of PHP did not support type, only numeric
		if ($o && !isset($o->type)) $o->type = ($o->numeric) ? 'float' : 'varchar';
		return $o;
	}

	function _initrs()
	{
	global $ADODB_COUNTRECS;
		$this->_numOfRows = ($ADODB_COUNTRECS)? @sybase_num_rows($this->_queryID):-1;
		$this->_numOfFields = @sybase_num_fields($this->_queryID);
	}

	function _seek($row)
	{
		return @sybase_data_seek($this->_queryID, $row);
	}

	function _fetch($ignore_fields=false)
	{
		if ($this->fetchMode == ADODB_FETCH_NUM) {
			$this->fields = @sybase_fetch_row($this->_queryID);
		} else if ($this->fetchMode == ADODB_FETCH_ASSOC) {
			// Fetch numerically, then remap to column names with the
			// case convention selected by ADODB_ASSOC_CASE
			$this->fields = @sybase_fetch_row($this->_queryID);
			if (is_array($this->fields)) {
				$this->fields = $this->GetRowAssoc(ADODB_ASSOC_CASE);
				return true;
			}
			return false;
		} else {
			$this->fields = @sybase_fetch_array($this->_queryID);
		}
		if ( is_array($this->fields)) {
			return true;
		}
		return false;
	}

	/*	close() only needs to be called if you are worried about using too much memory while your script
		is running. All associated result memory for the specified result identifier will automatically be freed.	*/
	function _close()
	{
		return @sybase_free_result($this->_queryID);
	}

	// sybase/mssql uses a default date like Dec 30 2000 12:00AM
	function UnixDate($v)
	{
		return ADORecordSet_array_sybase::UnixDate($v);
	}

	function UnixTimeStamp($v)
	{
		return ADORecordSet_array_sybase::UnixTimeStamp($v);
	}
}

class ADORecordSet_array_sybase extends ADORecordSet_array {
	function ADORecordSet_array_sybase($id=-1)
	{
		$this->ADORecordSet_array($id);
	}

	// sybase/mssql uses a default date like Dec 30 2000 12:00AM
	// Parse such a date string to a unix timestamp (midnight of that day);
	// returns 0 for years at/before TIMESTAMP_FIRST_YEAR and false for an
	// unrecognised month. Ported from ereg() (removed in PHP 7) to preg_match().
	function UnixDate($v)
	{
	global $ADODB_sybase_mths;
		//Dec 30 2000 12:00AM
		if (!preg_match( "~([A-Za-z]{3})[-/. ]+([0-9]{1,2})[-/. ]+([0-9]{4})~"
			,$v, $rr)) return parent::UnixDate($v);

		if ($rr[3] <= TIMESTAMP_FIRST_YEAR) return 0;

		$themth = substr(strtoupper($rr[1]),0,3);
		// Guard the lookup to avoid an undefined-index notice on bad input
		$themth = isset($ADODB_sybase_mths[$themth]) ? $ADODB_sybase_mths[$themth] : 0;
		if ($themth <= 0) return false;
		// h-m-s-MM-DD-YY
		return  mktime(0,0,0,$themth,$rr[2],$rr[3]);
	}

	// Parse "Mon DD YYYY HH:MM[AP]M" to a unix timestamp; same conventions
	// and fallbacks as UnixDate() above. Ported from ereg() to preg_match().
	function UnixTimeStamp($v)
	{
	global $ADODB_sybase_mths;
		//11.02.2001 Toni Tunkkari [email protected]
		//Changed [0-9] to [0-9 ] in day conversion
		if (!preg_match( "~([A-Za-z]{3})[-/. ]([0-9 ]{1,2})[-/. ]([0-9]{4}) +([0-9]{1,2}):([0-9]{1,2}) *([apAP]?)~"
			,$v, $rr)) return parent::UnixTimeStamp($v);
		if ($rr[3] <= TIMESTAMP_FIRST_YEAR) return 0;

		$themth = substr(strtoupper($rr[1]),0,3);
		$themth = isset($ADODB_sybase_mths[$themth]) ? $ADODB_sybase_mths[$themth] : 0;
		if ($themth <= 0) return false;

		switch (strtoupper($rr[6])) {
		case 'P':
			if ($rr[4]<12) $rr[4] += 12;
			break;
		case 'A':
			if ($rr[4]==12) $rr[4] = 0;
			break;
		default:
			break;
		}
		// h-m-s-MM-DD-YY
		return  mktime($rr[4],$rr[5],0,$themth,$rr[2],$rr[3]);
	}
}
?>
Java
/*
 * am335x_evm.h
 *
 * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

/*
 * Board configuration header for the TI AM335x EVM family (EVM, EVM-SK,
 * BeagleBone, BeagleBone Black).  Selects boot media, environment
 * location, peripherals and the default U-Boot environment/boot command.
 */

#ifndef __CONFIG_AM335X_EVM_H
#define __CONFIG_AM335X_EVM_H

#include <configs/ti_am335x_common.h>

#define MACH_TYPE_TIAM335EVM		3589	/* Until the next sync */
#define CONFIG_MACH_TYPE		MACH_TYPE_TIAM335EVM

/* Clock Defines */
#define V_OSCK				24000000  /* Clock output from T2 */
#define V_SCLK				(V_OSCK)

/* Custom script for NOR */
#define CONFIG_SYS_LDSCRIPT		"board/ti/am335x/u-boot.lds"

/* Always 128 KiB env size */
#define CONFIG_ENV_SIZE			(128 << 10)

/* Extra environment variables for NAND boot (empty when NAND is disabled). */
#ifdef CONFIG_NAND
#define NANDARGS \
	"mtdids=" MTDIDS_DEFAULT "\0" \
	"mtdparts=" MTDPARTS_DEFAULT "\0" \
	"nandargs=setenv bootargs console=${console} " \
		"${optargs} " \
		"root=${nandroot} " \
		"rootfstype=${nandrootfstype}\0" \
	"dfu_alt_info_nand=" DFU_ALT_INFO_NAND "\0" \
	"nandroot=ubi0:rootfs rw ubi.mtd=7,2048\0" \
	"nandrootfstype=ubifs rootwait=1\0" \
	"nandsrcaddr=0x280000\0" \
	"nandboot=echo Booting from nand ...; " \
		"run nandargs; " \
		"nand read ${loadaddr} ${nandsrcaddr} ${nandimgsize}; " \
		"bootz ${loadaddr}\0" \
	"nandimgsize=0x500000\0"
#else
#define NANDARGS ""
#endif

#define CONFIG_ENV_VARS_UBOOT_RUNTIME_CONFIG

/* Default environment: load addresses, boot sources and helper scripts. */
#ifndef CONFIG_SPL_BUILD
#define CONFIG_EXTRA_ENV_SETTINGS \
	"loadaddr=0x80200000\0" \
	"fdtaddr=0x80F80000\0" \
	"fdt_high=0xffffffff\0" \
	"boot_fdt=try\0" \
	"rdaddr=0x81000000\0" \
	"bootpart=0:2\0" \
	"bootdir=/boot\0" \
	"bootfile=zImage\0" \
	"fdtfile=undefined\0" \
	"console=ttyO0,115200n8\0" \
	"optargs=\0" \
	"dfu_alt_info_mmc=" DFU_ALT_INFO_MMC "\0" \
	"dfu_alt_info_emmc=rawemmc mmc 0 3751936\0" \
	"mmcdev=0\0" \
	"mmcroot=/dev/mmcblk0p2 ro\0" \
	"mmcrootfstype=ext4 rootwait\0" \
	"rootpath=/export/rootfs\0" \
	"nfsopts=nolock\0" \
	"static_ip=${ipaddr}:${serverip}:${gatewayip}:${netmask}:${hostname}" \
		"::off\0" \
	"ramroot=/dev/ram0 rw ramdisk_size=65536 initrd=${rdaddr},64M\0" \
	"ramrootfstype=ext2\0" \
	"mmcargs=setenv bootargs console=${console} " \
		"${optargs} " \
		"root=${mmcroot} " \
		"rootfstype=${mmcrootfstype}\0" \
	"spiroot=/dev/mtdblock4 rw\0" \
	"spirootfstype=jffs2\0" \
	"spisrcaddr=0xe0000\0" \
	"spiimgsize=0x362000\0" \
	"spibusno=0\0" \
	"spiargs=setenv bootargs console=${console} " \
		"${optargs} " \
		"root=${spiroot} " \
		"rootfstype=${spirootfstype}\0" \
	"netargs=setenv bootargs console=${console} " \
		"${optargs} " \
		"root=/dev/nfs " \
		"nfsroot=${serverip}:${rootpath},${nfsopts} rw " \
		"ip=dhcp\0" \
	"bootenv=uEnv.txt\0" \
	"loadbootenv=load mmc ${mmcdev} ${loadaddr} ${bootenv}\0" \
	"importbootenv=echo Importing environment from mmc ...; " \
		"env import -t $loadaddr $filesize\0" \
	"dfu_alt_info_ram=" DFU_ALT_INFO_RAM "\0" \
	"ramargs=setenv bootargs console=${console} " \
		"${optargs} " \
		"root=${ramroot} " \
		"rootfstype=${ramrootfstype}\0" \
	"loadramdisk=load mmc ${mmcdev} ${rdaddr} ramdisk.gz\0" \
	"loadimage=load mmc ${bootpart} ${loadaddr} ${bootdir}/${bootfile}\0" \
	"loadfdt=load mmc ${bootpart} ${fdtaddr} ${bootdir}/${fdtfile}\0" \
	"mmcloados=run mmcargs; " \
		"if test ${boot_fdt} = yes || test ${boot_fdt} = try; then " \
			"if run loadfdt; then " \
				"bootz ${loadaddr} - ${fdtaddr}; " \
			"else " \
				"if test ${boot_fdt} = try; then " \
					"bootz; " \
				"else " \
					"echo WARN: Cannot load the DT; " \
				"fi; " \
			"fi; " \
		"else " \
			"bootz; " \
		"fi;\0" \
	"mmcboot=mmc dev ${mmcdev}; " \
		"if mmc rescan; then " \
			"echo SD/MMC found on device ${mmcdev};" \
			"if run loadbootenv; then " \
				"echo Loaded environment from ${bootenv};" \
				"run importbootenv;" \
			"fi;" \
			"if test -n $uenvcmd; then " \
				"echo Running uenvcmd ...;" \
				"run uenvcmd;" \
			"fi;" \
			"if run loadimage; then " \
				"run mmcloados;" \
			"fi;" \
		"fi;\0" \
	"spiboot=echo Booting from spi ...; " \
		"run spiargs; " \
		"sf probe ${spibusno}:0; " \
		"sf read ${loadaddr} ${spisrcaddr} ${spiimgsize}; " \
		"bootz ${loadaddr}\0" \
	"netboot=echo Booting from network ...; " \
		"setenv autoload no; " \
		"dhcp; " \
		"tftp ${loadaddr} ${bootfile}; " \
		"tftp ${fdtaddr} ${fdtfile}; " \
		"run netargs; " \
		"bootz ${loadaddr} - ${fdtaddr}\0" \
	"ramboot=echo Booting from ramdisk ...; " \
		"run ramargs; " \
		"bootz ${loadaddr} ${rdaddr} ${fdtaddr}\0" \
	"findfdt="\
		"if test $board_name = A335BONE; then " \
			"setenv fdtfile am335x-bone.dtb; fi; " \
		"if test $board_name = A335BNLT; then " \
			"setenv fdtfile am335x-boneblack.dtb; fi; " \
		"if test $board_name = A33515BB; then " \
			"setenv fdtfile am335x-evm.dtb; fi; " \
		"if test $board_name = A335X_SK; then " \
			"setenv fdtfile am335x-evmsk.dtb; fi; " \
		"if test $fdtfile = undefined; then " \
			"echo WARNING: Could not determine device tree to use; fi; \0" \
	NANDARGS
#endif

/* Boot order: pick DT, try SD (mmc 0), then eMMC (mmc 1), then NAND. */
#define CONFIG_BOOTCOMMAND \
	"run findfdt; " \
	"run mmcboot;" \
	"setenv mmcdev 1; " \
	"setenv bootpart 1:2; " \
	"run mmcboot;" \
	"run nandboot;"

/* NS16550 Configuration */
#define CONFIG_SYS_NS16550_COM1		0x44e09000	/* Base EVM has UART0 */
#define CONFIG_SYS_NS16550_COM2		0x48022000	/* UART1 */
#define CONFIG_SYS_NS16550_COM3		0x48024000	/* UART2 */
#define CONFIG_SYS_NS16550_COM4		0x481a6000	/* UART3 */
#define CONFIG_SYS_NS16550_COM5		0x481a8000	/* UART4 */
#define CONFIG_SYS_NS16550_COM6		0x481aa000	/* UART5 */
#define CONFIG_BAUDRATE			115200

/* I2C Configuration */
#define CONFIG_CMD_EEPROM
#define CONFIG_ENV_EEPROM_IS_ON_I2C
#define CONFIG_SYS_I2C_EEPROM_ADDR	0x50	/* Main EEPROM */
#define CONFIG_SYS_I2C_EEPROM_ADDR_LEN	2
#define CONFIG_SYS_I2C_MULTI_EEPROMS

/* PMIC support */
#define CONFIG_POWER_TPS65217
#define CONFIG_POWER_TPS65910

/* SPL */
#ifndef CONFIG_NOR_BOOT
#define CONFIG_SPL_POWER_SUPPORT
#define CONFIG_SPL_YMODEM_SUPPORT

/* CPSW support */
#define CONFIG_SPL_ETH_SUPPORT

/* USB gadget RNDIS */
#define CONFIG_SPL_MUSB_NEW_SUPPORT

/* General network SPL, both CPSW and USB gadget RNDIS */
#define CONFIG_SPL_NET_SUPPORT
#define CONFIG_SPL_ENV_SUPPORT
#define CONFIG_SPL_NET_VCI_STRING	"AM335x U-Boot SPL"

/* SPI flash. */
#define CONFIG_SPL_SPI_SUPPORT
#define CONFIG_SPL_SPI_FLASH_SUPPORT
#define CONFIG_SPL_SPI_LOAD
#define CONFIG_SPL_SPI_BUS		0
#define CONFIG_SPL_SPI_CS		0
#define CONFIG_SYS_SPI_U_BOOT_OFFS	0x20000

#define CONFIG_SPL_LDSCRIPT		"$(CPUDIR)/am33xx/u-boot-spl.lds"

/* NAND geometry and ECC layout used by the SPL NAND loader. */
#ifdef CONFIG_NAND
#define CONFIG_SYS_NAND_5_ADDR_CYCLE
#define CONFIG_SYS_NAND_PAGE_COUNT	(CONFIG_SYS_NAND_BLOCK_SIZE / \
					 CONFIG_SYS_NAND_PAGE_SIZE)
#define CONFIG_SYS_NAND_PAGE_SIZE	2048
#define CONFIG_SYS_NAND_OOBSIZE		64
#define CONFIG_SYS_NAND_BLOCK_SIZE	(128*1024)
#define CONFIG_SYS_NAND_BAD_BLOCK_POS	NAND_LARGE_BADBLOCK_POS
#define CONFIG_SYS_NAND_ECCPOS		{ 2, 3, 4, 5, 6, 7, 8, 9, \
					 10, 11, 12, 13, 14, 15, 16, 17, \
					 18, 19, 20, 21, 22, 23, 24, 25, \
					 26, 27, 28, 29, 30, 31, 32, 33, \
					 34, 35, 36, 37, 38, 39, 40, 41, \
					 42, 43, 44, 45, 46, 47, 48, 49, \
					 50, 51, 52, 53, 54, 55, 56, 57, }

#define CONFIG_SYS_NAND_ECCSIZE		512
#define CONFIG_SYS_NAND_ECCBYTES	14

#define CONFIG_SYS_NAND_U_BOOT_START	CONFIG_SYS_TEXT_BASE

#define CONFIG_SYS_NAND_U_BOOT_OFFS	0x80000
#endif
#endif

/*
 * For NOR boot, we must set this to the start of where NOR is mapped
 * in memory.
 */
#ifdef CONFIG_NOR_BOOT
#define CONFIG_SYS_TEXT_BASE		0x08000000
#endif

/*
 * USB configuration.  We enable MUSB support, both for host and for
 * gadget.  We set USB0 as peripheral and USB1 as host, based on the
 * board schematic and physical port wired to each.  Then for host we
 * add mass storage support and for gadget we add both RNDIS ethernet
 * and DFU.
 */
#define CONFIG_USB_MUSB_DSPS
#define CONFIG_ARCH_MISC_INIT
#define CONFIG_MUSB_GADGET
#define CONFIG_MUSB_PIO_ONLY
#define CONFIG_MUSB_DISABLE_BULK_COMBINE_SPLIT
#define CONFIG_USB_GADGET
#define CONFIG_USBDOWNLOAD_GADGET
#define CONFIG_USB_GADGET_DUALSPEED
#define CONFIG_USB_GADGET_VBUS_DRAW	2
#define CONFIG_MUSB_HOST
#define CONFIG_AM335X_USB0
#define CONFIG_AM335X_USB0_MODE	MUSB_PERIPHERAL
#define CONFIG_AM335X_USB1
#define CONFIG_AM335X_USB1_MODE MUSB_HOST

#ifdef CONFIG_MUSB_HOST
#define CONFIG_CMD_USB
#define CONFIG_USB_STORAGE
#endif

#ifdef CONFIG_MUSB_GADGET
#define CONFIG_USB_ETHER
#define CONFIG_USB_ETH_RNDIS
#define CONFIG_USBNET_HOST_ADDR	"de:ad:be:af:00:00"

/* USB TI's IDs */
#define CONFIG_G_DNL_VENDOR_NUM 0x0403
#define CONFIG_G_DNL_PRODUCT_NUM 0xBD00
#define CONFIG_G_DNL_MANUFACTURER "Texas Instruments"
#endif /* CONFIG_MUSB_GADGET */

#if defined(CONFIG_SPL_BUILD) && defined(CONFIG_SPL_USBETH_SUPPORT)
/* disable host part of MUSB in SPL */
#undef CONFIG_MUSB_HOST
/*
 * Disable CPSW SPL support so we fit within the 101KiB limit.
 */
#undef CONFIG_SPL_ETH_SUPPORT
#endif

/* USB Device Firmware Update support */
#define CONFIG_DFU_FUNCTION
#define CONFIG_DFU_MMC
#define CONFIG_CMD_DFU
#define DFU_ALT_INFO_MMC \
	"boot part 0 1;" \
	"rootfs part 0 2;" \
	"MLO fat 0 1;" \
	"MLO.raw mmc 100 100;" \
	"u-boot.img.raw mmc 300 400;" \
	"spl-os-args.raw mmc 80 80;" \
	"spl-os-image.raw mmc 900 2000;" \
	"spl-os-args fat 0 1;" \
	"spl-os-image fat 0 1;" \
	"u-boot.img fat 0 1;" \
	"uEnv.txt fat 0 1"
#ifdef CONFIG_NAND
#define CONFIG_DFU_NAND
#define DFU_ALT_INFO_NAND \
	"SPL part 0 1;" \
	"SPL.backup1 part 0 2;" \
	"SPL.backup2 part 0 3;" \
	"SPL.backup3 part 0 4;" \
	"u-boot part 0 5;" \
	"u-boot-spl-os part 0 6;" \
	"kernel part 0 8;" \
	"rootfs part 0 9"
#endif
#define CONFIG_DFU_RAM
#define DFU_ALT_INFO_RAM \
	"kernel ram 0x80200000 0xD80000;" \
	"fdt ram 0x80F80000 0x80000;" \
	"ramdisk ram 0x81000000 0x4000000"

/*
 * Default to using SPI for environment, etc.
 * 0x000000 - 0x020000 : SPL (128KiB)
 * 0x020000 - 0x0A0000 : U-Boot (512KiB)
 * 0x0A0000 - 0x0BFFFF : First copy of U-Boot Environment (128KiB)
 * 0x0C0000 - 0x0DFFFF : Second copy of U-Boot Environment (128KiB)
 * 0x0E0000 - 0x442000 : Linux Kernel
 * 0x442000 - 0x800000 : Userland
 */
#if defined(CONFIG_SPI_BOOT)
#define CONFIG_ENV_IS_IN_SPI_FLASH
#define CONFIG_SYS_REDUNDAND_ENVIRONMENT
#define CONFIG_ENV_SPI_MAX_HZ		CONFIG_SF_DEFAULT_SPEED
#define CONFIG_ENV_SECT_SIZE		(4 << 10) /* 4 KB sectors */
#define CONFIG_ENV_OFFSET		(768 << 10) /* 768 KiB in */
#define CONFIG_ENV_OFFSET_REDUND	(896 << 10) /* 896 KiB in */
#define MTDIDS_DEFAULT			"nor0=m25p80-flash.0"
#define MTDPARTS_DEFAULT		"mtdparts=m25p80-flash.0:128k(SPL)," \
					"512k(u-boot),128k(u-boot-env1)," \
					"128k(u-boot-env2),3464k(kernel)," \
					"-(rootfs)"
#elif defined(CONFIG_EMMC_BOOT)
#undef CONFIG_ENV_IS_NOWHERE
#define CONFIG_ENV_IS_IN_MMC
#define CONFIG_SYS_MMC_ENV_DEV		1
#define CONFIG_SYS_MMC_ENV_PART		2
#define CONFIG_ENV_OFFSET		0x0
#define CONFIG_ENV_OFFSET_REDUND	(CONFIG_ENV_OFFSET + CONFIG_ENV_SIZE)
#define CONFIG_SYS_REDUNDAND_ENVIRONMENT
#endif

/* SPI flash. */
#define CONFIG_CMD_SF
#define CONFIG_SPI_FLASH
#define CONFIG_SPI_FLASH_WINBOND
#define CONFIG_SF_DEFAULT_SPEED		24000000

/* Network. */
#define CONFIG_PHY_GIGE
#define CONFIG_PHYLIB
#define CONFIG_PHY_ADDR			0
#define CONFIG_PHY_SMSC

/* NAND support */
#ifdef CONFIG_NAND
#define CONFIG_CMD_NAND
#define GPMC_NAND_ECC_LP_x16_LAYOUT	1
/* Environment lives in NAND only when neither SPI nor NOR boot is used. */
#if !defined(CONFIG_SPI_BOOT) && !defined(CONFIG_NOR_BOOT)
#define MTDIDS_DEFAULT			"nand0=omap2-nand.0"
#define MTDPARTS_DEFAULT		"mtdparts=omap2-nand.0:128k(SPL)," \
					"128k(SPL.backup1)," \
					"128k(SPL.backup2)," \
					"128k(SPL.backup3),1792k(u-boot)," \
					"128k(u-boot-spl-os)," \
					"128k(u-boot-env),5m(kernel),-(rootfs)"
#define CONFIG_ENV_IS_IN_NAND
#define CONFIG_ENV_OFFSET		0x260000 /* environment starts here */
#define CONFIG_SYS_ENV_SECT_SIZE	(128 << 10)	/* 128 KiB */
#endif
#endif

/*
 * NOR Size = 16 MiB
 * Number of Sectors/Blocks = 128
 * Sector Size = 128 KiB
 * Word length = 16 bits
 * Default layout:
 * 0x000000 - 0x07FFFF : U-Boot (512 KiB)
 * 0x080000 - 0x09FFFF : First copy of U-Boot Environment (128 KiB)
 * 0x0A0000 - 0x0BFFFF : Second copy of U-Boot Environment (128 KiB)
 * 0x0C0000 - 0x4BFFFF : Linux Kernel (4 MiB)
 * 0x4C0000 - 0xFFFFFF : Userland (11 MiB + 256 KiB)
 */
#if defined(CONFIG_NOR)
#undef CONFIG_SYS_NO_FLASH
#define CONFIG_CMD_FLASH
#define CONFIG_SYS_FLASH_USE_BUFFER_WRITE
#define CONFIG_SYS_FLASH_PROTECTION
#define CONFIG_SYS_FLASH_CFI
#define CONFIG_FLASH_CFI_DRIVER
#define CONFIG_FLASH_CFI_MTD
#define CONFIG_SYS_MAX_FLASH_SECT	128
#define CONFIG_SYS_MAX_FLASH_BANKS	1
#define CONFIG_SYS_FLASH_BASE		(0x08000000)
#define CONFIG_SYS_FLASH_CFI_WIDTH	FLASH_CFI_16BIT
#define CONFIG_SYS_MONITOR_BASE		CONFIG_SYS_FLASH_BASE
/* Reduce SPL size by removing unlikely targets */
#ifdef CONFIG_NOR_BOOT
#define CONFIG_ENV_IS_IN_FLASH
#define CONFIG_ENV_SECT_SIZE		(128 << 10)	/* 128 KiB */
#define CONFIG_ENV_OFFSET		(512 << 10)	/* 512 KiB */
#define CONFIG_ENV_OFFSET_REDUND	(768 << 10)	/* 768 KiB */
#define MTDIDS_DEFAULT			"nor0=physmap-flash.0"
#define MTDPARTS_DEFAULT		"mtdparts=physmap-flash.0:" \
					"512k(u-boot)," \
					"128k(u-boot-env1)," \
					"128k(u-boot-env2)," \
					"4m(kernel),-(rootfs)"
#endif
#endif  /* NOR support */

#endif	/* ! __CONFIG_AM335X_EVM_H */
Java
/*
 * Copyright (C) 2008-2014 TrinityCore <http://www.trinitycore.org/>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Scripts for spells with SPELLFAMILY_DEATHKNIGHT and SPELLFAMILY_GENERIC spells used by deathknight players.
 * Ordered alphabetically using scriptname.
 * Scriptnames of files in this file should be prefixed with "spell_dk_".
 */
/* NOTE(review): the header above appears copied from the death knight spell
 * script file; this file actually contains pet stat-scaling aura scripts. */

#include "ScriptMgr.h"
#include "SpellScript.h"
#include "SpellAuraEffects.h"
#include "Unit.h"
#include "Player.h"
#include "Pet.h"

// Spell IDs used by hunter pet scaling auras.
enum HunterPetCalculate
{
    SPELL_TAMED_PET_PASSIVE_06         = 19591,
    SPELL_TAMED_PET_PASSIVE_07         = 20784,
    SPELL_TAMED_PET_PASSIVE_08         = 34666,
    SPELL_TAMED_PET_PASSIVE_09         = 34667,
    SPELL_TAMED_PET_PASSIVE_10         = 34675,
    SPELL_HUNTER_PET_SCALING_01        = 34902,
    SPELL_HUNTER_PET_SCALING_02        = 34903,
    SPELL_HUNTER_PET_SCALING_03        = 34904,
    SPELL_HUNTER_PET_SCALING_04        = 61017,
    SPELL_HUNTER_ANIMAL_HANDLER        = 34453,
};

// Spell and creature-entry IDs used by warlock pet scaling auras.
enum WarlockPetCalculate
{
    SPELL_PET_PASSIVE_CRIT             = 35695,
    SPELL_PET_PASSIVE_DAMAGE_TAKEN     = 35697,
    SPELL_WARLOCK_PET_SCALING_01       = 34947,
    SPELL_WARLOCK_PET_SCALING_02       = 34956,
    SPELL_WARLOCK_PET_SCALING_03       = 34957,
    SPELL_WARLOCK_PET_SCALING_04       = 34958,
    SPELL_WARLOCK_PET_SCALING_05       = 61013,
    ENTRY_FELGUARD                     = 17252,
    ENTRY_VOIDWALKER                   = 1860,
    ENTRY_FELHUNTER                    = 417,
    ENTRY_SUCCUBUS                     = 1863,
    ENTRY_IMP                          = 416,
    SPELL_WARLOCK_GLYPH_OF_VOIDWALKER  = 56247,
};

// Spell and creature-entry IDs used by death knight pet scaling auras.
enum DKPetCalculate
{
    SPELL_DEATH_KNIGHT_RUNE_WEAPON_02  = 51906,
    SPELL_DEATH_KNIGHT_PET_SCALING_01  = 54566,
    SPELL_DEATH_KNIGHT_PET_SCALING_02  = 51996,
    SPELL_DEATH_KNIGHT_PET_SCALING_03  = 61697,
    SPELL_NIGHT_OF_THE_DEAD            = 55620,
    ENTRY_ARMY_OF_THE_DEAD_GHOUL       = 24207,
    SPELL_DEATH_KNIGHT_GLYPH_OF_GHOUL  = 58686,
};

// Spell IDs used by shaman feral spirit scaling auras.
enum ShamanPetCalculate
{
    SPELL_FERAL_SPIRIT_PET_UNK_01      = 35674,
    SPELL_FERAL_SPIRIT_PET_UNK_02      = 35675,
    SPELL_FERAL_SPIRIT_PET_UNK_03      = 35676,
    SPELL_FERAL_SPIRIT_PET_SCALING_04  = 61783,
};

// Miscellaneous pet scaling spell IDs.
enum MiscPetCalculate
{
    SPELL_MAGE_PET_PASSIVE_ELEMENTAL   = 44559,
    SPELL_PET_HEALTH_SCALING           = 61679,
    SPELL_PET_UNK_01                   = 67561,
    SPELL_PET_UNK_02                   = 67557,
};

// Generic pet scaling script shared by several class pets: recalculates the
// pet's crit/hit/expertise aura amounts from the owner's current stats.
// Which calculator hooks which effect depends on the concrete spell id
// (see Register()).
class spell_gen_pet_calculate : public SpellScriptLoader
{
    public:
        spell_gen_pet_calculate() : SpellScriptLoader("spell_gen_pet_calculate") { }

        class spell_gen_pet_calculate_AuraScript : public AuraScript
        {
            PrepareAuraScript(spell_gen_pet_calculate_AuraScript);

            // Only load when the caster is a creature owned by a player.
            bool Load() OVERRIDE
            {
                if (!GetCaster() || !GetCaster()->GetOwner() || GetCaster()->GetOwner()->GetTypeId() != TYPEID_PLAYER)
                    return false;
                return true;
            }

            // Spell crit inherited from the owner: intellect, crit auras and
            // spell crit rating.
            void CalculateAmountCritSpell(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/)
            {
                if (Player* owner = GetCaster()->GetOwner()->ToPlayer())
                {
                    // For others recalculate it from:
                    float CritSpell = 0.0f;
                    // Crit from Intellect
                    CritSpell += owner->GetSpellCritFromIntellect();
                    // Increase crit from SPELL_AURA_MOD_SPELL_CRIT_CHANCE
                    CritSpell += owner->GetTotalAuraModifier(SPELL_AURA_MOD_SPELL_CRIT_CHANCE);
                    // Increase crit from SPELL_AURA_MOD_CRIT_PCT
                    CritSpell += owner->GetTotalAuraModifier(SPELL_AURA_MOD_CRIT_PCT);
                    // Increase crit spell from spell crit ratings
                    CritSpell += owner->GetRatingBonusValue(CR_CRIT_SPELL);

                    amount += int32(CritSpell);
                }
            }

            // Melee crit inherited from the owner: agility, crit auras and
            // melee crit rating.
            void CalculateAmountCritMelee(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/)
            {
                if (Player* owner = GetCaster()->GetOwner()->ToPlayer())
                {
                    // For others recalculate it from:
                    float CritMelee = 0.0f;
                    // Crit from Agility
                    CritMelee += owner->GetMeleeCritFromAgility();
                    // Increase crit from SPELL_AURA_MOD_WEAPON_CRIT_PERCENT
                    CritMelee += owner->GetTotalAuraModifier(SPELL_AURA_MOD_WEAPON_CRIT_PERCENT);
                    // Increase crit from SPELL_AURA_MOD_CRIT_PCT
                    CritMelee += owner->GetTotalAuraModifier(SPELL_AURA_MOD_CRIT_PCT);
                    // Increase crit melee from melee crit ratings
                    CritMelee += owner->GetRatingBonusValue(CR_CRIT_MELEE);

                    amount += int32(CritMelee);
                }
            }

            // Melee hit inherited from the owner's hit auras and melee hit rating.
            void CalculateAmountMeleeHit(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/)
            {
                if (Player* owner = GetCaster()->GetOwner()->ToPlayer())
                {
                    // For others recalculate it from:
                    float HitMelee = 0.0f;
                    // Increase hit from SPELL_AURA_MOD_HIT_CHANCE
                    HitMelee += owner->GetTotalAuraModifier(SPELL_AURA_MOD_HIT_CHANCE);
                    // Increase hit melee from meele hit ratings
                    HitMelee += owner->GetRatingBonusValue(CR_HIT_MELEE);

                    amount += int32(HitMelee);
                }
            }

            // Spell hit inherited from the owner's spell hit auras and rating.
            void CalculateAmountSpellHit(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/)
            {
                if (Player* owner = GetCaster()->GetOwner()->ToPlayer())
                {
                    // For others recalculate it from:
                    float HitSpell = 0.0f;
                    // Increase hit from SPELL_AURA_MOD_SPELL_HIT_CHANCE
                    HitSpell += owner->GetTotalAuraModifier(SPELL_AURA_MOD_SPELL_HIT_CHANCE);
                    // Increase hit spell from spell hit ratings
                    HitSpell += owner->GetRatingBonusValue(CR_HIT_SPELL);

                    amount += int32(HitSpell);
                }
            }

            // Expertise inherited from the owner's expertise auras and rating.
            void CalculateAmountExpertise(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/)
            {
                if (Player* owner = GetCaster()->GetOwner()->ToPlayer())
                {
                    // For others recalculate it from:
                    float Expertise = 0.0f;
                    // Increase hit from SPELL_AURA_MOD_EXPERTISE
                    Expertise += owner->GetTotalAuraModifier(SPELL_AURA_MOD_EXPERTISE);
                    // Increase Expertise from Expertise ratings
                    Expertise += owner->GetRatingBonusValue(CR_EXPERTISE);

                    amount += int32(Expertise);
                }
            }

            // Hook the appropriate calculators per concrete scaling spell id.
            void Register() OVERRIDE
            {
                switch (m_scriptSpellId)
                {
                    case SPELL_TAMED_PET_PASSIVE_06:
                        DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_gen_pet_calculate_AuraScript::CalculateAmountCritMelee, EFFECT_0, SPELL_AURA_MOD_WEAPON_CRIT_PERCENT);
                        DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_gen_pet_calculate_AuraScript::CalculateAmountCritSpell, EFFECT_1, SPELL_AURA_MOD_SPELL_CRIT_CHANCE);
                        break;
                    case SPELL_PET_PASSIVE_CRIT:
                        DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_gen_pet_calculate_AuraScript::CalculateAmountCritSpell, EFFECT_0, SPELL_AURA_MOD_SPELL_CRIT_CHANCE);
                        DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_gen_pet_calculate_AuraScript::CalculateAmountCritMelee, EFFECT_1, SPELL_AURA_MOD_WEAPON_CRIT_PERCENT);
                        break;
                    case SPELL_WARLOCK_PET_SCALING_05:
                    case SPELL_HUNTER_PET_SCALING_04:
                        DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_gen_pet_calculate_AuraScript::CalculateAmountMeleeHit, EFFECT_0, SPELL_AURA_MOD_HIT_CHANCE);
                        DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_gen_pet_calculate_AuraScript::CalculateAmountSpellHit, EFFECT_1, SPELL_AURA_MOD_SPELL_HIT_CHANCE);
                        DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_gen_pet_calculate_AuraScript::CalculateAmountExpertise, EFFECT_2, SPELL_AURA_MOD_EXPERTISE);
                        break;
                    case SPELL_DEATH_KNIGHT_PET_SCALING_03:
                   //   case SPELL_SHAMAN_PET_HIT:
                        DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_gen_pet_calculate_AuraScript::CalculateAmountMeleeHit, EFFECT_0, SPELL_AURA_MOD_HIT_CHANCE);
                        DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_gen_pet_calculate_AuraScript::CalculateAmountSpellHit, EFFECT_1, SPELL_AURA_MOD_SPELL_HIT_CHANCE);
                        break;
                    default:
                        break;
                }
            }
        };

        AuraScript* GetAuraScript() const OVERRIDE
        {
            return new spell_gen_pet_calculate_AuraScript();
        }
};

// Warlock pet scaling (part 1): stamina -> health, attack power and spell
// damage derived from the owner's fire/shadow spell power.
class spell_warl_pet_scaling_01 : public SpellScriptLoader
{
    public:
        spell_warl_pet_scaling_01() : SpellScriptLoader("spell_warl_pet_scaling_01") { }

        class spell_warl_pet_scaling_01_AuraScript : public AuraScript
        {
            PrepareAuraScript(spell_warl_pet_scaling_01_AuraScript);

            // Only load for player-owned casters; resets the cached bonus.
            bool Load() OVERRIDE
            {
                if (!GetCaster() || !GetCaster()->GetOwner() || GetCaster()->GetOwner()->GetTypeId() != TYPEID_PLAYER)
                    return false;
                _tempBonus = 0;
                return true;
            }

            void
CalculateStaminaAmount(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/) { if (Unit* pet = GetUnitOwner()) if (pet->IsPet()) if (Unit* owner = pet->ToPet()->GetOwner()) { float ownerBonus = CalculatePct(owner->GetStat(STAT_STAMINA), 75); amount += ownerBonus; } } void ApplyEffect(AuraEffect const* /* aurEff */, AuraEffectHandleModes /*mode*/) { if (Unit* pet = GetUnitOwner()) if (_tempBonus) { PetLevelInfo const* pInfo = sObjectMgr->GetPetLevelInfo(pet->GetEntry(), pet->getLevel()); uint32 healthMod = 0; uint32 baseHealth = pInfo->health; switch (pet->GetEntry()) { case ENTRY_IMP: healthMod = uint32(_tempBonus * 8.4f); break; case ENTRY_FELGUARD: case ENTRY_VOIDWALKER: healthMod = _tempBonus * 11; break; case ENTRY_SUCCUBUS: healthMod = uint32(_tempBonus * 9.1f); break; case ENTRY_FELHUNTER: healthMod = uint32(_tempBonus * 9.5f); break; default: healthMod = 0; break; } if (healthMod) pet->ToPet()->SetCreateHealth(baseHealth + healthMod); } } void RemoveEffect(AuraEffect const* /* aurEff */, AuraEffectHandleModes /*mode*/) { if (Unit* pet = GetUnitOwner()) if (pet->IsPet()) { PetLevelInfo const* pInfo = sObjectMgr->GetPetLevelInfo(pet->GetEntry(), pet->getLevel()); pet->ToPet()->SetCreateHealth(pInfo->health); } } void CalculateAttackPowerAmount(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/) { if (Unit* pet = GetUnitOwner()) if (pet->IsPet()) if (Unit* owner = pet->ToPet()->GetOwner()) { int32 fire = int32(owner->GetUInt32Value(PLAYER_FIELD_MOD_DAMAGE_DONE_POS + SPELL_SCHOOL_FIRE)) - owner->GetUInt32Value(PLAYER_FIELD_MOD_DAMAGE_DONE_NEG + SPELL_SCHOOL_FIRE); int32 shadow = int32(owner->GetUInt32Value(PLAYER_FIELD_MOD_DAMAGE_DONE_POS + SPELL_SCHOOL_SHADOW)) - owner->GetUInt32Value(PLAYER_FIELD_MOD_DAMAGE_DONE_NEG + SPELL_SCHOOL_SHADOW); int32 maximum = (fire > shadow) ? 
fire : shadow; if (maximum < 0) maximum = 0; float bonusAP = maximum * 0.57f; amount += bonusAP; // Glyph of felguard if (pet->GetEntry() == ENTRY_FELGUARD) { if (AuraEffect* /* aurEff */ect = owner->GetAuraEffect(56246, EFFECT_0)) { float base_attPower = pet->GetModifierValue(UNIT_MOD_ATTACK_POWER, BASE_VALUE) * pet->GetModifierValue(UNIT_MOD_ATTACK_POWER, BASE_PCT); amount += CalculatePct(amount+base_attPower, /* aurEff */ect->GetAmount()); } } } } void CalculateDamageDoneAmount(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/) { if (Unit* pet = GetUnitOwner()) if (pet->IsPet()) if (Unit* owner = pet->ToPet()->GetOwner()) { //the damage bonus used for pets is either fire or shadow damage, whatever is higher int32 fire = int32(owner->GetUInt32Value(PLAYER_FIELD_MOD_DAMAGE_DONE_POS + SPELL_SCHOOL_FIRE)) - owner->GetUInt32Value(PLAYER_FIELD_MOD_DAMAGE_DONE_NEG + SPELL_SCHOOL_FIRE); int32 shadow = int32(owner->GetUInt32Value(PLAYER_FIELD_MOD_DAMAGE_DONE_POS + SPELL_SCHOOL_SHADOW)) - owner->GetUInt32Value(PLAYER_FIELD_MOD_DAMAGE_DONE_NEG + SPELL_SCHOOL_SHADOW); int32 maximum = (fire > shadow) ? 
fire : shadow; float bonusDamage = 0.0f; if (maximum > 0) bonusDamage = maximum * 0.15f; amount += bonusDamage; } } void Register() OVERRIDE { OnEffectRemove += AuraEffectRemoveFn(spell_warl_pet_scaling_01_AuraScript::RemoveEffect, EFFECT_0, SPELL_AURA_MOD_STAT, AURA_EFFECT_HANDLE_CHANGE_AMOUNT_MASK); AfterEffectApply += AuraEffectApplyFn(spell_warl_pet_scaling_01_AuraScript::ApplyEffect, EFFECT_0, SPELL_AURA_MOD_STAT, AURA_EFFECT_HANDLE_CHANGE_AMOUNT_MASK); DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_warl_pet_scaling_01_AuraScript::CalculateStaminaAmount, EFFECT_0, SPELL_AURA_MOD_STAT); DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_warl_pet_scaling_01_AuraScript::CalculateAttackPowerAmount, EFFECT_1, SPELL_AURA_MOD_ATTACK_POWER); DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_warl_pet_scaling_01_AuraScript::CalculateDamageDoneAmount, EFFECT_2, SPELL_AURA_MOD_DAMAGE_DONE); } private: uint32 _tempBonus; }; AuraScript* GetAuraScript() const OVERRIDE { return new spell_warl_pet_scaling_01_AuraScript(); } }; class spell_warl_pet_scaling_02 : public SpellScriptLoader { public: spell_warl_pet_scaling_02() : SpellScriptLoader("spell_warl_pet_scaling_02") { } class spell_warl_pet_scaling_02_AuraScript : public AuraScript { PrepareAuraScript(spell_warl_pet_scaling_02_AuraScript); bool Load() OVERRIDE { if (!GetCaster() || !GetCaster()->GetOwner() || GetCaster()->GetOwner()->GetTypeId() != TYPEID_PLAYER) return false; _tempBonus = 0; return true; } void CalculateIntellectAmount(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/) { if (Unit* pet = GetUnitOwner()) if (pet->IsPet()) if (Unit* owner = pet->ToPet()->GetOwner()) { float ownerBonus = 0.0f; ownerBonus = CalculatePct(owner->GetStat(STAT_INTELLECT), 30); amount += ownerBonus; _tempBonus = ownerBonus; } } void ApplyEffect(AuraEffect const* /* aurEff */, AuraEffectHandleModes /*mode*/) { if (Unit* pet = GetUnitOwner()) if (_tempBonus) { PetLevelInfo const* pInfo = 
sObjectMgr->GetPetLevelInfo(pet->GetEntry(), pet->getLevel());
                        uint32 manaMod = 0;
                        uint32 baseMana = pInfo->mana;
                        // Per-pet-entry mana multiplier applied to the cached
                        // intellect bonus from CalculateIntellectAmount().
                        switch (pet->GetEntry())
                        {
                            case ENTRY_IMP:
                                manaMod = uint32(_tempBonus * 4.9f);
                                break;
                            case ENTRY_VOIDWALKER:
                            case ENTRY_SUCCUBUS:
                            case ENTRY_FELHUNTER:
                            case ENTRY_FELGUARD:
                                manaMod = uint32(_tempBonus * 11.5f);
                                break;
                            default:
                                manaMod = 0;
                                break;
                        }
                        if (manaMod)
                            pet->ToPet()->SetCreateMana(baseMana + manaMod);
                    }
            }

            // Restore the pet's default create mana when the aura is removed.
            void RemoveEffect(AuraEffect const* /* aurEff */, AuraEffectHandleModes /*mode*/)
            {
                if (Unit* pet = GetUnitOwner())
                    if (pet->IsPet())
                    {
                        PetLevelInfo const* pInfo = sObjectMgr->GetPetLevelInfo(pet->GetEntry(), pet->getLevel());
                        pet->ToPet()->SetCreateMana(pInfo->mana);
                    }
            }

            // Pet inherits 35% of the owner's armor.
            void CalculateArmorAmount(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/)
            {
                if (Unit* pet = GetUnitOwner())
                    if (pet->IsPet())
                        if (Unit* owner = pet->ToPet()->GetOwner())
                        {
                            float ownerBonus = 0.0f;
                            ownerBonus = CalculatePct(owner->GetArmor(), 35);
                            amount += ownerBonus;
                        }
            }

            // Pet inherits 40% of the owner's fire resistance.
            void CalculateFireResistanceAmount(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/)
            {
                if (Unit* pet = GetUnitOwner())
                    if (pet->IsPet())
                        if (Unit* owner = pet->ToPet()->GetOwner())
                        {
                            float ownerBonus = 0.0f;
                            ownerBonus = CalculatePct(owner->GetResistance(SPELL_SCHOOL_FIRE), 40);
                            amount += ownerBonus;
                        }
            }

            void Register() OVERRIDE
            {
                OnEffectRemove += AuraEffectRemoveFn(spell_warl_pet_scaling_02_AuraScript::RemoveEffect, EFFECT_0, SPELL_AURA_MOD_STAT, AURA_EFFECT_HANDLE_CHANGE_AMOUNT_MASK);
                AfterEffectApply += AuraEffectApplyFn(spell_warl_pet_scaling_02_AuraScript::ApplyEffect, EFFECT_0, SPELL_AURA_MOD_STAT, AURA_EFFECT_HANDLE_CHANGE_AMOUNT_MASK);
                DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_warl_pet_scaling_02_AuraScript::CalculateIntellectAmount, EFFECT_0, SPELL_AURA_MOD_STAT);
                DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_warl_pet_scaling_02_AuraScript::CalculateArmorAmount, EFFECT_1, SPELL_AURA_MOD_RESISTANCE);
                DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_warl_pet_scaling_02_AuraScript::CalculateFireResistanceAmount, EFFECT_2, SPELL_AURA_MOD_RESISTANCE);
            }

        private:
            // Owner-derived intellect bonus cached between
            // CalculateIntellectAmount() and ApplyEffect().
            uint32 _tempBonus;
        };

        AuraScript* GetAuraScript() const OVERRIDE
        {
            return new spell_warl_pet_scaling_02_AuraScript();
        }
};

// Warlock pet scaling (part 3): frost/arcane/nature resistances inherited
// from the owner (40% each).
class spell_warl_pet_scaling_03 : public SpellScriptLoader
{
    public:
        spell_warl_pet_scaling_03() : SpellScriptLoader("spell_warl_pet_scaling_03") { }

        class spell_warl_pet_scaling_03_AuraScript : public AuraScript
        {
            PrepareAuraScript(spell_warl_pet_scaling_03_AuraScript);

            // Only load for player-owned casters.
            bool Load() OVERRIDE
            {
                if (!GetCaster() || !GetCaster()->GetOwner() || GetCaster()->GetOwner()->GetTypeId() != TYPEID_PLAYER)
                    return false;
                return true;
            }

            // Pet inherits 40% of the owner's frost resistance.
            void CalculateFrostResistanceAmount(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/)
            {
                if (Unit* pet = GetUnitOwner())
                    if (pet->IsPet())
                        if (Unit* owner = pet->ToPet()->GetOwner())
                        {
                            float ownerBonus = 0.0f;
                            ownerBonus = CalculatePct(owner->GetResistance(SPELL_SCHOOL_FROST), 40);
                            amount += ownerBonus;
                        }
            }

            // Pet inherits 40% of the owner's arcane resistance.
            void CalculateArcaneResistanceAmount(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/)
            {
                if (Unit* pet = GetUnitOwner())
                    if (pet->IsPet())
                        if (Unit* owner = pet->ToPet()->GetOwner())
                        {
                            float ownerBonus = 0.0f;
                            ownerBonus = CalculatePct(owner->GetResistance(SPELL_SCHOOL_ARCANE), 40);
                            amount += ownerBonus;
                        }
            }

            // Pet inherits 40% of the owner's nature resistance.
            void CalculateNatureResistanceAmount(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/)
            {
                if (Unit* pet = GetUnitOwner())
                    if (pet->IsPet())
                        if (Unit* owner = pet->ToPet()->GetOwner())
                        {
                            float ownerBonus = 0.0f;
                            ownerBonus = CalculatePct(owner->GetResistance(SPELL_SCHOOL_NATURE), 40);
                            amount += ownerBonus;
                        }
            }

            void Register() OVERRIDE
            {
                DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_warl_pet_scaling_03_AuraScript::CalculateFrostResistanceAmount, EFFECT_0, SPELL_AURA_MOD_RESISTANCE);
                DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_warl_pet_scaling_03_AuraScript::CalculateArcaneResistanceAmount, EFFECT_1, SPELL_AURA_MOD_RESISTANCE);
                DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_warl_pet_scaling_03_AuraScript::CalculateNatureResistanceAmount, EFFECT_2, SPELL_AURA_MOD_RESISTANCE);
            }
        };

        AuraScript* GetAuraScript() const OVERRIDE
        {
            return new spell_warl_pet_scaling_03_AuraScript();
        }
};

// Warlock pet scaling (part 4): shadow resistance inherited from the owner (40%).
class spell_warl_pet_scaling_04 : public SpellScriptLoader
{
    public:
        spell_warl_pet_scaling_04() : SpellScriptLoader("spell_warl_pet_scaling_04") { }

        class spell_warl_pet_scaling_04_AuraScript : public AuraScript
        {
            PrepareAuraScript(spell_warl_pet_scaling_04_AuraScript);

            // Only load for player-owned casters.
            bool Load() OVERRIDE
            {
                if (!GetCaster() || !GetCaster()->GetOwner() || GetCaster()->GetOwner()->GetTypeId() != TYPEID_PLAYER)
                    return false;
                return true;
            }

            // Pet inherits 40% of the owner's shadow resistance.
            void CalculateShadowResistanceAmount(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/)
            {
                if (Unit* pet = GetUnitOwner())
                    if (pet->IsPet())
                        if (Unit* owner = pet->ToPet()->GetOwner())
                        {
                            float ownerBonus = 0.0f;
                            ownerBonus = CalculatePct(owner->GetResistance(SPELL_SCHOOL_SHADOW), 40);
                            amount += ownerBonus;
                        }
            }

            void Register() OVERRIDE
            {
                DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_warl_pet_scaling_04_AuraScript::CalculateShadowResistanceAmount, EFFECT_0, SPELL_AURA_MOD_RESISTANCE);
            }
        };

        AuraScript* GetAuraScript() const OVERRIDE
        {
            return new spell_warl_pet_scaling_04_AuraScript();
        }
};

// Warlock pet scaling (part 5): hit and expertise derived from the owner's
// spell hit (warlock pets use the owner's *spell* hit for these stats).
class spell_warl_pet_scaling_05 : public SpellScriptLoader
{
    public:
        spell_warl_pet_scaling_05() : SpellScriptLoader("spell_warl_pet_scaling_05") { }

        class spell_warl_pet_scaling_05_AuraScript : public AuraScript
        {
            PrepareAuraScript(spell_warl_pet_scaling_05_AuraScript);

            // Only load for player-owned casters.
            bool Load() OVERRIDE
            {
                if (!GetCaster() || !GetCaster()->GetOwner() || GetCaster()->GetOwner()->GetTypeId() != TYPEID_PLAYER)
                    return false;
                return true;
            }

            void CalculateAmountMeleeHit(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/)
            {
                if (Player* owner = GetCaster()->GetOwner()->ToPlayer())
                {
                    // For others recalculate it from:
                    float HitMelee = 0.0f;
// Increase hit from SPELL_AURA_MOD_SPELL_HIT_CHANCE HitMelee += owner->GetTotalAuraModifier(SPELL_AURA_MOD_SPELL_HIT_CHANCE); // Increase hit spell from spell hit ratings HitMelee += owner->GetRatingBonusValue(CR_HIT_SPELL); amount += int32(HitMelee); } } void CalculateAmountSpellHit(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/) { if (Player* owner = GetCaster()->GetOwner()->ToPlayer()) { // For others recalculate it from: float HitSpell = 0.0f; // Increase hit from SPELL_AURA_MOD_SPELL_HIT_CHANCE HitSpell += owner->GetTotalAuraModifier(SPELL_AURA_MOD_SPELL_HIT_CHANCE); // Increase hit spell from spell hit ratings HitSpell += owner->GetRatingBonusValue(CR_HIT_SPELL); amount += int32(HitSpell); } } void CalculateAmountExpertise(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/) { if (Player* owner = GetCaster()->GetOwner()->ToPlayer()) { // For others recalculate it from: float Expertise = 0.0f; // Increase hit from SPELL_AURA_MOD_SPELL_HIT_CHANCE Expertise += owner->GetTotalAuraModifier(SPELL_AURA_MOD_SPELL_HIT_CHANCE); // Increase hit spell from spell hit ratings Expertise += owner->GetRatingBonusValue(CR_HIT_SPELL); amount += int32(Expertise); } } void Register() OVERRIDE { DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_warl_pet_scaling_05_AuraScript::CalculateAmountMeleeHit, EFFECT_0, SPELL_AURA_MOD_HIT_CHANCE); DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_warl_pet_scaling_05_AuraScript::CalculateAmountSpellHit, EFFECT_1, SPELL_AURA_MOD_SPELL_HIT_CHANCE); DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_warl_pet_scaling_05_AuraScript::CalculateAmountExpertise, EFFECT_2, SPELL_AURA_MOD_EXPERTISE); } }; AuraScript* GetAuraScript() const OVERRIDE { return new spell_warl_pet_scaling_05_AuraScript(); } }; class spell_warl_pet_passive : public SpellScriptLoader { public: spell_warl_pet_passive() : SpellScriptLoader("spell_warl_pet_passive") { } class spell_warl_pet_passive_AuraScript : public 
AuraScript { PrepareAuraScript(spell_warl_pet_passive_AuraScript); bool Load() OVERRIDE { if (!GetCaster() || !GetCaster()->GetOwner() || GetCaster()->GetOwner()->GetTypeId() != TYPEID_PLAYER) return false; return true; } void CalculateAmountCritSpell(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/) { if (Player* owner = GetCaster()->GetOwner()->ToPlayer()) { // For others recalculate it from: float CritSpell = 0.0f; // Crit from Intellect CritSpell += owner->GetSpellCritFromIntellect(); // Increase crit from SPELL_AURA_MOD_SPELL_CRIT_CHANCE CritSpell += owner->GetTotalAuraModifier(SPELL_AURA_MOD_SPELL_CRIT_CHANCE); // Increase crit from SPELL_AURA_MOD_CRIT_PCT CritSpell += owner->GetTotalAuraModifier(SPELL_AURA_MOD_CRIT_PCT); // Increase crit spell from spell crit ratings CritSpell += owner->GetRatingBonusValue(CR_CRIT_SPELL); if (AuraApplication* improvedDemonicTacticsApp = owner->GetAuraApplicationOfRankedSpell(54347)) if (Aura* improvedDemonicTactics = improvedDemonicTacticsApp->GetBase()) if (AuraEffect* improvedDemonicTacticsEffect = improvedDemonicTactics->GetEffect(EFFECT_0)) amount += CalculatePct(CritSpell, improvedDemonicTacticsEffect->GetAmount()); } } void CalculateAmountCritMelee(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/) { if (Player* owner = GetCaster()->GetOwner()->ToPlayer()) { // For others recalculate it from: float CritMelee = 0.0f; // Crit from Agility CritMelee += owner->GetMeleeCritFromAgility(); // Increase crit from SPELL_AURA_MOD_WEAPON_CRIT_PERCENT CritMelee += owner->GetTotalAuraModifier(SPELL_AURA_MOD_WEAPON_CRIT_PERCENT); // Increase crit from SPELL_AURA_MOD_CRIT_PCT CritMelee += owner->GetTotalAuraModifier(SPELL_AURA_MOD_CRIT_PCT); // Increase crit melee from melee crit ratings CritMelee += owner->GetRatingBonusValue(CR_CRIT_MELEE); if (AuraApplication* improvedDemonicTacticsApp = owner->GetAuraApplicationOfRankedSpell(54347)) if (Aura* improvedDemonicTactics = 
improvedDemonicTacticsApp->GetBase()) if (AuraEffect* improvedDemonicTacticsEffect = improvedDemonicTactics->GetEffect(EFFECT_0)) amount += CalculatePct(CritMelee, improvedDemonicTacticsEffect->GetAmount()); } } void Register() OVERRIDE { DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_warl_pet_passive_AuraScript::CalculateAmountCritSpell, EFFECT_0, SPELL_AURA_MOD_SPELL_CRIT_CHANCE); DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_warl_pet_passive_AuraScript::CalculateAmountCritMelee, EFFECT_1, SPELL_AURA_MOD_WEAPON_CRIT_PERCENT); } }; AuraScript* GetAuraScript() const OVERRIDE { return new spell_warl_pet_passive_AuraScript(); } }; // this doesnt actually fit in here class spell_warl_pet_passive_damage_done : public SpellScriptLoader { public: spell_warl_pet_passive_damage_done() : SpellScriptLoader("spell_warl_pet_passive_damage_done") { } class spell_warl_pet_passive_damage_done_AuraScript : public AuraScript { PrepareAuraScript(spell_warl_pet_passive_damage_done_AuraScript); bool Load() OVERRIDE { if (!GetCaster() || !GetCaster()->GetOwner() || GetCaster()->GetOwner()->GetTypeId() != TYPEID_PLAYER) return false; return true; } void CalculateAmountDamageDone(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/) { if (!GetCaster() || !GetCaster()->GetOwner()) return; if (GetCaster()->GetOwner()->ToPlayer()) { switch (GetCaster()->GetEntry()) { case ENTRY_VOIDWALKER: amount += -16; break; case ENTRY_FELHUNTER: amount += -20; break; case ENTRY_SUCCUBUS: case ENTRY_FELGUARD: amount += 5; break; } } } void Register() OVERRIDE { DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_warl_pet_passive_damage_done_AuraScript::CalculateAmountDamageDone, EFFECT_0, SPELL_AURA_MOD_DAMAGE_PERCENT_DONE); DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_warl_pet_passive_damage_done_AuraScript::CalculateAmountDamageDone, EFFECT_1, SPELL_AURA_MOD_DAMAGE_PERCENT_DONE); } }; AuraScript* GetAuraScript() const OVERRIDE { return new 
spell_warl_pet_passive_damage_done_AuraScript(); } }; class spell_warl_pet_passive_voidwalker : public SpellScriptLoader { public: spell_warl_pet_passive_voidwalker() : SpellScriptLoader("spell_warl_pet_passive_voidwalker") { } class spell_warl_pet_passive_voidwalker_AuraScript : public AuraScript { PrepareAuraScript(spell_warl_pet_passive_voidwalker_AuraScript); bool Load() OVERRIDE { if (!GetCaster() || !GetCaster()->GetOwner() || GetCaster()->GetOwner()->GetTypeId() != TYPEID_PLAYER) return false; return true; } void CalculateAmount(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/) { if (Unit* pet = GetUnitOwner()) if (pet->IsPet()) if (Unit* owner = pet->ToPet()->GetOwner()) if (AuraEffect* /* aurEff */ect = owner->GetAuraEffect(SPELL_WARLOCK_GLYPH_OF_VOIDWALKER, EFFECT_0)) amount += /* aurEff */ect->GetAmount(); } void Register() OVERRIDE { DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_warl_pet_passive_voidwalker_AuraScript::CalculateAmount, EFFECT_0, SPELL_AURA_MOD_TOTAL_STAT_PERCENTAGE); } }; AuraScript* GetAuraScript() const OVERRIDE { return new spell_warl_pet_passive_voidwalker_AuraScript(); } }; class spell_sha_pet_scaling_04 : public SpellScriptLoader { public: spell_sha_pet_scaling_04() : SpellScriptLoader("spell_sha_pet_scaling_04") { } class spell_sha_pet_scaling_04_AuraScript : public AuraScript { PrepareAuraScript(spell_sha_pet_scaling_04_AuraScript); bool Load() OVERRIDE { if (!GetCaster() || !GetCaster()->GetOwner() || GetCaster()->GetOwner()->GetTypeId() != TYPEID_PLAYER) return false; return true; } void CalculateAmountMeleeHit(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/) { if (Player* owner = GetCaster()->GetOwner()->ToPlayer()) { // For others recalculate it from: float HitMelee = 0.0f; // Increase hit from SPELL_AURA_MOD_HIT_CHANCE HitMelee += owner->GetTotalAuraModifier(SPELL_AURA_MOD_HIT_CHANCE); // Increase hit melee from meele hit ratings HitMelee += 
owner->GetRatingBonusValue(CR_HIT_MELEE); amount += int32(HitMelee); } } void CalculateAmountSpellHit(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/) { if (Player* owner = GetCaster()->GetOwner()->ToPlayer()) { // For others recalculate it from: float HitSpell = 0.0f; // Increase hit from SPELL_AURA_MOD_SPELL_HIT_CHANCE HitSpell += owner->GetTotalAuraModifier(SPELL_AURA_MOD_SPELL_HIT_CHANCE); // Increase hit spell from spell hit ratings HitSpell += owner->GetRatingBonusValue(CR_HIT_SPELL); amount += int32(HitSpell); } } void Register() OVERRIDE { DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_sha_pet_scaling_04_AuraScript::CalculateAmountMeleeHit, EFFECT_0, SPELL_AURA_MOD_HIT_CHANCE); DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_sha_pet_scaling_04_AuraScript::CalculateAmountSpellHit, EFFECT_1, SPELL_AURA_MOD_SPELL_HIT_CHANCE); } }; AuraScript* GetAuraScript() const OVERRIDE { return new spell_sha_pet_scaling_04_AuraScript(); } }; class spell_hun_pet_scaling_01 : public SpellScriptLoader { public: spell_hun_pet_scaling_01() : SpellScriptLoader("spell_hun_pet_scaling_01") { } class spell_hun_pet_scaling_01_AuraScript : public AuraScript { PrepareAuraScript(spell_hun_pet_scaling_01_AuraScript); void CalculateStaminaAmount(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/) { if (Unit* pet = GetUnitOwner()) if (pet->IsPet()) if (Unit* owner = pet->ToPet()->GetOwner()) { float mod = 0.45f; float ownerBonus = 0.0f; PetSpellMap::const_iterator itr = (pet->ToPet()->m_spells.find(62758)); // Wild Hunt rank 1 if (itr == pet->ToPet()->m_spells.end()) itr = pet->ToPet()->m_spells.find(62762); // Wild Hunt rank 2 if (itr != pet->ToPet()->m_spells.end()) // If pet has Wild Hunt { SpellInfo const* spellInfo = sSpellMgr->GetSpellInfo(itr->first); // Then get the SpellProto and add the dummy effect value AddPct(mod, spellInfo->Effects[EFFECT_0].CalcValue()); } ownerBonus = owner->GetStat(STAT_STAMINA)*mod; amount += 
ownerBonus; } } void ApplyEffect(AuraEffect const* /* aurEff */, AuraEffectHandleModes /*mode*/) { if (Unit* pet = GetUnitOwner()) if (_tempHealth) pet->SetHealth(_tempHealth); } void RemoveEffect(AuraEffect const* /* aurEff */, AuraEffectHandleModes /*mode*/) { if (Unit* pet = GetUnitOwner()) _tempHealth = pet->GetHealth(); } void CalculateAttackPowerAmount(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/) { if (Unit* pet = GetUnitOwner()) { if (!pet->IsPet()) return; Unit* owner = pet->ToPet()->GetOwner(); if (!owner) return; float mod = 1.0f; //Hunter contribution modifier float bonusAP = 0.0f; PetSpellMap::const_iterator itr = (pet->ToPet()->m_spells.find(62758)); // Wild Hunt rank 1 if (itr == pet->ToPet()->m_spells.end()) itr = pet->ToPet()->m_spells.find(62762); // Wild Hunt rank 2 if (itr != pet->ToPet()->m_spells.end()) // If pet has Wild Hunt { SpellInfo const* spellInfo = sSpellMgr->GetSpellInfo(itr->first); // Then get the SpellProto and add the dummy effect value mod += CalculatePct(1.0f, spellInfo->Effects[EFFECT_1].CalcValue()); } bonusAP = owner->GetTotalAttackPowerValue(RANGED_ATTACK) * 0.22f * mod; amount += bonusAP; } } void CalculateDamageDoneAmount(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/) { if (Unit* pet = GetUnitOwner()) { if (!pet->IsPet()) return; Unit* owner = pet->ToPet()->GetOwner(); if (!owner) return; float mod = 1.0f; //Hunter contribution modifier float bonusDamage = 0.0f; PetSpellMap::const_iterator itr = (pet->ToPet()->m_spells.find(62758)); // Wild Hunt rank 1 if (itr == pet->ToPet()->m_spells.end()) itr = pet->ToPet()->m_spells.find(62762); // Wild Hunt rank 2 if (itr != pet->ToPet()->m_spells.end()) // If pet has Wild Hunt { SpellInfo const* spellInfo = sSpellMgr->GetSpellInfo(itr->first); // Then get the SpellProto and add the dummy effect value mod += CalculatePct(1.0f, spellInfo->Effects[EFFECT_1].CalcValue()); } bonusDamage = 
owner->GetTotalAttackPowerValue(RANGED_ATTACK) * 0.1287f * mod; amount += bonusDamage; } } void Register() OVERRIDE { OnEffectRemove += AuraEffectRemoveFn(spell_hun_pet_scaling_01_AuraScript::RemoveEffect, EFFECT_0, SPELL_AURA_MOD_STAT, AURA_EFFECT_HANDLE_CHANGE_AMOUNT_MASK); AfterEffectApply += AuraEffectApplyFn(spell_hun_pet_scaling_01_AuraScript::ApplyEffect, EFFECT_0, SPELL_AURA_MOD_STAT, AURA_EFFECT_HANDLE_CHANGE_AMOUNT_MASK); DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_hun_pet_scaling_01_AuraScript::CalculateStaminaAmount, EFFECT_0, SPELL_AURA_MOD_STAT); DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_hun_pet_scaling_01_AuraScript::CalculateAttackPowerAmount, EFFECT_1, SPELL_AURA_MOD_ATTACK_POWER); DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_hun_pet_scaling_01_AuraScript::CalculateDamageDoneAmount, EFFECT_2, SPELL_AURA_MOD_DAMAGE_DONE); } private: uint32 _tempHealth; }; AuraScript* GetAuraScript() const OVERRIDE { return new spell_hun_pet_scaling_01_AuraScript(); } }; class spell_hun_pet_scaling_02 : public SpellScriptLoader { public: spell_hun_pet_scaling_02() : SpellScriptLoader("spell_hun_pet_scaling_02") { } class spell_hun_pet_scaling_02_AuraScript : public AuraScript { PrepareAuraScript(spell_hun_pet_scaling_02_AuraScript); bool Load() OVERRIDE { if (!GetCaster() || !GetCaster()->GetOwner() || GetCaster()->GetOwner()->GetTypeId() != TYPEID_PLAYER) return false; return true; } void CalculateFrostResistanceAmount(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/) { if (Unit* pet = GetUnitOwner()) { if (!pet->IsPet()) return; Unit* owner = pet->ToPet()->GetOwner(); if (!owner) return; float ownerBonus = 0.0f; ownerBonus = CalculatePct(owner->GetResistance(SPELL_SCHOOL_FROST), 40); amount += ownerBonus; } } void CalculateFireResistanceAmount(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/) { if (Unit* pet = GetUnitOwner()) { if (!pet->IsPet()) return; Unit* owner = 
pet->ToPet()->GetOwner(); if (!owner) return; float ownerBonus = 0.0f; ownerBonus = CalculatePct(owner->GetResistance(SPELL_SCHOOL_FIRE), 40); amount += ownerBonus; } } void CalculateNatureResistanceAmount(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/) { if (Unit* pet = GetUnitOwner()) { if (!pet->IsPet()) return; Unit* owner = pet->ToPet()->GetOwner(); if (!owner) return; float ownerBonus = 0.0f; ownerBonus = CalculatePct(owner->GetResistance(SPELL_SCHOOL_NATURE), 40); amount += ownerBonus; } } void Register() OVERRIDE { DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_hun_pet_scaling_02_AuraScript::CalculateFrostResistanceAmount, EFFECT_1, SPELL_AURA_MOD_RESISTANCE); DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_hun_pet_scaling_02_AuraScript::CalculateFireResistanceAmount, EFFECT_0, SPELL_AURA_MOD_RESISTANCE); DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_hun_pet_scaling_02_AuraScript::CalculateNatureResistanceAmount, EFFECT_2, SPELL_AURA_MOD_RESISTANCE); } }; AuraScript* GetAuraScript() const OVERRIDE { return new spell_hun_pet_scaling_02_AuraScript(); } }; class spell_hun_pet_scaling_03 : public SpellScriptLoader { public: spell_hun_pet_scaling_03() : SpellScriptLoader("spell_hun_pet_scaling_03") { } class spell_hun_pet_scaling_03_AuraScript : public AuraScript { PrepareAuraScript(spell_hun_pet_scaling_03_AuraScript); bool Load() OVERRIDE { if (!GetCaster() || !GetCaster()->GetOwner() || GetCaster()->GetOwner()->GetTypeId() != TYPEID_PLAYER) return false; return true; } void CalculateShadowResistanceAmount(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/) { if (Unit* pet = GetUnitOwner()) { if (!pet->IsPet()) return; Unit* owner = pet->ToPet()->GetOwner(); if (!owner) return; float ownerBonus = 0.0f; ownerBonus = CalculatePct(owner->GetResistance(SPELL_SCHOOL_SHADOW), 40); amount += ownerBonus; } } void CalculateArcaneResistanceAmount(AuraEffect const* /* aurEff */, int32& amount, bool& 
/*canBeRecalculated*/) { if (Unit* pet = GetUnitOwner()) { if (!pet->IsPet()) return; Unit* owner = pet->ToPet()->GetOwner(); if (!owner) return; float ownerBonus = 0.0f; ownerBonus = CalculatePct(owner->GetResistance(SPELL_SCHOOL_ARCANE), 40); amount += ownerBonus; } } void CalculateArmorAmount(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/) { if (Unit* pet = GetUnitOwner()) { if (!pet->IsPet()) return; Unit* owner = pet->ToPet()->GetOwner(); if (!owner) return; float ownerBonus = 0.0f; ownerBonus = CalculatePct(owner->GetArmor(), 35); amount += ownerBonus; } } void Register() OVERRIDE { DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_hun_pet_scaling_03_AuraScript::CalculateShadowResistanceAmount, EFFECT_0, SPELL_AURA_MOD_RESISTANCE); DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_hun_pet_scaling_03_AuraScript::CalculateArcaneResistanceAmount, EFFECT_1, SPELL_AURA_MOD_RESISTANCE); DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_hun_pet_scaling_03_AuraScript::CalculateArmorAmount, EFFECT_2, SPELL_AURA_MOD_RESISTANCE); } }; AuraScript* GetAuraScript() const OVERRIDE { return new spell_hun_pet_scaling_03_AuraScript(); } }; class spell_hun_pet_scaling_04 : public SpellScriptLoader { public: spell_hun_pet_scaling_04() : SpellScriptLoader("spell_hun_pet_scaling_04") { } class spell_hun_pet_scaling_04_AuraScript : public AuraScript { PrepareAuraScript(spell_hun_pet_scaling_04_AuraScript); bool Load() OVERRIDE { if (!GetCaster() || !GetCaster()->GetOwner() || GetCaster()->GetOwner()->GetTypeId() != TYPEID_PLAYER) return false; return true; } void CalculateAmountMeleeHit(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/) { if (!GetCaster() || !GetCaster()->GetOwner()) return; if (Player* owner = GetCaster()->GetOwner()->ToPlayer()) { // For others recalculate it from: float HitMelee = 0.0f; // Increase hit from SPELL_AURA_MOD_HIT_CHANCE HitMelee += owner->GetTotalAuraModifier(SPELL_AURA_MOD_HIT_CHANCE); // 
Increase hit melee from meele hit ratings HitMelee += owner->GetRatingBonusValue(CR_HIT_MELEE); amount += int32(HitMelee); } } void CalculateAmountSpellHit(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/) { if (!GetCaster() || !GetCaster()->GetOwner()) return; if (Player* owner = GetCaster()->GetOwner()->ToPlayer()) { // For others recalculate it from: float HitSpell = 0.0f; // Increase hit from SPELL_AURA_MOD_SPELL_HIT_CHANCE HitSpell += owner->GetTotalAuraModifier(SPELL_AURA_MOD_SPELL_HIT_CHANCE); // Increase hit spell from spell hit ratings HitSpell += owner->GetRatingBonusValue(CR_HIT_SPELL); amount += int32(HitSpell); } } void CalculateAmountExpertise(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/) { if (!GetCaster() || !GetCaster()->GetOwner()) return; if (Player* owner = GetCaster()->GetOwner()->ToPlayer()) { // For others recalculate it from: float Expertise = 0.0f; // Increase hit from SPELL_AURA_MOD_EXPERTISE Expertise += owner->GetTotalAuraModifier(SPELL_AURA_MOD_EXPERTISE); // Increase Expertise from Expertise ratings Expertise += owner->GetRatingBonusValue(CR_EXPERTISE); amount += int32(Expertise); } } void Register() OVERRIDE { DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_hun_pet_scaling_04_AuraScript::CalculateAmountMeleeHit, EFFECT_0, SPELL_AURA_MOD_HIT_CHANCE); DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_hun_pet_scaling_04_AuraScript::CalculateAmountSpellHit, EFFECT_1, SPELL_AURA_MOD_SPELL_HIT_CHANCE); DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_hun_pet_scaling_04_AuraScript::CalculateAmountExpertise, EFFECT_2, SPELL_AURA_MOD_EXPERTISE); } }; AuraScript* GetAuraScript() const OVERRIDE { return new spell_hun_pet_scaling_04_AuraScript(); } }; class spell_hun_pet_passive_crit : public SpellScriptLoader { public: spell_hun_pet_passive_crit() : SpellScriptLoader("spell_hun_pet_passive_crit") { } class spell_hun_pet_passive_crit_AuraScript : public AuraScript { 
PrepareAuraScript(spell_hun_pet_passive_crit_AuraScript); bool Load() OVERRIDE { if (!GetCaster() || !GetCaster()->GetOwner() || GetCaster()->GetOwner()->GetTypeId() != TYPEID_PLAYER) return false; return true; } void CalculateAmountCritSpell(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/) { if (!GetCaster() || !GetCaster()->GetOwner()) return; if (GetCaster()->GetOwner()->ToPlayer()) { // For others recalculate it from: float CritSpell = 0.0f; // Crit from Intellect // CritSpell += owner->GetSpellCritFromIntellect(); // Increase crit from SPELL_AURA_MOD_SPELL_CRIT_CHANCE // CritSpell += owner->GetTotalAuraModifier(SPELL_AURA_MOD_SPELL_CRIT_CHANCE); // Increase crit from SPELL_AURA_MOD_CRIT_PCT // CritSpell += owner->GetTotalAuraModifier(SPELL_AURA_MOD_CRIT_PCT); // Increase crit spell from spell crit ratings // CritSpell += owner->GetRatingBonusValue(CR_CRIT_SPELL); amount += (CritSpell*0.8f); } } void CalculateAmountCritMelee(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/) { if (!GetCaster() || !GetCaster()->GetOwner()) return; if (GetCaster()->GetOwner()->ToPlayer()) { // For others recalculate it from: float CritMelee = 0.0f; // Crit from Agility // CritMelee += owner->GetMeleeCritFromAgility(); // Increase crit from SPELL_AURA_MOD_WEAPON_CRIT_PERCENT // CritMelee += owner->GetTotalAuraModifier(SPELL_AURA_MOD_WEAPON_CRIT_PERCENT); // Increase crit from SPELL_AURA_MOD_CRIT_PCT // CritMelee += owner->GetTotalAuraModifier(SPELL_AURA_MOD_CRIT_PCT); // Increase crit melee from melee crit ratings // CritMelee += owner->GetRatingBonusValue(CR_CRIT_MELEE); amount += (CritMelee*0.8f); } } void Register() OVERRIDE { DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_hun_pet_passive_crit_AuraScript::CalculateAmountCritSpell, EFFECT_1, SPELL_AURA_MOD_SPELL_CRIT_CHANCE); DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_hun_pet_passive_crit_AuraScript::CalculateAmountCritMelee, EFFECT_0, 
SPELL_AURA_MOD_WEAPON_CRIT_PERCENT); } }; AuraScript* GetAuraScript() const OVERRIDE { return new spell_hun_pet_passive_crit_AuraScript(); } }; class spell_hun_pet_passive_damage_done : public SpellScriptLoader { public: spell_hun_pet_passive_damage_done() : SpellScriptLoader("spell_hun_pet_passive_damage_done") { } class spell_hun_pet_passive_damage_done_AuraScript : public AuraScript { PrepareAuraScript(spell_hun_pet_passive_damage_done_AuraScript); bool Load() OVERRIDE { if (!GetCaster() || !GetCaster()->GetOwner() || GetCaster()->GetOwner()->GetTypeId() != TYPEID_PLAYER) return false; return true; } void CalculateAmountDamageDone(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/) { if (!GetCaster() || !GetCaster()->GetOwner()) return; if (GetCaster()->GetOwner()->ToPlayer()) { // Pet's base damage changes depending on happiness if (GetCaster()->IsPet() && GetCaster()->ToPet()->IsHunterPet()) { switch (GetCaster()->ToPet()->GetHappinessState()) { case HAPPY: // 125% of normal damage amount += 25.0f; break; case CONTENT: // 100% of normal damage, nothing to modify break; case UNHAPPY: // 75% of normal damage amount += -25.0f; break; } } // Cobra Reflexes if (AuraEffect* cobraReflexes = GetCaster()->GetAuraEffectOfRankedSpell(61682, EFFECT_0)) amount -= cobraReflexes->GetAmount(); } } void Register() OVERRIDE { DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_hun_pet_passive_damage_done_AuraScript::CalculateAmountDamageDone, EFFECT_0, SPELL_AURA_MOD_DAMAGE_PERCENT_DONE); } }; AuraScript* GetAuraScript() const OVERRIDE { return new spell_hun_pet_passive_damage_done_AuraScript(); } }; class spell_hun_animal_handler : public SpellScriptLoader { public: spell_hun_animal_handler() : SpellScriptLoader("spell_hun_animal_handler") { } class spell_hun_animal_handler_AuraScript : public AuraScript { PrepareAuraScript(spell_hun_animal_handler_AuraScript); bool Load() OVERRIDE { if (!GetCaster() || !GetCaster()->GetOwner() || 
GetCaster()->GetOwner()->GetTypeId() != TYPEID_PLAYER) return false; return true; } void CalculateAmountDamageDone(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/) { if (!GetCaster() || !GetCaster()->GetOwner()) return; if (Player* owner = GetCaster()->GetOwner()->ToPlayer()) { if (AuraEffect* /* aurEff */ect = owner->GetAuraEffectOfRankedSpell(SPELL_HUNTER_ANIMAL_HANDLER, EFFECT_1)) amount = /* aurEff */ect->GetAmount(); else amount = 0; } } void Register() OVERRIDE { DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_hun_animal_handler_AuraScript::CalculateAmountDamageDone, EFFECT_0, SPELL_AURA_MOD_ATTACK_POWER_PCT); } }; AuraScript* GetAuraScript() const OVERRIDE { return new spell_hun_animal_handler_AuraScript(); } }; class spell_dk_avoidance_passive : public SpellScriptLoader { public: spell_dk_avoidance_passive() : SpellScriptLoader("spell_dk_avoidance_passive") { } class spell_dk_avoidance_passive_AuraScript : public AuraScript { PrepareAuraScript(spell_dk_avoidance_passive_AuraScript); bool Load() OVERRIDE { if (!GetCaster() || !GetCaster()->GetOwner() || GetCaster()->GetOwner()->GetTypeId() != TYPEID_PLAYER) return false; return true; } void CalculateAvoidanceAmount(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/) { if (Unit* pet = GetUnitOwner()) { if (Unit* owner = pet->GetOwner()) { // Army of the dead ghoul if (pet->GetEntry() == ENTRY_ARMY_OF_THE_DEAD_GHOUL) amount = -90; // Night of the dead else if (Aura* aur = owner->GetAuraOfRankedSpell(SPELL_NIGHT_OF_THE_DEAD)) amount = aur->GetSpellInfo()->Effects[EFFECT_2].CalcValue(); } } } void Register() OVERRIDE { DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_dk_avoidance_passive_AuraScript::CalculateAvoidanceAmount, EFFECT_0, SPELL_AURA_MOD_CREATURE_AOE_DAMAGE_AVOIDANCE); } }; AuraScript* GetAuraScript() const OVERRIDE { return new spell_dk_avoidance_passive_AuraScript(); } }; class spell_dk_pet_scaling_01 : public SpellScriptLoader { public: 
spell_dk_pet_scaling_01() : SpellScriptLoader("spell_dk_pet_scaling_01") { } class spell_dk_pet_scaling_01_AuraScript : public AuraScript { PrepareAuraScript(spell_dk_pet_scaling_01_AuraScript); bool Load() OVERRIDE { if (!GetCaster() || !GetCaster()->GetOwner() || GetCaster()->GetOwner()->GetTypeId() != TYPEID_PLAYER) return false; _tempHealth = 0; return true; } void CalculateStaminaAmount(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/) { if (Unit* pet = GetUnitOwner()) { if (pet->IsGuardian()) { if (Unit* owner = pet->GetOwner()) { float mod = 0.3f; // Ravenous Dead. Check just if owner has Ravenous Dead since it's effect is not an aura if (AuraEffect const* aurEff = owner->GetAuraEffect(SPELL_AURA_MOD_TOTAL_STAT_PERCENTAGE, SPELLFAMILY_DEATHKNIGHT, 3010, 0)) mod += aurEff->GetSpellInfo()->Effects[EFFECT_1].CalcValue()/100; // Ravenous Dead edits the original scale // Glyph of the Ghoul if (AuraEffect const* aurEff = owner->GetAuraEffect(SPELL_DEATH_KNIGHT_GLYPH_OF_GHOUL, 0)) mod += aurEff->GetAmount()/100; float ownerBonus = float(owner->GetStat(STAT_STAMINA)) * mod; amount += ownerBonus; } } } } void ApplyEffect(AuraEffect const* /* aurEff */, AuraEffectHandleModes /*mode*/) { if (Unit* pet = GetUnitOwner()) if (_tempHealth) pet->SetHealth(_tempHealth); } void RemoveEffect(AuraEffect const* /* aurEff */, AuraEffectHandleModes /*mode*/) { if (Unit* pet = GetUnitOwner()) _tempHealth = pet->GetHealth(); } void CalculateStrengthAmount(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/) { if (Unit* pet = GetUnitOwner()) { if (!pet->IsGuardian()) return; Unit* owner = pet->GetOwner(); if (!owner) return; float mod = 0.7f; // Ravenous Dead AuraEffect const* aurEff = NULL; // Check just if owner has Ravenous Dead since it's effect is not an aura aurEff = owner->GetAuraEffect(SPELL_AURA_MOD_TOTAL_STAT_PERCENTAGE, SPELLFAMILY_DEATHKNIGHT, 3010, 0); if (aurEff) { mod += CalculatePct(mod, 
aurEff->GetSpellInfo()->Effects[EFFECT_1].CalcValue()); // Ravenous Dead edits the original scale } // Glyph of the Ghoul aurEff = owner->GetAuraEffect(58686, 0); if (aurEff) mod += CalculatePct(1.0f, aurEff->GetAmount()); // Glyph of the Ghoul adds a flat value to the scale mod float ownerBonus = float(owner->GetStat(STAT_STRENGTH)) * mod; amount += ownerBonus; } } void Register() OVERRIDE { OnEffectRemove += AuraEffectRemoveFn(spell_dk_pet_scaling_01_AuraScript::RemoveEffect, EFFECT_0, SPELL_AURA_MOD_STAT, AURA_EFFECT_HANDLE_CHANGE_AMOUNT_MASK); AfterEffectApply += AuraEffectApplyFn(spell_dk_pet_scaling_01_AuraScript::ApplyEffect, EFFECT_0, SPELL_AURA_MOD_STAT, AURA_EFFECT_HANDLE_CHANGE_AMOUNT_MASK); DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_dk_pet_scaling_01_AuraScript::CalculateStaminaAmount, EFFECT_0, SPELL_AURA_MOD_STAT); DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_dk_pet_scaling_01_AuraScript::CalculateStrengthAmount, EFFECT_1, SPELL_AURA_MOD_STAT); } private: uint32 _tempHealth; }; AuraScript* GetAuraScript() const OVERRIDE { return new spell_dk_pet_scaling_01_AuraScript(); } }; class spell_dk_pet_scaling_02 : public SpellScriptLoader { public: spell_dk_pet_scaling_02() : SpellScriptLoader("spell_dk_pet_scaling_02") { } class spell_dk_pet_scaling_02_AuraScript : public AuraScript { PrepareAuraScript(spell_dk_pet_scaling_02_AuraScript); bool Load() OVERRIDE { if (!GetCaster() || !GetCaster()->GetOwner() || GetCaster()->GetOwner()->GetTypeId() != TYPEID_PLAYER) return false; return true; } void CalculateAmountMeleeHaste(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/) { if (!GetCaster() || !GetCaster()->GetOwner()) return; if (Player* owner = GetCaster()->GetOwner()->ToPlayer()) { // For others recalculate it from: float HasteMelee = 0.0f; // Increase hit from SPELL_AURA_MOD_HIT_CHANCE HasteMelee += (1-owner->m_modAttackSpeedPct[BASE_ATTACK])*100; amount += int32(HasteMelee); } } void Register() OVERRIDE { 
DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_dk_pet_scaling_02_AuraScript::CalculateAmountMeleeHaste, EFFECT_1, SPELL_AURA_MELEE_SLOW); } }; AuraScript* GetAuraScript() const OVERRIDE { return new spell_dk_pet_scaling_02_AuraScript(); } }; class spell_dk_pet_scaling_03 : public SpellScriptLoader { public: spell_dk_pet_scaling_03() : SpellScriptLoader("spell_dk_pet_scaling_03") { } class spell_dk_pet_scaling_03_AuraScript : public AuraScript { PrepareAuraScript(spell_dk_pet_scaling_03_AuraScript); bool Load() OVERRIDE { if (!GetCaster() || !GetCaster()->GetOwner() || GetCaster()->GetOwner()->GetTypeId() != TYPEID_PLAYER) return false; return true; } void CalculateAmountMeleeHit(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/) { if (!GetCaster() || !GetCaster()->GetOwner()) return; if (Player* owner = GetCaster()->GetOwner()->ToPlayer()) { // For others recalculate it from: float HitMelee = 0.0f; // Increase hit from SPELL_AURA_MOD_HIT_CHANCE HitMelee += owner->GetTotalAuraModifier(SPELL_AURA_MOD_HIT_CHANCE); // Increase hit melee from meele hit ratings HitMelee += owner->GetRatingBonusValue(CR_HIT_MELEE); amount += int32(HitMelee); } } void CalculateAmountSpellHit(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/) { if (!GetCaster() || !GetCaster()->GetOwner()) return; if (Player* owner = GetCaster()->GetOwner()->ToPlayer()) { // For others recalculate it from: float HitSpell = 0.0f; // Increase hit from SPELL_AURA_MOD_SPELL_HIT_CHANCE HitSpell += owner->GetTotalAuraModifier(SPELL_AURA_MOD_SPELL_HIT_CHANCE); // Increase hit spell from spell hit ratings HitSpell += owner->GetRatingBonusValue(CR_HIT_SPELL); amount += int32(HitSpell); } } void Register() OVERRIDE { DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_dk_pet_scaling_03_AuraScript::CalculateAmountMeleeHit, EFFECT_0, SPELL_AURA_MOD_HIT_CHANCE); DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_dk_pet_scaling_03_AuraScript::CalculateAmountSpellHit, 
EFFECT_1, SPELL_AURA_MOD_SPELL_HIT_CHANCE); } }; AuraScript* GetAuraScript() const OVERRIDE { return new spell_dk_pet_scaling_03_AuraScript(); } }; class spell_dk_rune_weapon_scaling_02 : public SpellScriptLoader { public: spell_dk_rune_weapon_scaling_02() : SpellScriptLoader("spell_dk_rune_weapon_scaling_02") { } class spell_dk_rune_weapon_scaling_02_AuraScript : public AuraScript { PrepareAuraScript(spell_dk_rune_weapon_scaling_02_AuraScript); bool Load() OVERRIDE { if (!GetCaster() || !GetCaster()->GetOwner() || GetCaster()->GetOwner()->GetTypeId() != TYPEID_PLAYER) return false; return true; } void CalculateDamageDoneAmount(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/) { if (Unit* pet = GetUnitOwner()) { Unit* owner = pet->GetOwner(); if (!owner) return; if (pet->IsGuardian()) ((Guardian*)pet)->SetBonusDamage(owner->GetTotalAttackPowerValue(BASE_ATTACK)); amount += owner->CalculateDamage(BASE_ATTACK, true, true); } } void CalculateAmountMeleeHaste(AuraEffect const* /* aurEff */, int32& amount, bool& /*canBeRecalculated*/) { if (!GetCaster() || !GetCaster()->GetOwner()) return; if (Player* owner = GetCaster()->GetOwner()->ToPlayer()) { // For others recalculate it from: float HasteMelee = 0.0f; // Increase hit from SPELL_AURA_MOD_HIT_CHANCE HasteMelee += (1-owner->m_modAttackSpeedPct[BASE_ATTACK])*100; amount += int32(HasteMelee); } } void Register() OVERRIDE { DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_dk_rune_weapon_scaling_02_AuraScript::CalculateDamageDoneAmount, EFFECT_0, SPELL_AURA_MOD_DAMAGE_DONE); DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_dk_rune_weapon_scaling_02_AuraScript::CalculateAmountMeleeHaste, EFFECT_1, SPELL_AURA_MELEE_SLOW); } }; AuraScript* GetAuraScript() const OVERRIDE { return new spell_dk_rune_weapon_scaling_02_AuraScript(); } }; void AddSC_pet_spell_scripts() { new spell_gen_pet_calculate(); }
Java
/* * linux/drivers/mmc/core/core.c * * Copyright (C) 2003-2004 Russell King, All Rights Reserved. * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved. * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved. * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/completion.h> #include <linux/device.h> #include <linux/delay.h> #include <linux/pagemap.h> #include <linux/err.h> #include <linux/leds.h> #include <linux/scatterlist.h> #include <linux/log2.h> #include <linux/regulator/consumer.h> #include <linux/pm_runtime.h> #include <linux/pm_wakeup.h> #include <linux/suspend.h> #include <linux/fault-inject.h> #include <linux/random.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/wakelock.h> #include <trace/events/mmc.h> #include <linux/mmc/card.h> #include <linux/mmc/host.h> #include <linux/mmc/mmc.h> #include <linux/mmc/sd.h> #include <linux/mmc/slot-gpio.h> #include "core.h" #include "bus.h" #include "host.h" #include "sdio_bus.h" #include "mmc_ops.h" #include "sd_ops.h" #include "sdio_ops.h" /* If the device is not responding */ #define MMC_CORE_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */ /* * Background operations can take a long time, depending on the housekeeping * operations the card has to perform. */ #define MMC_BKOPS_MAX_TIMEOUT (4 * 60 * 1000) /* max time to wait in ms */ static struct workqueue_struct *workqueue; static struct wake_lock mmc_delayed_work_wake_lock; static const unsigned freqs[] = { 400000, 300000, 200000, 100000 }; /* * Enabling software CRCs on the data blocks can be a significant (30%) * performance cost, and for other reasons may not always be desired. * So we allow it it to be disabled. 
*/ bool use_spi_crc = 1; module_param(use_spi_crc, bool, 0); /* * Internal function. Schedule delayed work in the MMC work queue. */ static int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay) { wake_lock(&mmc_delayed_work_wake_lock); return queue_delayed_work(workqueue, work, delay); } /* * Internal function. Flush all scheduled work from the MMC work queue. */ static void mmc_flush_scheduled_work(void) { flush_workqueue(workqueue); } #ifdef CONFIG_FAIL_MMC_REQUEST /* * Internal function. Inject random data errors. * If mmc_data is NULL no errors are injected. */ static void mmc_should_fail_request(struct mmc_host *host, struct mmc_request *mrq) { struct mmc_command *cmd = mrq->cmd; struct mmc_data *data = mrq->data; static const int data_errors[] = { -ETIMEDOUT, -EILSEQ, -EIO, }; if (!data) return; if (cmd->error || data->error || !should_fail(&host->fail_mmc_request, data->blksz * data->blocks)) return; data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)]; data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9; } #else /* CONFIG_FAIL_MMC_REQUEST */ static inline void mmc_should_fail_request(struct mmc_host *host, struct mmc_request *mrq) { } #endif /* CONFIG_FAIL_MMC_REQUEST */ /** * mmc_request_done - finish processing an MMC request * @host: MMC host which completed request * @mrq: MMC request which request * * MMC drivers should call this function when they have completed * their processing of a request. */ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq) { struct mmc_command *cmd = mrq->cmd; int err = cmd->error; if (err && cmd->retries && mmc_host_is_spi(host)) { if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND) cmd->retries = 0; } if (err && cmd->retries && !mmc_card_removed(host->card)) { /* * Request starter must handle retries - see * mmc_wait_for_req_done(). 
*/ if (mrq->done) mrq->done(mrq); } else { mmc_should_fail_request(host, mrq); led_trigger_event(host->led, LED_OFF); pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n", mmc_hostname(host), cmd->opcode, err, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]); if (mrq->data) { pr_debug("%s: %d bytes transferred: %d\n", mmc_hostname(host), mrq->data->bytes_xfered, mrq->data->error); trace_mmc_blk_rw_end(cmd->opcode, cmd->arg, mrq->data); } if (mrq->stop) { pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n", mmc_hostname(host), mrq->stop->opcode, mrq->stop->error, mrq->stop->resp[0], mrq->stop->resp[1], mrq->stop->resp[2], mrq->stop->resp[3]); } if (mrq->done) mrq->done(mrq); mmc_host_clk_release(host); } } EXPORT_SYMBOL(mmc_request_done); static void mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) { #ifdef CONFIG_MMC_DEBUG unsigned int i, sz; struct scatterlist *sg; #endif if (mrq->sbc) { pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n", mmc_hostname(host), mrq->sbc->opcode, mrq->sbc->arg, mrq->sbc->flags); } pr_debug("%s: starting CMD%u arg %08x flags %08x\n", mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags); if (mrq->data) { pr_debug("%s: blksz %d blocks %d flags %08x " "tsac %d ms nsac %d\n", mmc_hostname(host), mrq->data->blksz, mrq->data->blocks, mrq->data->flags, mrq->data->timeout_ns / 1000000, mrq->data->timeout_clks); } if (mrq->stop) { pr_debug("%s: CMD%u arg %08x flags %08x\n", mmc_hostname(host), mrq->stop->opcode, mrq->stop->arg, mrq->stop->flags); } WARN_ON(!host->claimed); mrq->cmd->error = 0; mrq->cmd->mrq = mrq; if (mrq->data) { BUG_ON(mrq->data->blksz > host->max_blk_size); BUG_ON(mrq->data->blocks > host->max_blk_count); BUG_ON(mrq->data->blocks * mrq->data->blksz > host->max_req_size); #ifdef CONFIG_MMC_DEBUG sz = 0; for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i) sz += sg->length; BUG_ON(sz != mrq->data->blocks * mrq->data->blksz); #endif mrq->cmd->data = mrq->data; mrq->data->error = 
0; mrq->data->mrq = mrq; if (mrq->stop) { mrq->data->stop = mrq->stop; mrq->stop->error = 0; mrq->stop->mrq = mrq; } } mmc_host_clk_hold(host); led_trigger_event(host->led, LED_FULL); host->ops->request(host, mrq); } /** * mmc_start_bkops - start BKOPS for supported cards * @card: MMC card to start BKOPS * @form_exception: A flag to indicate if this function was * called due to an exception raised by the card * * Start background operations whenever requested. * When the urgent BKOPS bit is set in a R1 command response * then background operations should be started immediately. */ void mmc_start_bkops(struct mmc_card *card, bool from_exception) { int err; int timeout; bool use_busy_signal; BUG_ON(!card); if (!card->ext_csd.bkops_en || mmc_card_doing_bkops(card)) return; err = mmc_read_bkops_status(card); if (err) { pr_err("%s: Failed to read bkops status: %d\n", mmc_hostname(card->host), err); return; } if (!card->ext_csd.raw_bkops_status) return; if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 && from_exception) return; mmc_claim_host(card->host); if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) { timeout = MMC_BKOPS_MAX_TIMEOUT; use_busy_signal = true; } else { timeout = 0; use_busy_signal = false; } err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_START, 1, timeout, use_busy_signal, true, false); if (err) { pr_warn("%s: Error %d starting bkops\n", mmc_hostname(card->host), err); goto out; } /* * For urgent bkops status (LEVEL_2 and more) * bkops executed synchronously, otherwise * the operation is in progress */ if (!use_busy_signal) mmc_card_set_doing_bkops(card); out: mmc_release_host(card->host); } EXPORT_SYMBOL(mmc_start_bkops); /* * mmc_wait_data_done() - done callback for data request * @mrq: done data request * * Wakes up mmc context, passed as a callback to host controller driver */ static void mmc_wait_data_done(struct mmc_request *mrq) { mrq->host->context_info.is_done_rcv = true; 
wake_up_interruptible(&mrq->host->context_info.wait); } static void mmc_wait_done(struct mmc_request *mrq) { complete(&mrq->completion); } /* *__mmc_start_data_req() - starts data request * @host: MMC host to start the request * @mrq: data request to start * * Sets the done callback to be called when request is completed by the card. * Starts data mmc request execution */ static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq) { mrq->done = mmc_wait_data_done; mrq->host = host; if (mmc_card_removed(host->card)) { mrq->cmd->error = -ENOMEDIUM; mmc_wait_data_done(mrq); return -ENOMEDIUM; } mmc_start_request(host, mrq); return 0; } static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq) { init_completion(&mrq->completion); mrq->done = mmc_wait_done; if (mmc_card_removed(host->card)) { mrq->cmd->error = -ENOMEDIUM; complete(&mrq->completion); return -ENOMEDIUM; } mmc_start_request(host, mrq); return 0; } /* * mmc_wait_for_data_req_done() - wait for request completed * @host: MMC host to prepare the command. * @mrq: MMC request to wait for * * Blocks MMC context till host controller will ack end of data request * execution or new request notification arrives from the block layer. * Handles command retries. * * Returns enum mmc_blk_status after checking errors. 
*/ static int mmc_wait_for_data_req_done(struct mmc_host *host, struct mmc_request *mrq, struct mmc_async_req *next_req) { struct mmc_command *cmd; struct mmc_context_info *context_info = &host->context_info; int err; unsigned long flags; while (1) { wait_event_interruptible(context_info->wait, (context_info->is_done_rcv || context_info->is_new_req)); spin_lock_irqsave(&context_info->lock, flags); context_info->is_waiting_last_req = false; spin_unlock_irqrestore(&context_info->lock, flags); if (context_info->is_done_rcv) { context_info->is_done_rcv = false; context_info->is_new_req = false; cmd = mrq->cmd; if (!cmd->error || !cmd->retries || mmc_card_removed(host->card)) { err = host->areq->err_check(host->card, host->areq); break; /* return err */ } else { pr_info("%s: req failed (CMD%u): %d, retrying...\n", mmc_hostname(host), cmd->opcode, cmd->error); cmd->retries--; cmd->error = 0; host->ops->request(host, mrq); continue; /* wait for done/new event again */ } } else if (context_info->is_new_req) { context_info->is_new_req = false; if (!next_req) { err = MMC_BLK_NEW_REQUEST; break; /* return err */ } } } return err; } static void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq) { struct mmc_command *cmd; while (1) { wait_for_completion(&mrq->completion); cmd = mrq->cmd; /* * If host has timed out waiting for the sanitize * to complete, card might be still in programming state * so let's try to bring the card out of programming * state. 
*/ if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) { if (!mmc_interrupt_hpi(host->card)) { pr_warn("%s: %s: Interrupted sanitize\n", mmc_hostname(host), __func__); cmd->error = 0; break; } else { pr_err("%s: %s: Failed to interrupt sanitize\n", mmc_hostname(host), __func__); } } if (!cmd->error || !cmd->retries || mmc_card_removed(host->card)) break; pr_debug("%s: req failed (CMD%u): %d, retrying...\n", mmc_hostname(host), cmd->opcode, cmd->error); cmd->retries--; cmd->error = 0; host->ops->request(host, mrq); } } /** * mmc_pre_req - Prepare for a new request * @host: MMC host to prepare command * @mrq: MMC request to prepare for * @is_first_req: true if there is no previous started request * that may run in parellel to this call, otherwise false * * mmc_pre_req() is called in prior to mmc_start_req() to let * host prepare for the new request. Preparation of a request may be * performed while another request is running on the host. */ static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq, bool is_first_req) { if (host->ops->pre_req) { mmc_host_clk_hold(host); host->ops->pre_req(host, mrq, is_first_req); mmc_host_clk_release(host); } } /** * mmc_post_req - Post process a completed request * @host: MMC host to post process command * @mrq: MMC request to post process for * @err: Error, if non zero, clean up any resources made in pre_req * * Let the host post process a completed request. Post processing of * a request may be performed while another reuqest is running. */ static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq, int err) { if (host->ops->post_req) { mmc_host_clk_hold(host); host->ops->post_req(host, mrq, err); mmc_host_clk_release(host); } } /** * mmc_start_req - start a non-blocking request * @host: MMC host to start command * @areq: async request to start * @error: out parameter returns 0 for success, otherwise non zero * * Start a new MMC custom command request for a host. 
* If there is on ongoing async request wait for completion * of that request and start the new one and return. * Does not wait for the new request to complete. * * Returns the completed request, NULL in case of none completed. * Wait for the an ongoing request (previoulsy started) to complete and * return the completed request. If there is no ongoing request, NULL * is returned without waiting. NULL is not an error condition. */ struct mmc_async_req *mmc_start_req(struct mmc_host *host, struct mmc_async_req *areq, int *error) { int err = 0; int start_err = 0; struct mmc_async_req *data = host->areq; /* Prepare a new request */ if (areq) mmc_pre_req(host, areq->mrq, !host->areq); if (host->areq) { err = mmc_wait_for_data_req_done(host, host->areq->mrq, areq); if (err == MMC_BLK_NEW_REQUEST) { if (error) *error = err; /* * The previous request was not completed, * nothing to return */ return NULL; } /* * Check BKOPS urgency for each R1 response */ if (host->card && mmc_card_mmc(host->card) && ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) || (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) && (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) { /* Cancel the prepared request */ if (areq) mmc_post_req(host, areq->mrq, -EINVAL); mmc_start_bkops(host->card, true); /* prepare the request again */ if (areq) mmc_pre_req(host, areq->mrq, !host->areq); } } if (!err && areq) { trace_mmc_blk_rw_start(areq->mrq->cmd->opcode, areq->mrq->cmd->arg, areq->mrq->data); start_err = __mmc_start_data_req(host, areq->mrq); } if (host->areq) mmc_post_req(host, host->areq->mrq, 0); /* Cancel a prepared request if it was not started. 
*/ if ((err || start_err) && areq) mmc_post_req(host, areq->mrq, -EINVAL); if (err) host->areq = NULL; else host->areq = areq; if (error) *error = err; return data; } EXPORT_SYMBOL(mmc_start_req); /** * mmc_wait_for_req - start a request and wait for completion * @host: MMC host to start command * @mrq: MMC request to start * * Start a new MMC custom command request for a host, and wait * for the command to complete. Does not attempt to parse the * response. */ void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq) { __mmc_start_req(host, mrq); mmc_wait_for_req_done(host, mrq); } EXPORT_SYMBOL(mmc_wait_for_req); /** * mmc_interrupt_hpi - Issue for High priority Interrupt * @card: the MMC card associated with the HPI transfer * * Issued High Priority Interrupt, and check for card status * until out-of prg-state. */ int mmc_interrupt_hpi(struct mmc_card *card) { int err; u32 status; unsigned long prg_wait; BUG_ON(!card); if (!card->ext_csd.hpi_en) { pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host)); return 1; } mmc_claim_host(card->host); err = mmc_send_status(card, &status); if (err) { pr_err("%s: Get card status fail\n", mmc_hostname(card->host)); goto out; } switch (R1_CURRENT_STATE(status)) { case R1_STATE_IDLE: case R1_STATE_READY: case R1_STATE_STBY: case R1_STATE_TRAN: /* * In idle and transfer states, HPI is not needed and the caller * can issue the next intended command immediately */ goto out; case R1_STATE_PRG: break; default: /* In all other states, it's illegal to issue HPI */ pr_debug("%s: HPI cannot be sent. 
Card state=%d\n", mmc_hostname(card->host), R1_CURRENT_STATE(status)); err = -EINVAL; goto out; } err = mmc_send_hpi_cmd(card, &status); if (err) goto out; prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time); do { err = mmc_send_status(card, &status); if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN) break; if (time_after(jiffies, prg_wait)) err = -ETIMEDOUT; } while (!err); out: mmc_release_host(card->host); return err; } EXPORT_SYMBOL(mmc_interrupt_hpi); /** * mmc_wait_for_cmd - start a command and wait for completion * @host: MMC host to start command * @cmd: MMC command to start * @retries: maximum number of retries * * Start a new MMC command for a host, and wait for the command * to complete. Return any error that occurred while the command * was executing. Do not attempt to parse the response. */ int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries) { struct mmc_request mrq = {NULL}; WARN_ON(!host->claimed); memset(cmd->resp, 0, sizeof(cmd->resp)); cmd->retries = retries; mrq.cmd = cmd; cmd->data = NULL; mmc_wait_for_req(host, &mrq); return cmd->error; } EXPORT_SYMBOL(mmc_wait_for_cmd); /** * mmc_stop_bkops - stop ongoing BKOPS * @card: MMC card to check BKOPS * * Send HPI command to stop ongoing background operations to * allow rapid servicing of foreground operations, e.g. read/ * writes. Wait until the card comes out of the programming state * to avoid errors in servicing read/write requests. */ int mmc_stop_bkops(struct mmc_card *card) { int err = 0; BUG_ON(!card); err = mmc_interrupt_hpi(card); /* * If err is EINVAL, we can't issue an HPI. * It should complete the BKOPS. */ if (!err || (err == -EINVAL)) { mmc_card_clr_doing_bkops(card); err = 0; } return err; } EXPORT_SYMBOL(mmc_stop_bkops); int mmc_read_bkops_status(struct mmc_card *card) { int err; u8 *ext_csd; /* * In future work, we should consider storing the entire ext_csd. 
*/ ext_csd = kmalloc(512, GFP_KERNEL); if (!ext_csd) { pr_err("%s: could not allocate buffer to receive the ext_csd.\n", mmc_hostname(card->host)); return -ENOMEM; } mmc_claim_host(card->host); err = mmc_send_ext_csd(card, ext_csd); mmc_release_host(card->host); if (err) goto out; card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS]; card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS]; out: kfree(ext_csd); return err; } EXPORT_SYMBOL(mmc_read_bkops_status); /** * mmc_set_data_timeout - set the timeout for a data command * @data: data phase for command * @card: the MMC card associated with the data transfer * * Computes the data timeout parameters according to the * correct algorithm given the card type. */ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card) { unsigned int mult; /* * SDIO cards only define an upper 1 s limit on access. */ if (mmc_card_sdio(card)) { data->timeout_ns = 1000000000; data->timeout_clks = 0; return; } /* * SD cards use a 100 multiplier rather than 10 */ mult = mmc_card_sd(card) ? 100 : 10; /* * Scale up the multiplier (and therefore the timeout) by * the r2w factor for writes. */ if (data->flags & MMC_DATA_WRITE) mult <<= card->csd.r2w_factor; data->timeout_ns = card->csd.tacc_ns * mult; data->timeout_clks = card->csd.tacc_clks * mult; /* * SD cards also have an upper limit on the timeout. */ if (mmc_card_sd(card)) { unsigned int timeout_us, limit_us; timeout_us = data->timeout_ns / 1000; if (mmc_host_clk_rate(card->host)) timeout_us += data->timeout_clks * 1000 / (mmc_host_clk_rate(card->host) / 1000); if (data->flags & MMC_DATA_WRITE) /* * The MMC spec "It is strongly recommended * for hosts to implement more than 500ms * timeout value even if the card indicates * the 250ms maximum busy length." Even the * previous value of 300ms is known to be * insufficient for some cards. */ limit_us = 3000000; else limit_us = 100000; /* * SDHC cards always use these fixed values. 
*/ if (timeout_us > limit_us || mmc_card_blockaddr(card)) { data->timeout_ns = limit_us * 1000; data->timeout_clks = 0; } /* assign limit value if invalid */ if (timeout_us == 0) data->timeout_ns = limit_us * 1000; } /* * Some cards require longer data read timeout than indicated in CSD. * Address this by setting the read timeout to a "reasonably high" * value. For the cards tested, 300ms has proven enough. If necessary, * this value can be increased if other problematic cards require this. */ if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) { data->timeout_ns = 300000000; data->timeout_clks = 0; } /* * Some cards need very high timeouts if driven in SPI mode. * The worst observed timeout was 900ms after writing a * continuous stream of data until the internal logic * overflowed. */ if (mmc_host_is_spi(card->host)) { if (data->flags & MMC_DATA_WRITE) { if (data->timeout_ns < 1000000000) data->timeout_ns = 1000000000; /* 1s */ } else { if (data->timeout_ns < 100000000) data->timeout_ns = 100000000; /* 100ms */ } } } EXPORT_SYMBOL(mmc_set_data_timeout); /** * mmc_align_data_size - pads a transfer size to a more optimal value * @card: the MMC card associated with the data transfer * @sz: original transfer size * * Pads the original data size with a number of extra bytes in * order to avoid controller bugs and/or performance hits * (e.g. some controllers revert to PIO for certain sizes). * * Returns the improved size, which might be unmodified. * * Note that this function is only relevant when issuing a * single scatter gather entry. */ unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz) { /* * FIXME: We don't have a system for the controller to tell * the core about its problems yet, so for now we just 32-bit * align the size. 
*/ sz = ((sz + 3) / 4) * 4; return sz; } EXPORT_SYMBOL(mmc_align_data_size); /** * __mmc_claim_host - exclusively claim a host * @host: mmc host to claim * @abort: whether or not the operation should be aborted * * Claim a host for a set of operations. If @abort is non null and * dereference a non-zero value then this will return prematurely with * that non-zero value without acquiring the lock. Returns zero * with the lock held otherwise. */ int __mmc_claim_host(struct mmc_host *host, atomic_t *abort) { DECLARE_WAITQUEUE(wait, current); unsigned long flags; int stop; might_sleep(); add_wait_queue(&host->wq, &wait); spin_lock_irqsave(&host->lock, flags); while (1) { set_current_state(TASK_UNINTERRUPTIBLE); stop = abort ? atomic_read(abort) : 0; if (stop || !host->claimed || host->claimer == current) break; spin_unlock_irqrestore(&host->lock, flags); schedule(); spin_lock_irqsave(&host->lock, flags); } set_current_state(TASK_RUNNING); if (!stop) { host->claimed = 1; host->claimer = current; host->claim_cnt += 1; } else wake_up(&host->wq); spin_unlock_irqrestore(&host->lock, flags); remove_wait_queue(&host->wq, &wait); if (host->ops->enable && !stop && host->claim_cnt == 1) host->ops->enable(host); return stop; } EXPORT_SYMBOL(__mmc_claim_host); /** * mmc_release_host - release a host * @host: mmc host to release * * Release a MMC host, allowing others to claim the host * for their operations. */ void mmc_release_host(struct mmc_host *host) { unsigned long flags; WARN_ON(!host->claimed); if (host->ops->disable && host->claim_cnt == 1) host->ops->disable(host); spin_lock_irqsave(&host->lock, flags); if (--host->claim_cnt) { /* Release for nested claim */ spin_unlock_irqrestore(&host->lock, flags); } else { host->claimed = 0; host->claimer = NULL; spin_unlock_irqrestore(&host->lock, flags); wake_up(&host->wq); } } EXPORT_SYMBOL(mmc_release_host); /* * This is a helper function, which fetches a runtime pm reference for the * card device and also claims the host. 
*/ void mmc_get_card(struct mmc_card *card) { pm_runtime_get_sync(&card->dev); mmc_claim_host(card->host); } EXPORT_SYMBOL(mmc_get_card); /* * This is a helper function, which releases the host and drops the runtime * pm reference for the card device. */ void mmc_put_card(struct mmc_card *card) { mmc_release_host(card->host); pm_runtime_mark_last_busy(&card->dev); pm_runtime_put_autosuspend(&card->dev); } EXPORT_SYMBOL(mmc_put_card); /* * Internal function that does the actual ios call to the host driver, * optionally printing some debug output. */ static inline void mmc_set_ios(struct mmc_host *host) { struct mmc_ios *ios = &host->ios; pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u " "width %u timing %u\n", mmc_hostname(host), ios->clock, ios->bus_mode, ios->power_mode, ios->chip_select, ios->vdd, ios->bus_width, ios->timing); if (ios->clock > 0) mmc_set_ungated(host); host->ops->set_ios(host, ios); } /* * Control chip select pin on a host. */ void mmc_set_chip_select(struct mmc_host *host, int mode) { mmc_host_clk_hold(host); host->ios.chip_select = mode; mmc_set_ios(host); mmc_host_clk_release(host); } /* * Sets the host clock to the highest possible frequency that * is below "hz". */ static void __mmc_set_clock(struct mmc_host *host, unsigned int hz) { WARN_ON(hz && hz < host->f_min); if (hz > host->f_max) hz = host->f_max; host->ios.clock = hz; mmc_set_ios(host); } void mmc_set_clock(struct mmc_host *host, unsigned int hz) { mmc_host_clk_hold(host); __mmc_set_clock(host, hz); mmc_host_clk_release(host); } #ifdef CONFIG_MMC_CLKGATE /* * This gates the clock by setting it to 0 Hz. */ void mmc_gate_clock(struct mmc_host *host) { unsigned long flags; spin_lock_irqsave(&host->clk_lock, flags); host->clk_old = host->ios.clock; host->ios.clock = 0; host->clk_gated = true; spin_unlock_irqrestore(&host->clk_lock, flags); mmc_set_ios(host); } /* * This restores the clock from gating by using the cached * clock value. 
*/ void mmc_ungate_clock(struct mmc_host *host) { /* * We should previously have gated the clock, so the clock shall * be 0 here! The clock may however be 0 during initialization, * when some request operations are performed before setting * the frequency. When ungate is requested in that situation * we just ignore the call. */ if (host->clk_old) { BUG_ON(host->ios.clock); /* This call will also set host->clk_gated to false */ __mmc_set_clock(host, host->clk_old); } } void mmc_set_ungated(struct mmc_host *host) { unsigned long flags; /* * We've been given a new frequency while the clock is gated, * so make sure we regard this as ungating it. */ spin_lock_irqsave(&host->clk_lock, flags); host->clk_gated = false; spin_unlock_irqrestore(&host->clk_lock, flags); } #else void mmc_set_ungated(struct mmc_host *host) { } #endif /* * Change the bus mode (open drain/push-pull) of a host. */ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode) { mmc_host_clk_hold(host); host->ios.bus_mode = mode; mmc_set_ios(host); mmc_host_clk_release(host); } /* * Change data bus width of a host. */ void mmc_set_bus_width(struct mmc_host *host, unsigned int width) { mmc_host_clk_hold(host); host->ios.bus_width = width; mmc_set_ios(host); mmc_host_clk_release(host); } /** * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number * @vdd: voltage (mV) * @low_bits: prefer low bits in boundary cases * * This function returns the OCR bit number according to the provided @vdd * value. If conversion is not possible a negative errno value returned. * * Depending on the @low_bits flag the function prefers low or high OCR bits * on boundary voltages. For example, * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33); * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34); * * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21). 
*/ static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits) { const int max_bit = ilog2(MMC_VDD_35_36); int bit; if (vdd < 1650 || vdd > 3600) return -EINVAL; if (vdd >= 1650 && vdd <= 1950) return ilog2(MMC_VDD_165_195); if (low_bits) vdd -= 1; /* Base 2000 mV, step 100 mV, bit's base 8. */ bit = (vdd - 2000) / 100 + 8; if (bit > max_bit) return max_bit; return bit; } /** * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask * @vdd_min: minimum voltage value (mV) * @vdd_max: maximum voltage value (mV) * * This function returns the OCR mask bits according to the provided @vdd_min * and @vdd_max values. If conversion is not possible the function returns 0. * * Notes wrt boundary cases: * This function sets the OCR bits for all boundary voltages, for example * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 | * MMC_VDD_34_35 mask. */ u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max) { u32 mask = 0; if (vdd_max < vdd_min) return 0; /* Prefer high bits for the boundary vdd_max values. */ vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false); if (vdd_max < 0) return 0; /* Prefer low bits for the boundary vdd_min values. */ vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true); if (vdd_min < 0) return 0; /* Fill the mask, from max bit to min bit. */ while (vdd_max >= vdd_min) mask |= 1 << vdd_max--; return mask; } EXPORT_SYMBOL(mmc_vddrange_to_ocrmask); #ifdef CONFIG_OF /** * mmc_of_parse_voltage - return mask of supported voltages * @np: The device node need to be parsed. * @mask: mask of voltages available for MMC/SD/SDIO * * 1. Return zero on success. * 2. Return negative errno: voltage-range is invalid. 
*/ int mmc_of_parse_voltage(struct device_node *np, u32 *mask) { const u32 *voltage_ranges; int num_ranges, i; voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges); num_ranges = num_ranges / sizeof(*voltage_ranges) / 2; if (!voltage_ranges || !num_ranges) { pr_info("%s: voltage-ranges unspecified\n", np->full_name); return -EINVAL; } for (i = 0; i < num_ranges; i++) { const int j = i * 2; u32 ocr_mask; ocr_mask = mmc_vddrange_to_ocrmask( be32_to_cpu(voltage_ranges[j]), be32_to_cpu(voltage_ranges[j + 1])); if (!ocr_mask) { pr_err("%s: voltage-range #%d is invalid\n", np->full_name, i); return -EINVAL; } *mask |= ocr_mask; } return 0; } EXPORT_SYMBOL(mmc_of_parse_voltage); #endif /* CONFIG_OF */ #ifdef CONFIG_REGULATOR /** * mmc_regulator_get_ocrmask - return mask of supported voltages * @supply: regulator to use * * This returns either a negative errno, or a mask of voltages that * can be provided to MMC/SD/SDIO devices using the specified voltage * regulator. This would normally be called before registering the * MMC host adapter. */ int mmc_regulator_get_ocrmask(struct regulator *supply) { int result = 0; int count; int i; int vdd_uV; int vdd_mV; count = regulator_count_voltages(supply); if (count < 0) return count; for (i = 0; i < count; i++) { vdd_uV = regulator_list_voltage(supply, i); if (vdd_uV <= 0) continue; vdd_mV = vdd_uV / 1000; result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV); } if (!result) { vdd_uV = regulator_get_voltage(supply); if (vdd_uV <= 0) return vdd_uV; vdd_mV = vdd_uV / 1000; result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV); } return result; } EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask); /** * mmc_regulator_set_ocr - set regulator to match host->ios voltage * @mmc: the host to regulate * @supply: regulator to use * @vdd_bit: zero for power off, else a bit number (host->ios.vdd) * * Returns zero on success, else negative errno. 
* * MMC host drivers may use this to enable or disable a regulator using * a particular supply voltage. This would normally be called from the * set_ios() method. */ int mmc_regulator_set_ocr(struct mmc_host *mmc, struct regulator *supply, unsigned short vdd_bit) { int result = 0; int min_uV, max_uV; if (vdd_bit) { int tmp; /* * REVISIT mmc_vddrange_to_ocrmask() may have set some * bits this regulator doesn't quite support ... don't * be too picky, most cards and regulators are OK with * a 0.1V range goof (it's a small error percentage). */ tmp = vdd_bit - ilog2(MMC_VDD_165_195); if (tmp == 0) { min_uV = 1650 * 1000; max_uV = 1950 * 1000; } else { min_uV = 1900 * 1000 + tmp * 100 * 1000; max_uV = min_uV + 100 * 1000; } result = regulator_set_voltage(supply, min_uV, max_uV); if (result == 0 && !mmc->regulator_enabled) { result = regulator_enable(supply); if (!result) mmc->regulator_enabled = true; } } else if (mmc->regulator_enabled) { result = regulator_disable(supply); if (result == 0) mmc->regulator_enabled = false; } if (result) dev_err(mmc_dev(mmc), "could not set regulator OCR (%d)\n", result); return result; } EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr); #endif /* CONFIG_REGULATOR */ int mmc_regulator_get_supply(struct mmc_host *mmc) { struct device *dev = mmc_dev(mmc); int ret; mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc"); mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc"); if (IS_ERR(mmc->supply.vmmc)) { if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER) return -EPROBE_DEFER; dev_info(dev, "No vmmc regulator found\n"); } else { ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc); if (ret > 0) mmc->ocr_avail = ret; else dev_warn(dev, "Failed getting OCR mask: %d\n", ret); } if (IS_ERR(mmc->supply.vqmmc)) { if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER) return -EPROBE_DEFER; dev_info(dev, "No vqmmc regulator found\n"); } return 0; } EXPORT_SYMBOL_GPL(mmc_regulator_get_supply); /* * Mask off any voltages we don't support and select * the 
lowest voltage */ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr) { int bit; /* * Sanity check the voltages that the card claims to * support. */ if (ocr & 0x7F) { dev_warn(mmc_dev(host), "card claims to support voltages below defined range\n"); ocr &= ~0x7F; } ocr &= host->ocr_avail; if (!ocr) { dev_warn(mmc_dev(host), "no support for card's volts\n"); return 0; } if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) { bit = ffs(ocr) - 1; ocr &= 3 << bit; mmc_power_cycle(host, ocr); } else { bit = fls(ocr) - 1; ocr &= 3 << bit; if (bit != host->ios.vdd) dev_warn(mmc_dev(host), "exceeding card's volts\n"); } return ocr; } int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage) { int err = 0; int old_signal_voltage = host->ios.signal_voltage; host->ios.signal_voltage = signal_voltage; if (host->ops->start_signal_voltage_switch) { mmc_host_clk_hold(host); err = host->ops->start_signal_voltage_switch(host, &host->ios); mmc_host_clk_release(host); } if (err) host->ios.signal_voltage = old_signal_voltage; return err; } int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr) { struct mmc_command cmd = {0}; int err = 0; u32 clock; BUG_ON(!host); /* * Send CMD11 only if the request is to switch the card to * 1.8V signalling. 
*/ if (signal_voltage == MMC_SIGNAL_VOLTAGE_330) return __mmc_set_signal_voltage(host, signal_voltage); /* * If we cannot switch voltages, return failure so the caller * can continue without UHS mode */ if (!host->ops->start_signal_voltage_switch) return -EPERM; if (!host->ops->card_busy) pr_warn("%s: cannot verify signal voltage switch\n", mmc_hostname(host)); cmd.opcode = SD_SWITCH_VOLTAGE; cmd.arg = 0; cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; err = mmc_wait_for_cmd(host, &cmd, 0); if (err) return err; if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) return -EIO; mmc_host_clk_hold(host); /* * The card should drive cmd and dat[0:3] low immediately * after the response of cmd11, but wait 1 ms to be sure */ mmc_delay(1); if (host->ops->card_busy && !host->ops->card_busy(host)) { err = -EAGAIN; goto power_cycle; } /* * During a signal voltage level switch, the clock must be gated * for 5 ms according to the SD spec */ clock = host->ios.clock; host->ios.clock = 0; mmc_set_ios(host); if (__mmc_set_signal_voltage(host, signal_voltage)) { /* * Voltages may not have been switched, but we've already * sent CMD11, so a power cycle is required anyway */ err = -EAGAIN; goto power_cycle; } /* Keep clock gated for at least 5 ms */ mmc_delay(5); host->ios.clock = clock; mmc_set_ios(host); /* Wait for at least 1 ms according to spec */ mmc_delay(1); /* * Failure to switch is indicated by the card holding * dat[0:3] low */ if (host->ops->card_busy && host->ops->card_busy(host)) err = -EAGAIN; power_cycle: if (err) { pr_debug("%s: Signal voltage switch failed, " "power cycling card\n", mmc_hostname(host)); mmc_power_cycle(host, ocr); } mmc_host_clk_release(host); return err; } /* * Select timing parameters for host. */ void mmc_set_timing(struct mmc_host *host, unsigned int timing) { mmc_host_clk_hold(host); host->ios.timing = timing; mmc_set_ios(host); mmc_host_clk_release(host); } /* * Select appropriate driver type for host. 
*/ void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type) { mmc_host_clk_hold(host); host->ios.drv_type = drv_type; mmc_set_ios(host); mmc_host_clk_release(host); } /* * Apply power to the MMC stack. This is a two-stage process. * First, we enable power to the card without the clock running. * We then wait a bit for the power to stabilise. Finally, * enable the bus drivers and clock to the card. * * We must _NOT_ enable the clock prior to power stablising. * * If a host does all the power sequencing itself, ignore the * initial MMC_POWER_UP stage. */ void mmc_power_up(struct mmc_host *host, u32 ocr) { if (host->ios.power_mode == MMC_POWER_ON) return; mmc_host_clk_hold(host); host->ios.vdd = fls(ocr) - 1; if (mmc_host_is_spi(host)) host->ios.chip_select = MMC_CS_HIGH; else host->ios.chip_select = MMC_CS_DONTCARE; host->ios.bus_mode = MMC_BUSMODE_PUSHPULL; host->ios.power_mode = MMC_POWER_UP; host->ios.bus_width = MMC_BUS_WIDTH_1; host->ios.timing = MMC_TIMING_LEGACY; mmc_set_ios(host); /* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */ if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330) == 0) dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n"); else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180) == 0) dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n"); else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120) == 0) dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n"); /* * This delay should be sufficient to allow the power supply * to reach the minimum voltage. */ mmc_delay(10); host->ios.clock = host->f_init; host->ios.power_mode = MMC_POWER_ON; mmc_set_ios(host); /* * This delay must be at least 74 clock sizes, or 1 ms, or the * time required to reach a stable voltage. 
*/ mmc_delay(10); mmc_host_clk_release(host); } void mmc_power_off(struct mmc_host *host) { if (host->ios.power_mode == MMC_POWER_OFF) return; mmc_host_clk_hold(host); host->ios.clock = 0; host->ios.vdd = 0; if (!mmc_host_is_spi(host)) { host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; host->ios.chip_select = MMC_CS_DONTCARE; } host->ios.power_mode = MMC_POWER_OFF; host->ios.bus_width = MMC_BUS_WIDTH_1; host->ios.timing = MMC_TIMING_LEGACY; mmc_set_ios(host); /* * Some configurations, such as the 802.11 SDIO card in the OLPC * XO-1.5, require a short delay after poweroff before the card * can be successfully turned on again. */ mmc_delay(1); mmc_host_clk_release(host); } void mmc_power_cycle(struct mmc_host *host, u32 ocr) { mmc_power_off(host); /* Wait at least 1 ms according to SD spec */ mmc_delay(1); mmc_power_up(host, ocr); } /* * Cleanup when the last reference to the bus operator is dropped. */ static void __mmc_release_bus(struct mmc_host *host) { BUG_ON(!host); BUG_ON(host->bus_refs); BUG_ON(!host->bus_dead); host->bus_ops = NULL; } /* * Increase reference count of bus operator */ static inline void mmc_bus_get(struct mmc_host *host) { unsigned long flags; spin_lock_irqsave(&host->lock, flags); host->bus_refs++; spin_unlock_irqrestore(&host->lock, flags); } /* * Decrease reference count of bus operator and free it if * it is the last reference. */ static inline void mmc_bus_put(struct mmc_host *host) { unsigned long flags; spin_lock_irqsave(&host->lock, flags); host->bus_refs--; if ((host->bus_refs == 0) && host->bus_ops) __mmc_release_bus(host); spin_unlock_irqrestore(&host->lock, flags); } /* * Assign a mmc bus handler to a host. Only one bus handler may control a * host at any given time. 
*/ void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops) { unsigned long flags; BUG_ON(!host); BUG_ON(!ops); WARN_ON(!host->claimed); spin_lock_irqsave(&host->lock, flags); BUG_ON(host->bus_ops); BUG_ON(host->bus_refs); host->bus_ops = ops; host->bus_refs = 1; host->bus_dead = 0; spin_unlock_irqrestore(&host->lock, flags); } /* * Remove the current bus handler from a host. */ void mmc_detach_bus(struct mmc_host *host) { unsigned long flags; BUG_ON(!host); WARN_ON(!host->claimed); WARN_ON(!host->bus_ops); spin_lock_irqsave(&host->lock, flags); host->bus_dead = 1; spin_unlock_irqrestore(&host->lock, flags); mmc_bus_put(host); } static void _mmc_detect_change(struct mmc_host *host, unsigned long delay, bool cd_irq) { #ifdef CONFIG_MMC_DEBUG unsigned long flags; spin_lock_irqsave(&host->lock, flags); WARN_ON(host->removed); spin_unlock_irqrestore(&host->lock, flags); #endif /* * If the device is configured as wakeup, we prevent a new sleep for * 5 s to give provision for user space to consume the event. */ if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) && device_can_wakeup(mmc_dev(host))) pm_wakeup_event(mmc_dev(host), 5000); host->detect_change = 1; mmc_schedule_delayed_work(&host->detect, delay); } /** * mmc_detect_change - process change of state on a MMC socket * @host: host which changed state. * @delay: optional delay to wait before detection (jiffies) * * MMC drivers should call this when they detect a card has been * inserted or removed. The MMC layer will confirm that any * present card is still functional, and initialize any newly * inserted. */ void mmc_detect_change(struct mmc_host *host, unsigned long delay) { _mmc_detect_change(host, delay, true); } EXPORT_SYMBOL(mmc_detect_change); void mmc_init_erase(struct mmc_card *card) { unsigned int sz; if (is_power_of_2(card->erase_size)) card->erase_shift = ffs(card->erase_size) - 1; else card->erase_shift = 0; /* * It is possible to erase an arbitrarily large area of an SD or MMC * card. 
That is not desirable because it can take a long time * (minutes) potentially delaying more important I/O, and also the * timeout calculations become increasingly hugely over-estimated. * Consequently, 'pref_erase' is defined as a guide to limit erases * to that size and alignment. * * For SD cards that define Allocation Unit size, limit erases to one * Allocation Unit at a time. For MMC cards that define High Capacity * Erase Size, whether it is switched on or not, limit to that size. * Otherwise just have a stab at a good value. For modern cards it * will end up being 4MiB. Note that if the value is too small, it * can end up taking longer to erase. */ if (mmc_card_sd(card) && card->ssr.au) { card->pref_erase = card->ssr.au; card->erase_shift = ffs(card->ssr.au) - 1; } else if (card->ext_csd.hc_erase_size) { card->pref_erase = card->ext_csd.hc_erase_size; } else if (card->erase_size) { sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11; if (sz < 128) card->pref_erase = 512 * 1024 / 512; else if (sz < 512) card->pref_erase = 1024 * 1024 / 512; else if (sz < 1024) card->pref_erase = 2 * 1024 * 1024 / 512; else card->pref_erase = 4 * 1024 * 1024 / 512; if (card->pref_erase < card->erase_size) card->pref_erase = card->erase_size; else { sz = card->pref_erase % card->erase_size; if (sz) card->pref_erase += card->erase_size - sz; } } else card->pref_erase = 0; } static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card, unsigned int arg, unsigned int qty) { unsigned int erase_timeout; if (arg == MMC_DISCARD_ARG || (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) { erase_timeout = card->ext_csd.trim_timeout; } else if (card->ext_csd.erase_group_def & 1) { /* High Capacity Erase Group Size uses HC timeouts */ if (arg == MMC_TRIM_ARG) erase_timeout = card->ext_csd.trim_timeout; else erase_timeout = card->ext_csd.hc_erase_timeout; } else { /* CSD Erase Group Size uses write timeout */ unsigned int mult = (10 << card->csd.r2w_factor); unsigned int 
timeout_clks = card->csd.tacc_clks * mult; unsigned int timeout_us; /* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */ if (card->csd.tacc_ns < 1000000) timeout_us = (card->csd.tacc_ns * mult) / 1000; else timeout_us = (card->csd.tacc_ns / 1000) * mult; /* * ios.clock is only a target. The real clock rate might be * less but not that much less, so fudge it by multiplying by 2. */ timeout_clks <<= 1; timeout_us += (timeout_clks * 1000) / (mmc_host_clk_rate(card->host) / 1000); erase_timeout = timeout_us / 1000; /* * Theoretically, the calculation could underflow so round up * to 1ms in that case. */ if (!erase_timeout) erase_timeout = 1; } /* Multiplier for secure operations */ if (arg & MMC_SECURE_ARGS) { if (arg == MMC_SECURE_ERASE_ARG) erase_timeout *= card->ext_csd.sec_erase_mult; else erase_timeout *= card->ext_csd.sec_trim_mult; } erase_timeout *= qty; /* * Ensure at least a 1 second timeout for SPI as per * 'mmc_set_data_timeout()' */ if (mmc_host_is_spi(card->host) && erase_timeout < 1000) erase_timeout = 1000; return erase_timeout; } static unsigned int mmc_sd_erase_timeout(struct mmc_card *card, unsigned int arg, unsigned int qty) { unsigned int erase_timeout; if (card->ssr.erase_timeout) { /* Erase timeout specified in SD Status Register (SSR) */ erase_timeout = card->ssr.erase_timeout * qty + card->ssr.erase_offset; } else { /* * Erase timeout not specified in SD Status Register (SSR) so * use 250ms per write block. 
*/ erase_timeout = 250 * qty; } /* Must not be less than 1 second */ if (erase_timeout < 1000) erase_timeout = 1000; return erase_timeout; } static unsigned int mmc_erase_timeout(struct mmc_card *card, unsigned int arg, unsigned int qty) { if (mmc_card_sd(card)) return mmc_sd_erase_timeout(card, arg, qty); else return mmc_mmc_erase_timeout(card, arg, qty); } static int mmc_do_erase(struct mmc_card *card, unsigned int from, unsigned int to, unsigned int arg) { struct mmc_command cmd = {0}; unsigned int qty = 0; unsigned long timeout; unsigned int fr, nr; int err; fr = from; nr = to - from + 1; trace_mmc_blk_erase_start(arg, fr, nr); /* * qty is used to calculate the erase timeout which depends on how many * erase groups (or allocation units in SD terminology) are affected. * We count erasing part of an erase group as one erase group. * For SD, the allocation units are always a power of 2. For MMC, the * erase group size is almost certainly also power of 2, but it does not * seem to insist on that in the JEDEC standard, so we fall back to * division in that case. SD may not specify an allocation unit size, * in which case the timeout is based on the number of write blocks. * * Note that the timeout for secure trim 2 will only be correct if the * number of erase groups specified is the same as the total of all * preceding secure trim 1 commands. Since the power may have been * lost since the secure trim 1 commands occurred, it is generally * impossible to calculate the secure trim 2 timeout correctly. 
*/ if (card->erase_shift) qty += ((to >> card->erase_shift) - (from >> card->erase_shift)) + 1; else if (mmc_card_sd(card)) qty += to - from + 1; else qty += ((to / card->erase_size) - (from / card->erase_size)) + 1; if (!mmc_card_blockaddr(card)) { from <<= 9; to <<= 9; } if (mmc_card_sd(card)) cmd.opcode = SD_ERASE_WR_BLK_START; else cmd.opcode = MMC_ERASE_GROUP_START; cmd.arg = from; cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; err = mmc_wait_for_cmd(card->host, &cmd, 0); if (err) { pr_err("mmc_erase: group start error %d, " "status %#x\n", err, cmd.resp[0]); err = -EIO; goto out; } memset(&cmd, 0, sizeof(struct mmc_command)); if (mmc_card_sd(card)) cmd.opcode = SD_ERASE_WR_BLK_END; else cmd.opcode = MMC_ERASE_GROUP_END; cmd.arg = to; cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; err = mmc_wait_for_cmd(card->host, &cmd, 0); if (err) { pr_err("mmc_erase: group end error %d, status %#x\n", err, cmd.resp[0]); err = -EIO; goto out; } memset(&cmd, 0, sizeof(struct mmc_command)); cmd.opcode = MMC_ERASE; cmd.arg = arg; cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; cmd.busy_timeout = mmc_erase_timeout(card, arg, qty); err = mmc_wait_for_cmd(card->host, &cmd, 0); if (err) { pr_err("mmc_erase: erase error %d, status %#x\n", err, cmd.resp[0]); err = -EIO; goto out; } if (mmc_host_is_spi(card->host)) goto out; timeout = jiffies + msecs_to_jiffies(MMC_CORE_TIMEOUT_MS); do { memset(&cmd, 0, sizeof(struct mmc_command)); cmd.opcode = MMC_SEND_STATUS; cmd.arg = card->rca << 16; cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; /* Do not retry else we can't see errors */ err = mmc_wait_for_cmd(card->host, &cmd, 0); if (err || (cmd.resp[0] & 0xFDF92000)) { pr_err("error %d requesting status %#x\n", err, cmd.resp[0]); err = -EIO; goto out; } /* Timeout if the device never becomes ready for data and * never leaves the program state. */ if (time_after(jiffies, timeout)) { pr_err("%s: Card stuck in programming state! 
%s\n", mmc_hostname(card->host), __func__); err = -EIO; goto out; } } while (!(cmd.resp[0] & R1_READY_FOR_DATA) || (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG)); out: trace_mmc_blk_erase_end(arg, fr, nr); return err; } /** * mmc_erase - erase sectors. * @card: card to erase * @from: first sector to erase * @nr: number of sectors to erase * @arg: erase command argument (SD supports only %MMC_ERASE_ARG) * * Caller must claim host before calling this function. */ int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr, unsigned int arg) { unsigned int rem, to = from + nr; if (!(card->host->caps & MMC_CAP_ERASE) || !(card->csd.cmdclass & CCC_ERASE)) return -EOPNOTSUPP; if (!card->erase_size) return -EOPNOTSUPP; if (mmc_card_sd(card) && arg != MMC_ERASE_ARG) return -EOPNOTSUPP; if ((arg & MMC_SECURE_ARGS) && !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)) return -EOPNOTSUPP; if ((arg & MMC_TRIM_ARGS) && !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)) return -EOPNOTSUPP; if (arg == MMC_SECURE_ERASE_ARG) { if (from % card->erase_size || nr % card->erase_size) return -EINVAL; } if (arg == MMC_ERASE_ARG) { rem = from % card->erase_size; if (rem) { rem = card->erase_size - rem; from += rem; if (nr > rem) nr -= rem; else return 0; } rem = nr % card->erase_size; if (rem) nr -= rem; } if (nr == 0) return 0; to = from + nr; if (to <= from) return -EINVAL; /* 'from' and 'to' are inclusive */ to -= 1; return mmc_do_erase(card, from, to, arg); } EXPORT_SYMBOL(mmc_erase); int mmc_can_erase(struct mmc_card *card) { if ((card->host->caps & MMC_CAP_ERASE) && (card->csd.cmdclass & CCC_ERASE) && card->erase_size) return 1; return 0; } EXPORT_SYMBOL(mmc_can_erase); int mmc_can_trim(struct mmc_card *card) { if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) return 1; return 0; } EXPORT_SYMBOL(mmc_can_trim); int mmc_can_discard(struct mmc_card *card) { /* * As there's no way to detect the discard support bit at v4.5 * use the s/w feature 
support filed. */ if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE) return 1; return 0; } EXPORT_SYMBOL(mmc_can_discard); int mmc_can_sanitize(struct mmc_card *card) { if (!mmc_can_trim(card) && !mmc_can_erase(card)) return 0; if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE) return 1; return 0; } EXPORT_SYMBOL(mmc_can_sanitize); int mmc_can_secure_erase_trim(struct mmc_card *card) { if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) && !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN)) return 1; return 0; } EXPORT_SYMBOL(mmc_can_secure_erase_trim); int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from, unsigned int nr) { if (!card->erase_size) return 0; if (from % card->erase_size || nr % card->erase_size) return 0; return 1; } EXPORT_SYMBOL(mmc_erase_group_aligned); static unsigned int mmc_do_calc_max_discard(struct mmc_card *card, unsigned int arg) { struct mmc_host *host = card->host; unsigned int max_discard, x, y, qty = 0, max_qty, timeout; unsigned int last_timeout = 0; if (card->erase_shift) max_qty = UINT_MAX >> card->erase_shift; else if (mmc_card_sd(card)) max_qty = UINT_MAX; else max_qty = UINT_MAX / card->erase_size; /* Find the largest qty with an OK timeout */ do { y = 0; for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) { timeout = mmc_erase_timeout(card, arg, qty + x); if (timeout > host->max_busy_timeout) break; if (timeout < last_timeout) break; last_timeout = timeout; y = x; } qty += y; } while (y); if (!qty) return 0; if (qty == 1) return 1; /* Convert qty to sectors */ if (card->erase_shift) max_discard = --qty << card->erase_shift; else if (mmc_card_sd(card)) max_discard = qty; else max_discard = --qty * card->erase_size; return max_discard; } unsigned int mmc_calc_max_discard(struct mmc_card *card) { struct mmc_host *host = card->host; unsigned int max_discard, max_trim; if (!host->max_busy_timeout) return UINT_MAX; /* * Without erase_group_def set, MMC erase timeout depends on clock * 
frequence which can change. In that case, the best choice is * just the preferred erase size. */ if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1)) return card->pref_erase; max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG); if (mmc_can_trim(card)) { max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG); if (max_trim < max_discard) max_discard = max_trim; } else if (max_discard < card->erase_size) { max_discard = 0; } pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n", mmc_hostname(host), max_discard, host->max_busy_timeout); return max_discard; } EXPORT_SYMBOL(mmc_calc_max_discard); int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen) { struct mmc_command cmd = {0}; if (mmc_card_blockaddr(card) || mmc_card_ddr52(card)) return 0; cmd.opcode = MMC_SET_BLOCKLEN; cmd.arg = blocklen; cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; return mmc_wait_for_cmd(card->host, &cmd, 5); } EXPORT_SYMBOL(mmc_set_blocklen); int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount, bool is_rel_write) { struct mmc_command cmd = {0}; cmd.opcode = MMC_SET_BLOCK_COUNT; cmd.arg = blockcount & 0x0000FFFF; if (is_rel_write) cmd.arg |= 1 << 31; cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; return mmc_wait_for_cmd(card->host, &cmd, 5); } EXPORT_SYMBOL(mmc_set_blockcount); static void mmc_hw_reset_for_init(struct mmc_host *host) { if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset) return; mmc_host_clk_hold(host); host->ops->hw_reset(host); mmc_host_clk_release(host); } int mmc_can_reset(struct mmc_card *card) { u8 rst_n_function; if (!mmc_card_mmc(card)) return 0; rst_n_function = card->ext_csd.rst_n_function; if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED) return 0; return 1; } EXPORT_SYMBOL(mmc_can_reset); static int mmc_do_hw_reset(struct mmc_host *host, int check) { struct mmc_card *card = host->card; if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset) return 
-EOPNOTSUPP; if (!card) return -EINVAL; if (!mmc_can_reset(card)) return -EOPNOTSUPP; mmc_host_clk_hold(host); mmc_set_clock(host, host->f_init); host->ops->hw_reset(host); /* If the reset has happened, then a status command will fail */ if (check) { struct mmc_command cmd = {0}; int err; cmd.opcode = MMC_SEND_STATUS; if (!mmc_host_is_spi(card->host)) cmd.arg = card->rca << 16; cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; err = mmc_wait_for_cmd(card->host, &cmd, 0); if (!err) { mmc_host_clk_release(host); return -ENOSYS; } } if (mmc_host_is_spi(host)) { host->ios.chip_select = MMC_CS_HIGH; host->ios.bus_mode = MMC_BUSMODE_PUSHPULL; } else { host->ios.chip_select = MMC_CS_DONTCARE; host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; } host->ios.bus_width = MMC_BUS_WIDTH_1; host->ios.timing = MMC_TIMING_LEGACY; mmc_set_ios(host); mmc_host_clk_release(host); return host->bus_ops->power_restore(host); } int mmc_hw_reset(struct mmc_host *host) { return mmc_do_hw_reset(host, 0); } EXPORT_SYMBOL(mmc_hw_reset); int mmc_hw_reset_check(struct mmc_host *host) { return mmc_do_hw_reset(host, 1); } EXPORT_SYMBOL(mmc_hw_reset_check); static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq) { host->f_init = freq; #ifdef CONFIG_MMC_DEBUG pr_info("%s: %s: trying to init card at %u Hz\n", mmc_hostname(host), __func__, host->f_init); #endif mmc_power_up(host, host->ocr_avail); /* * Some eMMCs (with VCCQ always on) may not be reset after power up, so * do a hardware reset if possible. */ mmc_hw_reset_for_init(host); /* * sdio_reset sends CMD52 to reset card. Since we do not know * if the card is being re-initialized, just send it. CMD52 * should be ignored by SD/eMMC cards. 
*/ sdio_reset(host); mmc_go_idle(host); mmc_send_if_cond(host, host->ocr_avail); /* Order's important: probe SDIO, then SD, then MMC */ if (!mmc_attach_sdio(host)) return 0; if (!mmc_attach_sd(host)) return 0; if (!mmc_attach_mmc(host)) return 0; mmc_power_off(host); return -EIO; } int _mmc_detect_card_removed(struct mmc_host *host) { int ret; if (host->caps & MMC_CAP_NONREMOVABLE) return 0; if (!host->card || mmc_card_removed(host->card)) return 1; ret = host->bus_ops->alive(host); /* * Card detect status and alive check may be out of sync if card is * removed slowly, when card detect switch changes while card/slot * pads are still contacted in hardware (refer to "SD Card Mechanical * Addendum, Appendix C: Card Detection Switch"). So reschedule a * detect work 200ms later for this case. */ if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) { mmc_detect_change(host, msecs_to_jiffies(200)); pr_debug("%s: card removed too slowly\n", mmc_hostname(host)); } if (ret) { mmc_card_set_removed(host->card); pr_debug("%s: card remove detected\n", mmc_hostname(host)); } return ret; } int mmc_detect_card_removed(struct mmc_host *host) { struct mmc_card *card = host->card; int ret; WARN_ON(!host->claimed); if (!card) return 1; ret = mmc_card_removed(card); /* * The card will be considered unchanged unless we have been asked to * detect a change or host requires polling to provide card detection. */ if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL)) return ret; host->detect_change = 0; if (!ret) { ret = _mmc_detect_card_removed(host); if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) { /* * Schedule a detect work as soon as possible to let a * rescan handle the card removal. 
*/ cancel_delayed_work(&host->detect); _mmc_detect_change(host, 0, false); } } return ret; } EXPORT_SYMBOL(mmc_detect_card_removed); void mmc_rescan(struct work_struct *work) { struct mmc_host *host = container_of(work, struct mmc_host, detect.work); int i; bool extend_wakelock = false; if (host->trigger_card_event && host->ops->card_event) { host->ops->card_event(host); host->trigger_card_event = false; } if (host->rescan_disable) return; /* If there is a non-removable card registered, only scan once */ if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered) return; host->rescan_entered = 1; mmc_bus_get(host); /* * if there is a _removable_ card registered, check whether it is * still present */ if (host->bus_ops && !host->bus_dead && !(host->caps & MMC_CAP_NONREMOVABLE)) host->bus_ops->detect(host); host->detect_change = 0; /* If the card was removed the bus will be marked * as dead - extend the wakelock so userspace * can respond */ if (host->bus_dead) extend_wakelock = 1; /* * Let mmc_bus_put() free the bus/bus_ops if we've found that * the card is no longer present. */ mmc_bus_put(host); mmc_bus_get(host); /* if there still is a card present, stop here */ if (host->bus_ops != NULL) { mmc_bus_put(host); goto out; } /* * Only we can add a new handler, so it's safe to * release the lock here. 
*/ mmc_bus_put(host); if (!(host->caps & MMC_CAP_NONREMOVABLE) && host->ops->get_cd && host->ops->get_cd(host) == 0) { mmc_claim_host(host); mmc_power_off(host); mmc_release_host(host); goto out; } mmc_claim_host(host); for (i = 0; i < ARRAY_SIZE(freqs); i++) { if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min))) { extend_wakelock = true; break; } if (freqs[i] <= host->f_min) break; } mmc_release_host(host); out: if (extend_wakelock) wake_lock_timeout(&mmc_delayed_work_wake_lock, HZ / 2); else wake_unlock(&mmc_delayed_work_wake_lock); if (host->caps & MMC_CAP_NEEDS_POLL) mmc_schedule_delayed_work(&host->detect, HZ); } void mmc_start_host(struct mmc_host *host) { host->f_init = max(freqs[0], host->f_min); host->rescan_disable = 0; host->ios.power_mode = MMC_POWER_UNDEFINED; if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP) mmc_power_off(host); else mmc_power_up(host, host->ocr_avail); mmc_gpiod_request_cd_irq(host); _mmc_detect_change(host, 0, false); } void mmc_stop_host(struct mmc_host *host) { #ifdef CONFIG_MMC_DEBUG unsigned long flags; spin_lock_irqsave(&host->lock, flags); host->removed = 1; spin_unlock_irqrestore(&host->lock, flags); #endif if (host->slot.cd_irq >= 0) disable_irq(host->slot.cd_irq); host->rescan_disable = 1; cancel_delayed_work_sync(&host->detect); mmc_flush_scheduled_work(); /* clear pm flags now and let card drivers set them as needed */ host->pm_flags = 0; mmc_bus_get(host); if (host->bus_ops && !host->bus_dead) { /* Calling bus_ops->remove() with a claimed host can deadlock */ host->bus_ops->remove(host); mmc_claim_host(host); mmc_detach_bus(host); mmc_power_off(host); mmc_release_host(host); mmc_bus_put(host); return; } mmc_bus_put(host); BUG_ON(host->card); mmc_power_off(host); } int mmc_power_save_host(struct mmc_host *host) { int ret = 0; #ifdef CONFIG_MMC_DEBUG pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__); #endif mmc_bus_get(host); if (!host->bus_ops || host->bus_dead) { mmc_bus_put(host); return -EINVAL; } if 
(host->bus_ops->power_save) ret = host->bus_ops->power_save(host); mmc_bus_put(host); mmc_power_off(host); return ret; } EXPORT_SYMBOL(mmc_power_save_host); int mmc_power_restore_host(struct mmc_host *host) { int ret; #ifdef CONFIG_MMC_DEBUG pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__); #endif mmc_bus_get(host); if (!host->bus_ops || host->bus_dead) { mmc_bus_put(host); return -EINVAL; } mmc_power_up(host, host->card->ocr); ret = host->bus_ops->power_restore(host); mmc_bus_put(host); return ret; } EXPORT_SYMBOL(mmc_power_restore_host); /* * Flush the cache to the non-volatile storage. */ int mmc_flush_cache(struct mmc_card *card) { int err = 0; if (mmc_card_mmc(card) && (card->ext_csd.cache_size > 0) && (card->ext_csd.cache_ctrl & 1)) { err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_FLUSH_CACHE, 1, 0); if (err) pr_err("%s: cache flush error %d\n", mmc_hostname(card->host), err); } return err; } EXPORT_SYMBOL(mmc_flush_cache); #ifdef CONFIG_PM /* Do the card removal on suspend if card is assumed removeable * Do that in pm notifier while userspace isn't yet frozen, so we will be able to sync the card. 
*/ int mmc_pm_notify(struct notifier_block *notify_block, unsigned long mode, void *unused) { struct mmc_host *host = container_of( notify_block, struct mmc_host, pm_notify); unsigned long flags; int err = 0; switch (mode) { case PM_HIBERNATION_PREPARE: case PM_SUSPEND_PREPARE: spin_lock_irqsave(&host->lock, flags); host->rescan_disable = 1; spin_unlock_irqrestore(&host->lock, flags); cancel_delayed_work_sync(&host->detect); if (!host->bus_ops) break; /* Validate prerequisites for suspend */ if (host->bus_ops->pre_suspend) err = host->bus_ops->pre_suspend(host); if (!err) break; /* Calling bus_ops->remove() with a claimed host can deadlock */ host->bus_ops->remove(host); mmc_claim_host(host); mmc_detach_bus(host); mmc_power_off(host); mmc_release_host(host); host->pm_flags = 0; break; case PM_POST_SUSPEND: case PM_POST_HIBERNATION: case PM_POST_RESTORE: spin_lock_irqsave(&host->lock, flags); host->rescan_disable = 0; spin_unlock_irqrestore(&host->lock, flags); _mmc_detect_change(host, 0, false); } return 0; } #endif /** * mmc_init_context_info() - init synchronization context * @host: mmc host * * Init struct context_info needed to implement asynchronous * request mechanism, used by mmc core, host driver and mmc requests * supplier. 
*/ void mmc_init_context_info(struct mmc_host *host) { spin_lock_init(&host->context_info.lock); host->context_info.is_new_req = false; host->context_info.is_done_rcv = false; host->context_info.is_waiting_last_req = false; init_waitqueue_head(&host->context_info.wait); } #ifdef CONFIG_MMC_EMBEDDED_SDIO void mmc_set_embedded_sdio_data(struct mmc_host *host, struct sdio_cis *cis, struct sdio_cccr *cccr, struct sdio_embedded_func *funcs, int num_funcs) { host->embedded_sdio_data.cis = cis; host->embedded_sdio_data.cccr = cccr; host->embedded_sdio_data.funcs = funcs; host->embedded_sdio_data.num_funcs = num_funcs; } EXPORT_SYMBOL(mmc_set_embedded_sdio_data); #endif static int __init mmc_init(void) { int ret; workqueue = alloc_ordered_workqueue("kmmcd", 0); if (!workqueue) return -ENOMEM; wake_lock_init(&mmc_delayed_work_wake_lock, WAKE_LOCK_SUSPEND, "mmc_delayed_work"); ret = mmc_register_bus(); if (ret) goto destroy_workqueue; ret = mmc_register_host_class(); if (ret) goto unregister_bus; ret = sdio_register_bus(); if (ret) goto unregister_host_class; return 0; unregister_host_class: mmc_unregister_host_class(); unregister_bus: mmc_unregister_bus(); destroy_workqueue: destroy_workqueue(workqueue); wake_lock_destroy(&mmc_delayed_work_wake_lock); return ret; } static void __exit mmc_exit(void) { sdio_unregister_bus(); mmc_unregister_host_class(); mmc_unregister_bus(); destroy_workqueue(workqueue); wake_lock_destroy(&mmc_delayed_work_wake_lock); } subsys_initcall(mmc_init); module_exit(mmc_exit); MODULE_LICENSE("GPL");
Java
<?php
use Ctct\Util\CurlResponse;
use Ctct\Services\ListService;
use Ctct\Util\RestClient;
use Ctct\Components\Contacts\ContactList;

/**
 * Unit tests for ListService.
 *
 * The REST client is mocked, so these tests only verify that ListService
 * unmarshals the JSON fixtures (provided by JsonLoader) into the expected
 * component objects; no real HTTP traffic is performed.
 *
 * Refactor note: the previously-duplicated stubbing boilerplate and the
 * repeated fixture assertion blocks are extracted into private helpers;
 * the public test method names and behavior are unchanged.
 */
class ListServiceUnitTest extends PHPUnit_Framework_TestCase
{
    private $restClient;
    private $listService;

    public function setUp()
    {
        $this->restClient = $this->getMock('Ctct\Util\RestClientInterface');
        $this->listService = new ListService("apikey", $this->restClient);
    }

    /**
     * Stub the mocked REST client to answer exactly one $method call with a
     * canned response carrying the given JSON body and HTTP status code.
     *
     * @param string $method   rest client verb expected ('get', 'post', 'put')
     * @param string $json     response body fixture
     * @param int    $httpCode HTTP status code of the canned response
     */
    private function stubRestCall($method, $json, $httpCode)
    {
        $curlResponse = CurlResponse::create($json, array('http_code' => $httpCode));
        $this->restClient->expects($this->once())
            ->method($method)
            ->with()
            ->will($this->returnValue($curlResponse));
    }

    /**
     * Assert the two lists contained in the JsonLoader::getListsJson() fixture.
     *
     * @param array $response array of ContactList returned by getLists()
     */
    private function assertListsFixture($response)
    {
        $this->assertInstanceOf("Ctct\Components\Contacts\ContactList", $response[0]);
        $this->assertEquals(1, $response[0]->id);
        $this->assertEquals("General Interest", $response[0]->name);
        $this->assertEquals("ACTIVE", $response[0]->status);
        $this->assertEquals(17, $response[0]->contact_count);
        $this->assertEquals(3, $response[1]->id);
        $this->assertEquals("mod_Test List 1", $response[1]->name);
        $this->assertEquals("HIDDEN", $response[1]->status);
        $this->assertEquals(18, $response[1]->contact_count);
    }

    /**
     * Assert the single list in the JsonLoader::getListJson() fixture
     * ("Test List 4"), shared by the get/add/update tests.
     *
     * @param ContactList $list
     */
    private function assertTestList4($list)
    {
        $this->assertInstanceOf("Ctct\Components\Contacts\ContactList", $list);
        $this->assertEquals(6, $list->id);
        $this->assertEquals("Test List 4", $list->name);
        $this->assertEquals("HIDDEN", $list->status);
        $this->assertEquals(19, $list->contact_count);
    }

    public function testGetLists()
    {
        $this->stubRestCall('get', JsonLoader::getListsJson(), 200);

        $response = $this->listService->getLists('access_token');
        $this->assertListsFixture($response);
    }

    public function testGetListsModifiedSince()
    {
        $this->stubRestCall('get', JsonLoader::getListsJson(), 200);

        $response = $this->listService->getLists('access_token', array('modified_since' => '2013-01-12T20:04:59.436Z'));
        $this->assertListsFixture($response);
    }

    public function testGetList()
    {
        $this->stubRestCall('get', JsonLoader::getListJson(), 200);

        $list = $this->listService->getList('access_token', 6);
        $this->assertTestList4($list);
    }

    public function testAddList()
    {
        // NOTE(review): the canned status here is 204 (vs. 200 elsewhere),
        // matching the original test — presumably mirroring the live API.
        $this->stubRestCall('post', JsonLoader::getListJson(), 204);

        $list = $this->listService->addList('access_token', new ContactList());
        $this->assertTestList4($list);
    }

    public function testUpdateList()
    {
        $this->stubRestCall('put', JsonLoader::getListJson(), 200);

        $list = $this->listService->updateList('access_token', new ContactList());
        $this->assertTestList4($list);
    }

    public function testGetContactsFromList()
    {
        $this->stubRestCall('get', JsonLoader::getContactsJson(), 200);

        $response = $this->listService->getContactsFromList('access_token', 1);
        $this->assertInstanceOf("Ctct\Components\ResultSet", $response);

        // Spot-check the second contact of the fixture in detail.
        $contact = $response->results[1];
        $this->assertEquals(231, $contact->id);
        $this->assertEquals("ACTIVE", $contact->status);
        $this->assertEquals("", $contact->fax);
        $this->assertEquals("", $contact->prefix_name);
        $this->assertEquals("Jimmy", $contact->first_name);
        $this->assertEquals("", $contact->middle_name);
        $this->assertEquals("Roving", $contact->last_name);
        $this->assertEquals("Bear Tamer", $contact->job_title);
        $this->assertEquals("Animal Trainer Pro", $contact->company_name);
        $this->assertEquals("details", $contact->source_details);
        $this->assertEquals(false, $contact->confirmed);
        $this->assertEquals("", $contact->source);

        // custom fields
        $this->assertEquals("CustomField1", $contact->custom_fields[0]->name);
        $this->assertEquals("1", $contact->custom_fields[0]->value);

        //addresses
        $this->assertEquals("Suite 101", $contact->addresses[0]->line1);
        $this->assertEquals("line2", $contact->addresses[0]->line2);
        $this->assertEquals("line3", $contact->addresses[0]->line3);
        $this->assertEquals("Brookfield", $contact->addresses[0]->city);
        $this->assertEquals("PERSONAL", $contact->addresses[0]->address_type);
        $this->assertEquals("WI", $contact->addresses[0]->state_code);
        $this->assertEquals("us", $contact->addresses[0]->country_code);
        $this->assertEquals("53027", $contact->addresses[0]->postal_code);
        $this->assertEquals("", $contact->addresses[0]->sub_postal_code);

        //notes
        $this->assertEquals(0, count($contact->notes));

        //lists
        $this->assertEquals(1, $contact->lists[0]->id);
        $this->assertEquals("ACTIVE", $contact->lists[0]->status);

        // EmailAddress
        $this->assertEquals("ACTIVE", $contact->email_addresses[0]->status);
        $this->assertEquals("NO_CONFIRMATION_REQUIRED", $contact->email_addresses[0]->confirm_status);
        $this->assertEquals("ACTION_BY_OWNER", $contact->email_addresses[0]->opt_in_source);
        $this->assertEquals("2012-06-22T10:29:09.976Z", $contact->email_addresses[0]->opt_in_date);
        $this->assertEquals("", $contact->email_addresses[0]->opt_out_date);
        $this->assertEquals("[email protected]", $contact->email_addresses[0]->email_address);
    }
}
Java
<?xml version="1.0" encoding="iso-8859-1"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "DTD/xhtml1-strict.dtd"> <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en"> <!-- /home/qt/mkdist-qt-4.4.3-1222864207/qt-embedded-linux-opensource-src-4.4.3/doc/src/tutorials/tutorial.qdoc --> <head> <title>Qt 4.4: Qt Tutorial 12 - Hanging in the Air the Way Bricks Don't</title> <link rel="prev" href="tutorials-tutorial-t11.html" /> <link rel="contents" href="tutorials-tutorial.html" /> <link rel="next" href="tutorials-tutorial-t13.html" /> <link href="classic.css" rel="stylesheet" type="text/css" /> </head> <body> <table border="0" cellpadding="0" cellspacing="0" width="100%"> <tr> <td align="left" valign="top" width="32"><a href="http://www.trolltech.com/products/qt"><img src="images/qt-logo.png" align="left" border="0" /></a></td> <td width="1">&nbsp;&nbsp;</td><td class="postheader" valign="center"><a href="index.html"><font color="#004faf">Home</font></a>&nbsp;&middot; <a href="namespaces.html"><font color="#004faf">All&nbsp;Namespaces</font></a>&nbsp;&middot; <a href="classes.html"><font color="#004faf">All&nbsp;Classes</font></a>&nbsp;&middot; <a href="mainclasses.html"><font color="#004faf">Main&nbsp;Classes</font></a>&nbsp;&middot; <a href="groups.html"><font color="#004faf">Grouped&nbsp;Classes</font></a>&nbsp;&middot; <a href="modules.html"><font color="#004faf">Modules</font></a>&nbsp;&middot; <a href="functions.html"><font color="#004faf">Functions</font></a></td> <td align="right" valign="top" width="230"></td></tr></table><p> [Previous: <a href="tutorials-tutorial-t11.html">Chapter 11</a>] [<a href="tutorials-tutorial.html">Qt Tutorial</a>] [Next: <a href="tutorials-tutorial-t13.html">Chapter 13</a>] </p> <h1 class="title">Qt Tutorial 12 - Hanging in the Air the Way Bricks Don't<br /><span class="subtitle"></span> </h1> <p>Files:</p> <ul> <li><a 
href="tutorials-tutorial-t12-cannonfield-cpp.html">tutorials/tutorial/t12/cannonfield.cpp</a></li> <li><a href="tutorials-tutorial-t12-cannonfield-h.html">tutorials/tutorial/t12/cannonfield.h</a></li> <li><a href="tutorials-tutorial-t12-lcdrange-cpp.html">tutorials/tutorial/t12/lcdrange.cpp</a></li> <li><a href="tutorials-tutorial-t12-lcdrange-h.html">tutorials/tutorial/t12/lcdrange.h</a></li> <li><a href="tutorials-tutorial-t12-main-cpp.html">tutorials/tutorial/t12/main.cpp</a></li> <li><a href="tutorials-tutorial-t12-t12-pro.html">tutorials/tutorial/t12/t12.pro</a></li> </ul> <p align="center"><img src="images/t12.png" alt="Screenshot of Chapter 12" /></p><p>In this example, we extend our <tt>LCDRange</tt> class to include a text label. We also provide something to shoot at.</p> <a name="line-by-line-walkthrough"></a> <h2>Line by Line Walkthrough</h2> <a name="t12-lcdrange-h"></a> <h3><a href="tutorials-tutorial-t12-lcdrange-h.html">t12/lcdrange.h</a></h3> <p>The <tt>LCDRange</tt> now has a text label.</p> <pre> class QLabel; class QSlider;</pre> <p>We forward declare <a href="qlabel.html">QLabel</a> and <a href="qslider.html">QSlider</a> because we want to use pointers to them in the class definition. We could also use <tt>#include</tt>, but that would slow down compilation for nothing.</p> <pre> class LCDRange : public QWidget { Q_OBJECT public: LCDRange(QWidget *parent = 0); LCDRange(const QString &amp;text, QWidget *parent = 0);</pre> <p>We have added a new constructor that sets the label text in addition to the parent.</p> <pre> QString text() const;</pre> <p>This function returns the label text.</p> <pre> void setText(const QString &amp;text);</pre> <p>This slot sets the label text.</p> <pre> private: void init();</pre> <p>Because we now have two constructors, we have chosen to put the common initialization in the private <tt>init()</tt> function.</p> <pre> QLabel *label;</pre> <p>We also have a new private variable: a <a href="qlabel.html">QLabel</a>. 
<a href="qlabel.html">QLabel</a> is one of Qt's standard widgets and can show a text or a <a href="qpixmap.html">QPixmap</a> with or without a frame.</p> <a name="t12-lcdrange-cpp"></a> <h3><a href="tutorials-tutorial-t12-lcdrange-cpp.html">t12/lcdrange.cpp</a></h3> <pre> LCDRange::LCDRange(QWidget *parent) : QWidget(parent) { init(); }</pre> <p>This constructor calls the <tt>init()</tt> function, which contains the common initialization code.</p> <pre> LCDRange::LCDRange(const QString &amp;text, QWidget *parent) : QWidget(parent) { init(); setText(text); }</pre> <p>This constructor first calls <tt>init()</tt> and then sets the label text.</p> <pre> void LCDRange::init() { QLCDNumber *lcd = new QLCDNumber(2); lcd-&gt;setSegmentStyle(QLCDNumber::Filled); slider = new QSlider(Qt::Horizontal); slider-&gt;setRange(0, 99); slider-&gt;setValue(0); label = new QLabel; label-&gt;setAlignment(Qt::AlignHCenter | Qt::AlignTop); connect(slider, SIGNAL(valueChanged(int)), lcd, SLOT(display(int))); connect(slider, SIGNAL(valueChanged(int)), this, SIGNAL(valueChanged(int))); QVBoxLayout *layout = new QVBoxLayout; layout-&gt;addWidget(lcd); layout-&gt;addWidget(slider); layout-&gt;addWidget(label); setLayout(layout); setFocusProxy(slider); }</pre> <p>The setup of <tt>lcd</tt> and <tt>slider</tt> is the same as in the previous chapter. Next we create a <a href="qlabel.html">QLabel</a> and tell it to align the contents centered horizontally and to the top vertically. 
The <a href="qobject.html#connect">QObject::connect</a>() calls have also been taken from the previous chapter.</p> <pre> QString LCDRange::text() const { return label-&gt;text(); }</pre> <p>This function returns the label text.</p> <pre> void LCDRange::setText(const QString &amp;text) { label-&gt;setText(text); }</pre> <p>This function sets the label text.</p> <a name="t12-cannonfield-h"></a> <h3><a href="tutorials-tutorial-t12-cannonfield-h.html">t12/cannonfield.h</a></h3> <p>The <tt>CannonField</tt> now has two new signals: <tt>hit()</tt> and <tt>missed()</tt>. In addition, it contains a target.</p> <pre> void newTarget();</pre> <p>This slot creates a target at a new position.</p> <pre> signals: void hit(); void missed();</pre> <p>The <tt>hit()</tt> signal is emitted when a shot hits the target. The <tt>missed()</tt> signal is emitted when the shot moves beyond the right or bottom edge of the widget (i.e&#x2e;, it is certain that it has not and will not hit the target).</p> <pre> void paintTarget(QPainter &amp;painter);</pre> <p>This private function paints the target.</p> <pre> QRect targetRect() const;</pre> <p>This private function returns the enclosing rectangle of the target.</p> <pre> QPoint target;</pre> <p>This private variable contains the center point of the target.</p> <a name="t12-cannonfield-cpp"></a> <h3><a href="tutorials-tutorial-t12-cannonfield-cpp.html">t12/cannonfield.cpp</a></h3> <pre> #include &lt;stdlib.h&gt;</pre> <p>We include the <tt>&lt;stdlib.h&gt;</tt> header file because we need the <tt>qrand()</tt> function.</p> <pre> newTarget();</pre> <p>This line has been added to the constructor. It creates a &quot;random&quot; position for the target. In fact, the <tt>newTarget()</tt> function will try to paint the target. Because we are in a constructor, the <tt>CannonField</tt> widget is invisible. 
Qt guarantees that no harm is done when calling <a href="qwidget.html#update">QWidget::update</a>() on a hidden widget.</p> <pre> void CannonField::newTarget() { static bool firstTime = true; if (firstTime) { firstTime = false; QTime midnight(0, 0, 0); qsrand(midnight.secsTo(QTime::currentTime())); } target = QPoint(200 + qrand() % 190, 10 + qrand() % 255); update(); }</pre> <p>This private function creates a target center point at a new random position.</p> <p>We use the <tt>qrand()</tt> function to fetch random integers. The <tt>qrand()</tt> function normally returns the same series of numbers each time you run a program. This would make the target appear at the same position every time. To avoid this, we must set a random seed the first time this function is called. The random seed must also be random in order to avoid equal random number series. The solution is to use the number of seconds that have passed since midnight as a pseudo-random value.</p> <p>First we create a static <tt>bool</tt> local variable. A static variable like this one is guaranteed to keep its value between calls to the function.</p> <p>The <tt>if</tt> test will succeed only the first time this function is called because we set <tt>firstTime</tt> to <tt>false</tt> inside the <tt>if</tt> block.</p> <p>Then we create the <a href="qtime.html">QTime</a> object <tt>midnight</tt>, which represents the time 00:00:00. Next we fetch the number of seconds from midnight until now and use it as a random seed. See the documentation for <a href="qdate.html">QDate</a>, <a href="qtime.html">QTime</a>, and <a href="qdatetime.html">QDateTime</a> for more information.</p> <p>Finally we calculate the target's center point. 
We keep it within the rectangle (<i>x</i> = 200, <i>y</i> = 35, <i>width</i> = 190, <i>height</i> = 255); i.e&#x2e;, the possible <i>x</i> and <i>y</i> values are 200 to 389 and 35 to 289, respectively, in a coordinate system where we put <i>y</i> position 0 at the bottom edge of the widget and let <i>y</i> values increase upwards. <i>x</i> is as normal, with 0 at the left edge and with <i>x</i> values increasing to the right.
}</pre> <p>This private function paints the target; a rectangle filled with red and with a black outline.</p> <pre> QRect CannonField::targetRect() const { QRect result(0, 0, 20, 10); result.moveCenter(QPoint(target.x(), height() - 1 - target.y())); return result; }</pre> <p>This private function returns the enclosing rectangle of the target. Remember from <tt>newTarget()</tt> that the <tt>target</tt> point uses <i>y</i> coordinate 0 at the bottom of the widget. We calculate the point in widget coordinates before we call <a href="qrect.html#moveCenter">QRect::moveCenter</a>().</p> <p>The reason we have chosen this coordinate mapping is to fix the distance between the target and the bottom of the widget. Remember that the widget can be resized by the user or the program at any time.</p> <a name="t12-main-cpp"></a> <h3><a href="tutorials-tutorial-t12-main-cpp.html">t12/main.cpp</a></h3> <p>There are no new members in the <tt>MyWidget</tt> class, but we have slightly changed the constructor to set the new <tt>LCDRange</tt> text labels.</p> <pre> LCDRange *angle = new LCDRange(tr(&quot;ANGLE&quot;));</pre> <p>We set the angle text label to &quot;ANGLE&quot;.</p> <pre> LCDRange *force = new LCDRange(tr(&quot;FORCE&quot;));</pre> <p>We set the force text label to &quot;FORCE&quot;.</p> <a name="running-the-application"></a> <h2>Running the Application</h2> <p>The <tt>LCDRange</tt> widgets look a bit strange: When resizing <tt>MyWidget</tt>, the built-in layout management in <a href="qvboxlayout.html">QVBoxLayout</a> gives the labels too much space and the rest not enough; making the space between the two <tt>LCDRange</tt> widgets change size. 
We'll fix that in the next chapter.</p> <a name="exercises"></a> <h2>Exercises</h2> <p>Make a cheat button that, when pressed, makes the <tt>CannonField</tt> display the shot trajectory for five seconds.</p> <p>If you did the &quot;round shot&quot; exercise from the previous chapter, try changing the <tt>shotRect()</tt> to a <tt>shotRegion()</tt> that returns a <a href="qregion.html">QRegion</a> so you can have really accurate collision detection.</p> <p>Make a moving target.</p> <p>Make sure that the target is always created entirely on-screen.</p> <p>Make sure that the widget cannot be resized so that the target isn't visible. [Hint: <a href="qwidget.html#minimumSize-prop">QWidget::setMinimumSize</a>() is your friend.]</p> <p>Not easy; make it possible to have several shots in the air at the same time. [Hint: Make a <tt>Shot</tt> class.]</p> <p> [Previous: <a href="tutorials-tutorial-t11.html">Chapter 11</a>] [<a href="tutorials-tutorial.html">Qt Tutorial</a>] [Next: <a href="tutorials-tutorial-t13.html">Chapter 13</a>] </p> <p /><address><hr /><div align="center"> <table width="100%" cellspacing="0" border="0"><tr class="address"> <td width="30%" align="left">Copyright &copy; 2008 Nokia</td> <td width="40%" align="center"><a href="trademarks.html">Trademarks</a></td> <td width="30%" align="right"><div align="right">Qt 4.4.3</div></td> </tr></table></div></address></body> </html>
Java
/* Embedded CSS
Before moving to full version - check twice!
NO DUPLICATES */

/* ICONS / LOADERS */
#icon-wpcf { background: url('../images/logo-32.png') no-repeat; }
#icon-wpcf-access { background: url('../images/access-icon-32x32.png') no-repeat; }
#icon-wpcf-search { background: url('../images/search_36x34.png') no-repeat; }
.wpcf-ajax-loading { background: url('../images/ajax-loader-big.gif') no-repeat; width: 32px; height: 32px; }
.wpcf-ajax-loading-small { background: url('../images/ajax-loader-small.gif') no-repeat; width: 16px; height: 16px; }

/* FORMS */
/* Fix: dropped the redundant `border-color: #cccccc` that repeated the
   color already set by the `border` shorthand (header says NO DUPLICATES). */
.wpcf-form-fieldset { background-color: #ffffff; padding: 0 15px 15px 15px; border: 1px solid #cccccc; margin: 15px 0 25px 0; }
.wpcf-form-fieldset fieldset { margin-bottom: 0; }
.wpcf-fields-form fieldset { width: auto; }
.wpcf-form-fieldset legend { font-weight: bold; }
.wpcf-form-fieldset .legend-collapsed { padding-left: 15px; background-image: url('../images/expand.png'); background-repeat: no-repeat; background-position: 0px 2px; cursor: pointer; }
.wpcf-form-fieldset .legend-expanded { padding-left: 15px; background-image: url('../images/collapse.png'); background-repeat: no-repeat; background-position: 0px 3px; cursor: pointer; }
.wpcf-form-fieldset .collapsed { display: none; }
.wpcf-form-item { margin-bottom: 25px; }
.wpcf-form-fieldset .wpcf-form-item:first-child { margin-top: 5px; }
.wpcf-form-item .wpcf-form-item { margin-bottom: 0; }
.wpcf-form-submit { margin-top: 15px; }
.wpcf-form-description { font-size: 0.85em; font-style: italic; margin-bottom: 5px; }
.wpcf-form-description-fieldset { font-size: 1em; font-style: normal; margin: 10px 0; }
.wpcf-form-textarea { width: 100%; }
.wpcf-form-description-textarea, .wpcf-form-description-checkboxes, .wpcf-form-description-radios { font-size: 1em; font-style: normal; margin-bottom: 5px; }
.wpcf-form-label { white-space: nowrap; }
.wpcf-form-textfield-label { font-size: 1em; font-weight: bold; display: block; }
.wpcf-form-textfield { width: 200px; }
.wpcf-form-item-file label { font-size: 1em; font-weight: bold; display: block; }
.wpcf-form-item-textarea label, .wpcf-form-title-checkboxes, .wpcf-form-title-radios, .wpcf-form-select-label { font-size: 1em; font-weight: bold; }
.wpcf-form-item-textarea label { display: block; }
.wpcf-form-error { background-color: #ffffe0; border: 1px solid #e6db55; padding: 5px 10px; width: auto; margin: 10px 0; display: block; }
input.wpcf-form-error { background-color: #F8F8F8; border-color: Red !important; }
.wpcf-form-fields-align-right { float: left; width: 250px; margin-top: 0; margin-left: 450px; /* position: absolute;*/ position: fixed; /* THIS IS ALSO SET IN JS AFTER ADDING SCROLL */ clear: both; z-index: 12; }
.wpcf-form-fields-align-right fieldset { width: 250px; background-color: #ffffff; }
.wpcf-form-fields-align-right a.wpcf-fields-add-ajax-link { margin: 3px 5px 2px 0; float: left; }
.wpcf-fields-form .ui-draggable .wpcf-form-fieldset .wpcf-form-fieldset legend { cursor: pointer; }
.wpcf-fields-form .ui-sortable { padding: 0 0 10px 0; }
.wpcf-fields-form .ui-sortable-placeholder { border: 1px dashed #CCCCCC; width: auto; visibility: visible !important; }
/* Fix: removed the duplicate `margin-top: 10px` that was immediately
   overridden by `margin-top: 3px` (the 3px value always won). */
.wpcf-form-fields-delete, .wpcf-fields-form-move-field { float: left; margin-top: 3px; margin-right: 5px; }
.wpcf-fields-form-move-field { cursor: move; }
.wpcf-fields-form .taxonomy-title { margin-top: 10px; font-style: italic; }

/* LIST */
#wpcf_groups_list th { white-space: nowrap; }
#wpcf-table-group_name { width: 250px; }
#wpcf-table-group_taxonomies { width: 200px; }
#wpcf-form-fields-main { width: 400px; }

/* STRANGE */
#ui-datepicker-div { display: none; }
.wpcf-shortcode { margin-top: 5px; }
.wpcf-pointer { cursor: pointer; }
.wpcf-fields-form-validate-table { padding: 0; margin: 0; width: 100%; border: 1px solid #D2D2D2; }
.wpcf-fields-form-validate-table td { padding: 5px 10px; margin: 0; }
.wpcf-fields-form-validate-table thead tr { background-color: #E8E8E8; font-weight: bold; }
.wpcf-fields-form-validate-table thead td { border-bottom: 1px solid #D2D2D2; }
.wpcf-fields-form-validate-table tbody tr:nth-child(odd) { background-color: #F7F7F7; }
.wpcf-fields-form-validate-table tbody tr:nth-child(even) { background-color: #EEEEEE; }
.wpcf-fields-form-validate-table td .textfield { width: 100%; }
.wpcf-fields-form-radio-move-field { cursor: move; }

/* TYPES FORM */
#wpcf-types-form-name-table, #wpcf-types-form-visibility-table, #wpcf-types-form-labels-table, #wpcf-types-form-taxonomies-table, #wpcf-types-form-supports-table, .wpcf-types-form-table { margin-bottom: 20px; padding-bottom: 5px; }
#wpcf-types-form-name-table td, #wpcf-types-form-visibility-table td, #wpcf-types-form-labels-table td, #wpcf-types-form-taxonomies-table td, #wpcf-types-form-supports-table td, .wpcf-types-form-table td { border: none; }
#wpcf-types-form-name-table tbody tr td:first-child { text-align: right; }
#wpcf-types-form-name-table tbody tr:first-child td { padding-top: 10px; }
#wpcf-types-form-name-table input { width: 100%; }
#wpcf-types-form-name-table label { font-weight: normal; }
#wpcf-types-form-visibility-table tbody table { margin-top: 5px; }
#wpcf-types-form-visibility-table tbody table td { padding: 0; vertical-align: middle; }
#wpcf-types-form-visibility-table tbody table tr td:first-child { text-align: right; }
#wpcf-types-form-visibility-table tbody table label { font-weight: normal; }
#wpcf-types-form-labels-table tbody tr td:first-child { text-align: right; }
#wpcf-types-form-labels-table tbody label { font-weight: normal; }
#wpcf-types-form-labels-table tbody td { vertical-align: middle; }
#wpcf-types-form-labels-table .wpcf-form-description { font-size: 0.9em; line-height: 1.2em; }
#wpcf-types-form-labels-table tbody tr:first-child td { padding-top: 15px; }
#wpcf-types-form-rewrite-toggle { margin: 0 0 20px 0; }

/*CHECKBOXES*/
.wpcf-checkboxes-drag { position: absolute; }
.wpcf-checkboxes-drag img { cursor: pointer; }
.wpcf-fields-checkboxes-draggable legend { background-position: 15px 2px !important; background-repeat: no-repeat; cursor: pointer; padding-left: 30px !important; }

/* MESSAGES */
.wpcf-message { padding: 0 0.6em; border-radius: 3px 3px 3px 3px; border-style: solid; border-width: 1px; margin: 1em 0 1em 0; }
.wpcf-error { background-color: #FFEBE8; border-color: #CC0000; padding: 5px; }

/* HELP LINK (question-mark icon, floats over the screen-options area) */
.wpcf-admin-fields-help { margin-bottom: 10px; }
.wpcf-help-link { display:inline-block; min-height:19px; height:19px; font-size:11px !important; line-height:19px; min-width:19px; padding-left:20px; padding-top:4px; background:url('../../common/res/images/question.png') no-repeat 0 6px; text-decoration:none !important; position:relative; z-index:100; /*color:#FFFFFF !important;*/ float:right; margin: -20px 45px 0 0; }
/* Sprite shift: hover state uses the lower half of question.png. */
.wpcf-help-link:hover { text-decoration:underline !important; color:#d54e21 !important; background-position:0 -19px; }
.wpcf-form-options-header-title { position: absolute; margin: -18px 0 0 40px; }
.wpcf-form-options-header-value { position: absolute; margin: -18px 0 0 125px; }

/* UTILITIES */
.wpcf-loading { width: 20px; height: 16px; background: url('../images/wpspin.gif') no-repeat 2px 0; display: inline-block; }
.wpcf-hide { display: none; }
/* NOTE(review): intentionally empty — the element just falls back to its
   natural display once .wpcf-hide is removed. */
.wpcf-show { /*display: block;*/ }

/* SETTINGS PAGE */
.horlist { margin-bottom: 12px; }
.horlist li { float: left; line-height: 20px; margin-right: 10px; }

/* AJAX (popup) admin pages: strip the normal wp-admin chrome. */
#wpcf-ajax * { width: auto; }
#wpcf-ajax #wpcontent { margin: 0px; }
#wpcf-ajax #wpbody-content { padding: 20px; width: 80%; }
#wpcf-ajax #wpfooter { clear: both; display: none; }
.wpcf-editor-popup-advanced-link { display: block; padding: 10px 0; outline: 0 !important; border: 0 !important; }
.wpcf-editor-popup-advanced-link a:hover, .wpcf-editor-popup-advanced-link a:active, .wpcf-editor-popup-advanced-link a:focus { outline: 0 !important; border: 0 !important; }
.modman-inline-table { margin-top: 20px; }
.wpcf-pagination-top { margin-bottom: 10px; }
.types-ajax #wpwrap { margin-top: 20px; }
.types-small-italic { font-size: 0.9em; font-style: italic; }
.types-ajax #screen-meta-links, .types-ajax #screen-meta { display: none; }
Java
/*
 * Jeremy Compostella <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 */

#include "htckbdhandler.h"

#include <QFile>
#include <QTextStream>
#include <QSocketNotifier>
#include <QDebug>

#include <fcntl.h>
#include <unistd.h>

#include <htckbdmap.h>
#include <linux/input.h>

/*
 * Open the Linux input device and watch it through a QSocketNotifier so
 * processEvent() is called whenever key events become readable.
 */
htcKbdHandler::htcKbdHandler(const QString &device)
    : modifiers(Qt::NoModifier), capsLock(false)
{
    setObjectName("Htc keyboard Handler");
    kbdFD = ::open(device.toLocal8Bit().constData(), O_RDONLY, 0);
    if (kbdFD >= 0) {
        /* Parented to `this`, so Qt deletes the notifier with the handler. */
        m_notify = new QSocketNotifier(kbdFD, QSocketNotifier::Read, this);
        connect(m_notify, SIGNAL(activated(int)), this, SLOT(processEvent()));
    } else
        qWarning("Cannot open %s", device.toLocal8Bit().constData());
}

htcKbdHandler::~htcKbdHandler()
{
    /* Fix: the device fd was never closed before (resource leak). */
    if (kbdFD >= 0)
        ::close(kbdFD);
}

/*
 * Drain up to MAX_EVENT input_event records from the device and dispatch
 * each one to the per-event handler below.
 */
void htcKbdHandler::processEvent()
{
#define MAX_EVENT 10
    struct input_event events[MAX_EVENT];
    ssize_t n;

    n = read(kbdFD, &events, sizeof(struct input_event) * MAX_EVENT);
    /*
     * Fix: the old code stored read()'s result in an int and divided it by
     * sizeof(...) unchecked; on a read error (-1) the int converted to a
     * huge size_t and the loop walked far past the stack buffer.
     */
    if (n <= 0)
        return;
    for (size_t i = 0; i < static_cast<size_t>(n) / sizeof(struct input_event); ++i)
        processEvent(events[i]);
}

/*
 * Translate one raw input_event into a Qt key event.
 *
 * Tracks modifier/caps-lock state across calls, suppresses auto-repeat of
 * pure modifier keys, and rate-limits repeats of ordinary keys to every
 * MIN_REPEAT-th event.
 */
void htcKbdHandler::processEvent(struct input_event event)
{
    static struct input_event previous = { {0, 0}, 0, 0, 0};
    static uint previous_nb = 0;
    struct QWSKeyMap key;

    /*
     * NOTE(review): `event.code > keyMSize` admits code == keyMSize; if
     * keyMSize is the element count of htcuniversalKeyMap this is an
     * off-by-one — confirm against htckbdmap.h before changing.
     */
    if (event.code > keyMSize || event.code == 0)
        return;

#define MIN_REPEAT 5
    key = htcuniversalKeyMap[event.code];
    /* Skip repeats: same key as last time, modifiers always, others
     * until MIN_REPEAT consecutive events have accumulated. */
    if ((event.code == previous.code &&
         (previous.value == 0 ? !event.value : event.value)) &&
        (key.key_code == Qt::Key_Control ||
         key.key_code == Qt::Key_Shift ||
         key.key_code == Qt::Key_CapsLock ||
         key.key_code == Qt::Key_Alt ||
         previous_nb++ <= MIN_REPEAT))
        return;
    if (event.code != previous.code)
        previous_nb = 0;

    /* Toggle sticky modifier state (press and release both toggle). */
    if (key.key_code == Qt::Key_Control)
        modifiers ^= Qt::ControlModifier;
    if (key.key_code == Qt::Key_Shift)
        modifiers ^= Qt::ShiftModifier;
    if (key.key_code == Qt::Key_Alt)
        modifiers ^= Qt::AltModifier;
    if (key.key_code == Qt::Key_CapsLock && event.value == 0)
        capsLock = !capsLock;

    /* Pick the unicode variant matching the active modifier state. */
    ushort unicode = key.unicode;
    if (modifiers & Qt::ShiftModifier && !capsLock)
        unicode = key.shift_unicode;
    else if (modifiers & Qt::ControlModifier)
        unicode = key.ctrl_unicode;
    else if (modifiers & Qt::AltModifier)
        unicode = key.alt_unicode;
    else if (capsLock && !(modifiers & Qt::ShiftModifier))
        unicode = key.shift_unicode;

    processKeyEvent(unicode, key.key_code, modifiers, event.value != 0, false);
    previous = event;
}
Java
// SPDX-License-Identifier: GPL-2.0+
/*
 * linux/fs/jbd2/commit.c
 *
 * Written by Stephen C. Tweedie <[email protected]>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * Journal commit routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bitops.h>
#include <trace/events/jbd2.h>

/*
 * IO end handler for temporary buffer_heads handling writes to the journal.
 *
 * Propagates the IO result into the temporary bh; if this write shadowed
 * a live metadata buffer (bh->b_private set), clear BH_Shadow on the
 * original and wake anyone waiting for the shadow write to finish.
 */
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	struct buffer_head *orig_bh = bh->b_private;

	BUFFER_TRACE(bh, "");
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	if (orig_bh) {
		/* Shadow write done: release the original for reuse. */
		clear_bit_unlock(BH_Shadow, &orig_bh->b_state);
		smp_mb__after_atomic();
		wake_up_bit(&orig_bh->b_state, BH_Shadow);
	}
	unlock_buffer(bh);
}

/*
 * When an ext4 file is truncated, it is possible that some pages are not
 * successfully freed, because they are attached to a committing transaction.
 * After the transaction commits, these pages are left on the LRU, with no
 * ->mapping, and with attached buffers.  These pages are trivially reclaimable
 * by the VM, but their apparent absence upsets the VM accounting, and it makes
 * the numbers in /proc/meminfo look odd.
 *
 * So here, we have a buffer which has just come off the forget list.  Look to
 * see if we can strip all buffers from the backing page.
 *
 * Called under lock_journal(), and possibly under journal_datalist_lock.  The
 * caller provided us with a ref against the buffer, and we drop that here.
 */
static void release_buffer_page(struct buffer_head *bh)
{
	struct page *page;

	/* Only a clean, sole-reference buffer on a truncated page
	 * (no ->mapping) is worth stripping; otherwise just drop our ref. */
	if (buffer_dirty(bh))
		goto nope;
	if (atomic_read(&bh->b_count) != 1)
		goto nope;
	page = bh->b_page;
	if (!page)
		goto nope;
	if (page->mapping)
		goto nope;

	/* OK, it's a truncated page */
	if (!trylock_page(page))
		goto nope;

	get_page(page);
	__brelse(bh);
	try_to_free_buffers(page);
	unlock_page(page);
	put_page(page);
	return;

nope:
	__brelse(bh);
}

/*
 * Compute and store the v2/v3 checksum of a commit block.  The checksum
 * fields are zeroed first so the block is checksummed in a known state.
 * No-op on journals without csum v2/v3.
 */
static void jbd2_commit_block_csum_set(journal_t *j, struct buffer_head *bh)
{
	struct commit_header *h;
	__u32 csum;

	if (!jbd2_journal_has_csum_v2or3(j))
		return;

	h = (struct commit_header *)(bh->b_data);
	h->h_chksum_type = 0;
	h->h_chksum_size = 0;
	h->h_chksum[0] = 0;
	csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
	h->h_chksum[0] = cpu_to_be32(csum);
}

/*
 * Done it all: now submit the commit record.  We should have
 * cleaned up our previous buffers by now, so if we are in abort
 * mode we can now just skip the rest of the journal write
 * entirely.
 *
 * Returns 1 if the journal needs to be aborted or 0 on success
 */
static int journal_submit_commit_record(journal_t *journal,
					transaction_t *commit_transaction,
					struct buffer_head **cbh,
					__u32 crc32_sum)
{
	struct commit_header *tmp;
	struct buffer_head *bh;
	int ret;
	struct timespec64 now;

	*cbh = NULL;

	if (is_journal_aborted(journal))
		return 0;

	bh = jbd2_journal_get_descriptor_buffer(commit_transaction,
						JBD2_COMMIT_BLOCK);
	if (!bh)
		return 1;

	/* Stamp the commit block with a coarse wall-clock timestamp. */
	tmp = (struct commit_header *)bh->b_data;
	ktime_get_coarse_real_ts64(&now);
	tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
	tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);

	if (jbd2_has_feature_checksum(journal)) {
		tmp->h_chksum_type = JBD2_CRC32_CHKSUM;
		tmp->h_chksum_size = JBD2_CRC32_CHKSUM_SIZE;
		tmp->h_chksum[0] = cpu_to_be32(crc32_sum);
	}
	jbd2_commit_block_csum_set(journal, bh);

	BUFFER_TRACE(bh, "submit commit block");
	lock_buffer(bh);
	clear_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	bh->b_end_io = journal_end_buffer_io_sync;

	/* Barrier commit: flush + FUA unless async commit is enabled. */
	if (journal->j_flags & JBD2_BARRIER &&
	    !jbd2_has_feature_async_commit(journal))
		ret = submit_bh(REQ_OP_WRITE,
			REQ_SYNC | REQ_PREFLUSH | REQ_FUA, bh);
	else
		ret = submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);

	*cbh = bh;
	return ret;
}

/*
 * This function along with journal_submit_commit_record
 * allows to write the commit record asynchronously.
 *
 * Waits for the commit block IO to finish; returns -EIO on write
 * failure.  Drops the getblk() reference taken when it was submitted.
 */
static int journal_wait_on_commit_record(journal_t *journal,
					 struct buffer_head *bh)
{
	int ret = 0;

	clear_buffer_dirty(bh);
	wait_on_buffer(bh);

	if (unlikely(!buffer_uptodate(bh)))
		ret = -EIO;
	put_bh(bh);	/* One for getblk() */

	return ret;
}

/*
 * write the filemap data using writepage() address_space_operations.
 * We don't do block allocation here even for delalloc. We don't
 * use writepages() because with dealyed allocation we may be doing
 * block allocation in writepages().
 */
static int journal_submit_inode_data_buffers(struct address_space *mapping,
		loff_t dirty_start, loff_t dirty_end)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode =  WB_SYNC_ALL,
		.nr_to_write = mapping->nrpages * 2,
		.range_start = dirty_start,
		.range_end = dirty_end,
	};

	ret = generic_writepages(mapping, &wbc);
	return ret;
}

/*
 * Submit all the data buffers of inode associated with the transaction to
 * disk.
 *
 * We are in a committing transaction. Therefore no new inode can be added to
 * our inode list. We use JI_COMMIT_RUNNING flag to protect inode we currently
 * operate on from being released while we write out pages.
 */
static int journal_submit_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode;
	int err, ret = 0;
	struct address_space *mapping;

	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		loff_t dirty_start = jinode->i_dirty_start;
		loff_t dirty_end = jinode->i_dirty_end;

		if (!(jinode->i_flags & JI_WRITE_DATA))
			continue;
		mapping = jinode->i_vfs_inode->i_mapping;
		/* JI_COMMIT_RUNNING pins the inode while we drop the lock. */
		jinode->i_flags |= JI_COMMIT_RUNNING;
		spin_unlock(&journal->j_list_lock);
		/*
		 * submit the inode data buffers. We use writepage
		 * instead of writepages. Because writepages can do
		 * block allocation with delalloc. We need to write
		 * only allocated blocks here.
		 */
		trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
		err = journal_submit_inode_data_buffers(mapping, dirty_start,
				dirty_end);
		/* Remember the first error, keep submitting the rest. */
		if (!ret)
			ret = err;
		spin_lock(&journal->j_list_lock);
		J_ASSERT(jinode->i_transaction == commit_transaction);
		jinode->i_flags &= ~JI_COMMIT_RUNNING;
		smp_mb();
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}
	spin_unlock(&journal->j_list_lock);
	return ret;
}

/*
 * Wait for data submitted for writeout, refile inodes to proper
 * transaction if needed.
 *
 * Returns the first waiting error encountered (if any) but always
 * processes the whole inode list.
 */
static int journal_finish_inode_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode, *next_i;
	int err, ret = 0;

	/* For locking, see the comment in journal_submit_data_buffers() */
	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		loff_t dirty_start = jinode->i_dirty_start;
		loff_t dirty_end = jinode->i_dirty_end;

		if (!(jinode->i_flags & JI_WAIT_DATA))
			continue;
		jinode->i_flags |= JI_COMMIT_RUNNING;
		spin_unlock(&journal->j_list_lock);
		err = filemap_fdatawait_range_keep_errors(
				jinode->i_vfs_inode->i_mapping, dirty_start,
				dirty_end);
		if (!ret)
			ret = err;
		spin_lock(&journal->j_list_lock);
		jinode->i_flags &= ~JI_COMMIT_RUNNING;
		smp_mb();
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}

	/* Now refile inode to proper lists */
	list_for_each_entry_safe(jinode, next_i,
				 &commit_transaction->t_inode_list, i_list) {
		list_del(&jinode->i_list);
		if (jinode->i_next_transaction) {
			/* Inode is dirty in a newer transaction too: hand
			 * it over rather than dropping it. */
			jinode->i_transaction = jinode->i_next_transaction;
			jinode->i_next_transaction = NULL;
			list_add(&jinode->i_list,
				&jinode->i_transaction->t_inode_list);
		} else {
			jinode->i_transaction = NULL;
			jinode->i_dirty_start = 0;
			jinode->i_dirty_end = 0;
		}
	}
	spin_unlock(&journal->j_list_lock);

	return ret;
}

/*
 * Fold the contents of one journal buffer into a running crc32 used for
 * the transactional (v1) commit checksum.
 */
static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	char *addr;
	__u32 checksum;

	addr = kmap_atomic(page);
	checksum = crc32_be(crc32_sum,
		(void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
	kunmap_atomic(addr);

	return checksum;
}

/*
 * Store a block number into a descriptor tag; the high 32 bits are only
 * written when the journal has the 64bit feature.
 */
static void write_tag_block(journal_t *j, journal_block_tag_t *tag,
				   unsigned long long block)
{
	tag->t_blocknr = cpu_to_be32(block & (u32)~0);
	if (jbd2_has_feature_64bit(j))
		tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
}

/*
 * Compute the per-block checksum (over the commit sequence number plus
 * the block contents) and store it in the tag.  Uses the 32-bit field
 * for csum3 journals and the 16-bit field otherwise.  No-op without
 * csum v2/v3.
 */
static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
				    struct buffer_head *bh, __u32 sequence)
{
	journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
	struct page *page = bh->b_page;
	__u8 *addr;
	__u32 csum32;
	__be32 seq;

	if (!jbd2_journal_has_csum_v2or3(j))
		return;

	seq = cpu_to_be32(sequence);
	addr = kmap_atomic(page);
	csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
	csum32 = jbd2_chksum(j, csum32, addr + offset_in_page(bh->b_data),
			     bh->b_size);
	kunmap_atomic(addr);

	if (jbd2_has_feature_csum3(j))
		tag3->t_checksum = cpu_to_be32(csum32);
	else
		tag->t_checksum = cpu_to_be16(csum32);
}

/*
 * jbd2_journal_commit_transaction
 *
 * The primary function for committing a transaction to the log.  This
 * function is called by the journal thread to begin a complete commit.
 */
/*
 * Overview of the commit phases implemented below (numbers match the
 * jbd_debug "commit phase N" messages in this function):
 *   1: lock the running transaction, drain updates, drop reserved bufs
 *   2a/2b: submit data buffers and revoke records
 *   3: write metadata (via shadow buffers + descriptor blocks), wait for IO
 *   4/5: wait for control buffers, write + wait for the commit record
 *   6: process the forget list / move buffers to checkpoint lists
 *   7: file statistics, mark T_FINISHED, wake waiters
 */
void jbd2_journal_commit_transaction(journal_t *journal)
{
	struct transaction_stats_s stats;
	transaction_t *commit_transaction;
	struct journal_head *jh;
	struct buffer_head *descriptor;
	struct buffer_head **wbuf = journal->j_wbuf;
	int bufs;
	int flags;
	int err;
	unsigned long long blocknr;
	ktime_t start_time;
	u64 commit_time;
	char *tagp = NULL;
	journal_block_tag_t *tag = NULL;
	int space_left = 0;
	int first_tag = 0;
	int tag_flag;
	int i;
	int tag_bytes = journal_tag_bytes(journal);
	struct buffer_head *cbh = NULL; /* For transactional checksums */
	__u32 crc32_sum = ~0;
	struct blk_plug plug;
	/* Tail of the journal */
	unsigned long first_block;
	tid_t first_tid;
	int update_tail;
	int csum_size = 0;
	LIST_HEAD(io_bufs);
	LIST_HEAD(log_bufs);

	if (jbd2_journal_has_csum_v2or3(journal))
		csum_size = sizeof(struct jbd2_journal_block_tail);

	/*
	 * First job: lock down the current transaction and wait for
	 * all outstanding updates to complete.
	 */

	/* Do we need to erase the effects of a prior jbd2_journal_flush? */
	if (journal->j_flags & JBD2_FLUSHED) {
		jbd_debug(3, "super block updated\n");
		mutex_lock_io(&journal->j_checkpoint_mutex);
		/*
		 * We hold j_checkpoint_mutex so tail cannot change under us.
		 * We don't need any special data guarantees for writing sb
		 * since journal is empty and it is ok for write to be
		 * flushed only with transaction commit.
		 */
		jbd2_journal_update_sb_log_tail(journal,
						journal->j_tail_sequence,
						journal->j_tail,
						REQ_SYNC);
		mutex_unlock(&journal->j_checkpoint_mutex);
	} else {
		jbd_debug(3, "superblock not updated\n");
	}

	J_ASSERT(journal->j_running_transaction != NULL);
	J_ASSERT(journal->j_committing_transaction == NULL);

	commit_transaction = journal->j_running_transaction;

	trace_jbd2_start_commit(journal, commit_transaction);
	jbd_debug(1, "JBD2: starting commit of transaction %d\n",
			commit_transaction->t_tid);

	write_lock(&journal->j_state_lock);
	J_ASSERT(commit_transaction->t_state == T_RUNNING);
	commit_transaction->t_state = T_LOCKED;

	trace_jbd2_commit_locking(journal, commit_transaction);
	stats.run.rs_wait = commit_transaction->t_max_wait;
	stats.run.rs_request_delay = 0;
	stats.run.rs_locked = jiffies;
	if (commit_transaction->t_requested)
		stats.run.rs_request_delay =
			jbd2_time_diff(commit_transaction->t_requested,
				       stats.run.rs_locked);
	stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
					      stats.run.rs_locked);

	/* Wait until every outstanding handle on this transaction has
	 * completed; the lock is dropped around each schedule(). */
	spin_lock(&commit_transaction->t_handle_lock);
	while (atomic_read(&commit_transaction->t_updates)) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_wait_updates, &wait,
					TASK_UNINTERRUPTIBLE);
		if (atomic_read(&commit_transaction->t_updates)) {
			spin_unlock(&commit_transaction->t_handle_lock);
			write_unlock(&journal->j_state_lock);
			schedule();
			write_lock(&journal->j_state_lock);
			spin_lock(&commit_transaction->t_handle_lock);
		}
		finish_wait(&journal->j_wait_updates, &wait);
	}
	spin_unlock(&commit_transaction->t_handle_lock);

	J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <=
			journal->j_max_transaction_buffers);

	/*
	 * First thing we are allowed to do is to discard any remaining
	 * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
	 * that there are no such buffers: if a large filesystem
	 * operation like a truncate needs to split itself over multiple
	 * transactions, then it may try to do a jbd2_journal_restart() while
	 * there are still BJ_Reserved buffers outstanding.  These must
	 * be released cleanly from the current transaction.
	 *
	 * In this case, the filesystem must still reserve write access
	 * again before modifying the buffer in the new transaction, but
	 * we do not require it to remember exactly which old buffers it
	 * has reserved.  This is consistent with the existing behaviour
	 * that multiple jbd2_journal_get_write_access() calls to the same
	 * buffer are perfectly permissible.
	 */
	while (commit_transaction->t_reserved_list) {
		jh = commit_transaction->t_reserved_list;
		JBUFFER_TRACE(jh, "reserved, unused: refile");
		/*
		 * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
		 * leave undo-committed data.
		 */
		if (jh->b_committed_data) {
			struct buffer_head *bh = jh2bh(jh);

			jbd_lock_bh_state(bh);
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			jbd_unlock_bh_state(bh);
		}
		jbd2_journal_refile_buffer(journal, jh);
	}

	/*
	 * Now try to drop any written-back buffers from the journal's
	 * checkpoint lists.  We do this *before* commit because it potentially
	 * frees some memory
	 */
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_clean_checkpoint_list(journal, false);
	spin_unlock(&journal->j_list_lock);

	jbd_debug(3, "JBD2: commit phase 1\n");

	/*
	 * Clear revoked flag to reflect there is no revoked buffers
	 * in the next transaction which is going to be started.
	 */
	jbd2_clear_buffer_revoked_flags(journal);

	/*
	 * Switch to a new revoke table.
	 */
	jbd2_journal_switch_revoke_table(journal);

	/*
	 * Reserved credits cannot be claimed anymore, free them
	 */
	atomic_sub(atomic_read(&journal->j_reserved_credits),
		   &commit_transaction->t_outstanding_credits);

	trace_jbd2_commit_flushing(journal, commit_transaction);
	stats.run.rs_flushing = jiffies;
	stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
					     stats.run.rs_flushing);

	/* Promote the transaction to T_FLUSH; a new running transaction
	 * may now be started by other threads. */
	commit_transaction->t_state = T_FLUSH;
	journal->j_committing_transaction = commit_transaction;
	journal->j_running_transaction = NULL;
	start_time = ktime_get();
	commit_transaction->t_log_start = journal->j_head;
	wake_up(&journal->j_wait_transaction_locked);
	write_unlock(&journal->j_state_lock);

	jbd_debug(3, "JBD2: commit phase 2a\n");

	/*
	 * Now start flushing things to disk, in the order they appear
	 * on the transaction lists.  Data blocks go first.
	 */
	err = journal_submit_data_buffers(journal, commit_transaction);
	if (err)
		jbd2_journal_abort(journal, err);

	blk_start_plug(&plug);
	jbd2_journal_write_revoke_records(commit_transaction, &log_bufs);

	jbd_debug(3, "JBD2: commit phase 2b\n");

	/*
	 * Way to go: we have now written out all of the data for a
	 * transaction!  Now comes the tricky part: we need to write out
	 * metadata.  Loop over the transaction's entire buffer list:
	 */
	write_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_COMMIT;
	write_unlock(&journal->j_state_lock);

	trace_jbd2_commit_logging(journal, commit_transaction);
	stats.run.rs_logging = jiffies;
	stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing,
					       stats.run.rs_logging);
	stats.run.rs_blocks = atomic_read(&commit_transaction->t_outstanding_credits);
	stats.run.rs_blocks_logged = 0;

	J_ASSERT(commit_transaction->t_nr_buffers <=
		 atomic_read(&commit_transaction->t_outstanding_credits));

	err = 0;
	bufs = 0;
	descriptor = NULL;
	while (commit_transaction->t_buffers) {

		/* Find the next buffer to be journaled... */

		jh = commit_transaction->t_buffers;

		/* If we're in abort mode, we just un-journal the buffer and
		   release it. */

		if (is_journal_aborted(journal)) {
			clear_buffer_jbddirty(jh2bh(jh));
			JBUFFER_TRACE(jh, "journal is aborting: refile");
			jbd2_buffer_abort_trigger(jh,
						  jh->b_frozen_data ?
						  jh->b_frozen_triggers :
						  jh->b_triggers);
			jbd2_journal_refile_buffer(journal, jh);
			/* If that was the last one, we need to clean up
			 * any descriptor buffers which may have been
			 * already allocated, even if we are now
			 * aborting. */
			if (!commit_transaction->t_buffers)
				goto start_journal_io;
			continue;
		}

		/* Make sure we have a descriptor block in which to
		   record the metadata buffer. */

		if (!descriptor) {
			J_ASSERT (bufs == 0);

			jbd_debug(4, "JBD2: get descriptor\n");

			descriptor = jbd2_journal_get_descriptor_buffer(
							commit_transaction,
							JBD2_DESCRIPTOR_BLOCK);
			if (!descriptor) {
				jbd2_journal_abort(journal, -EIO);
				continue;
			}

			jbd_debug(4, "JBD2: got buffer %llu (%p)\n",
				(unsigned long long)descriptor->b_blocknr,
				descriptor->b_data);
			tagp = &descriptor->b_data[sizeof(journal_header_t)];
			space_left = descriptor->b_size -
						sizeof(journal_header_t);
			first_tag = 1;
			set_buffer_jwrite(descriptor);
			set_buffer_dirty(descriptor);
			wbuf[bufs++] = descriptor;

			/* Record it so that we can wait for IO
			   completion later */
			BUFFER_TRACE(descriptor, "ph3: file as descriptor");
			jbd2_file_log_bh(&log_bufs, descriptor);
		}

		/* Where is the buffer to be written? */

		err = jbd2_journal_next_log_block(journal, &blocknr);
		/* If the block mapping failed, just abandon the buffer
		   and repeat this loop: we'll fall into the
		   refile-on-abort condition above. */
		if (err) {
			jbd2_journal_abort(journal, err);
			continue;
		}

		/*
		 * start_this_handle() uses t_outstanding_credits to determine
		 * the free space in the log, but this counter is changed
		 * by jbd2_journal_next_log_block() also.
		 */
		atomic_dec(&commit_transaction->t_outstanding_credits);

		/* Bump b_count to prevent truncate from stumbling over
		   the shadowed buffer!  @@@ This can go if we ever get
		   rid of the shadow pairing of buffers. */
		atomic_inc(&jh2bh(jh)->b_count);

		/*
		 * Make a temporary IO buffer with which to write it out
		 * (this will requeue the metadata buffer to BJ_Shadow).
		 */
		set_bit(BH_JWrite, &jh2bh(jh)->b_state);
		JBUFFER_TRACE(jh, "ph3: write metadata");
		flags = jbd2_journal_write_metadata_buffer(commit_transaction,
						jh, &wbuf[bufs], blocknr);
		if (flags < 0) {
			jbd2_journal_abort(journal, flags);
			continue;
		}
		jbd2_file_log_bh(&io_bufs, wbuf[bufs]);

		/* Record the new block's tag in the current descriptor
		   buffer */

		tag_flag = 0;
		if (flags & 1)
			tag_flag |= JBD2_FLAG_ESCAPE;
		if (!first_tag)
			tag_flag |= JBD2_FLAG_SAME_UUID;

		tag = (journal_block_tag_t *) tagp;
		write_tag_block(journal, tag, jh2bh(jh)->b_blocknr);
		tag->t_flags = cpu_to_be16(tag_flag);
		jbd2_block_tag_csum_set(journal, tag, wbuf[bufs],
					commit_transaction->t_tid);
		tagp += tag_bytes;
		space_left -= tag_bytes;
		bufs++;

		if (first_tag) {
			/* The first tag of a descriptor carries the
			 * journal UUID inline. */
			memcpy (tagp, journal->j_uuid, 16);
			tagp += 16;
			space_left -= 16;
			first_tag = 0;
		}

		/* If there's no more to do, or if the descriptor is full,
		   let the IO rip! */

		if (bufs == journal->j_wbufsize ||
		    commit_transaction->t_buffers == NULL ||
		    space_left < tag_bytes + 16 + csum_size) {

			jbd_debug(4, "JBD2: Submit %d IOs\n", bufs);

			/* Write an end-of-descriptor marker before
			   submitting the IOs.  "tag" still points to
			   the last tag we set up. */

			tag->t_flags |= cpu_to_be16(JBD2_FLAG_LAST_TAG);
start_journal_io:
			if (descriptor)
				jbd2_descriptor_block_csum_set(journal,
							descriptor);

			for (i = 0; i < bufs; i++) {
				struct buffer_head *bh = wbuf[i];
				/*
				 * Compute checksum.
				 */
				if (jbd2_has_feature_checksum(journal)) {
					crc32_sum =
					    jbd2_checksum_data(crc32_sum, bh);
				}

				lock_buffer(bh);
				clear_buffer_dirty(bh);
				set_buffer_uptodate(bh);
				bh->b_end_io = journal_end_buffer_io_sync;
				submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
			}
			cond_resched();

			/* Force a new descriptor to be generated next
			   time round the loop. */
			descriptor = NULL;
			bufs = 0;
		}
	}

	err = journal_finish_inode_data_buffers(journal, commit_transaction);
	if (err) {
		printk(KERN_WARNING
			"JBD2: Detected IO errors while flushing file data "
		       "on %s\n", journal->j_devname);
		if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
			jbd2_journal_abort(journal, err);
		err = 0;
	}

	/*
	 * Get current oldest transaction in the log before we issue flush
	 * to the filesystem device. After the flush we can be sure that
	 * blocks of all older transactions are checkpointed to persistent
	 * storage and we will be safe to update journal start in the
	 * superblock with the numbers we get here.
	 */
	update_tail =
		jbd2_journal_get_log_tail(journal, &first_tid, &first_block);

	write_lock(&journal->j_state_lock);
	if (update_tail) {
		long freed = first_block - journal->j_tail;

		if (first_block < journal->j_tail)
			freed += journal->j_last - journal->j_first;
		/* Update tail only if we free significant amount of space */
		if (freed < journal->j_maxlen / 4)
			update_tail = 0;
	}
	J_ASSERT(commit_transaction->t_state == T_COMMIT);
	commit_transaction->t_state = T_COMMIT_DFLUSH;
	write_unlock(&journal->j_state_lock);

	/*
	 * If the journal is not located on the file system device,
	 * then we must flush the file system device before we issue
	 * the commit record
	 */
	if (commit_transaction->t_need_data_flush &&
	    (journal->j_fs_dev != journal->j_dev) &&
	    (journal->j_flags & JBD2_BARRIER))
		blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS, NULL);

	/* Done it all: now write the commit record asynchronously. */
	if (jbd2_has_feature_async_commit(journal)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						 &cbh, crc32_sum);
		if (err)
			__jbd2_journal_abort_hard(journal);
	}

	blk_finish_plug(&plug);

	/* Lo and behold: we have just managed to send a transaction to
	   the log.  Before we can commit it, wait for the IO so far to
	   complete.  Control buffers being written are on the
	   transaction's t_log_list queue, and metadata buffers are on
	   the io_bufs list.

	   Wait for the buffers in reverse order.  That way we are less
	   likely to be woken up until all IOs have completed, and so we
	   incur less scheduling load.
	*/

	jbd_debug(3, "JBD2: commit phase 3\n");

	while (!list_empty(&io_bufs)) {
		struct buffer_head *bh = list_entry(io_bufs.prev,
						    struct buffer_head,
						    b_assoc_buffers);

		wait_on_buffer(bh);
		cond_resched();

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;
		jbd2_unfile_log_bh(bh);
		stats.run.rs_blocks_logged++;

		/*
		 * The list contains temporary buffer heads created by
		 * jbd2_journal_write_metadata_buffer().
		 */
		BUFFER_TRACE(bh, "dumping temporary bh");
		__brelse(bh);
		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
		free_buffer_head(bh);

		/* We also have to refile the corresponding shadowed buffer */
		jh = commit_transaction->t_shadow_list->b_tprev;
		bh = jh2bh(jh);
		clear_buffer_jwrite(bh);
		J_ASSERT_BH(bh, buffer_jbddirty(bh));
		J_ASSERT_BH(bh, !buffer_shadow(bh));

		/* The metadata is now released for reuse, but we need
		   to remember it against this transaction so that when
		   we finally commit, we can do any checkpointing
		   required. */
		JBUFFER_TRACE(jh, "file as BJ_Forget");
		jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
		JBUFFER_TRACE(jh, "brelse shadowed buffer");
		__brelse(bh);
	}

	J_ASSERT (commit_transaction->t_shadow_list == NULL);

	jbd_debug(3, "JBD2: commit phase 4\n");

	/* Here we wait for the revoke record and descriptor record buffers */
	while (!list_empty(&log_bufs)) {
		struct buffer_head *bh;

		bh = list_entry(log_bufs.prev, struct buffer_head, b_assoc_buffers);
		wait_on_buffer(bh);
		cond_resched();

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
		clear_buffer_jwrite(bh);
		jbd2_unfile_log_bh(bh);
		stats.run.rs_blocks_logged++;
		__brelse(bh);		/* One for getblk */
		/* AKPM: bforget here */
	}

	if (err)
		jbd2_journal_abort(journal, err);

	jbd_debug(3, "JBD2: commit phase 5\n");
	write_lock(&journal->j_state_lock);
	J_ASSERT(commit_transaction->t_state == T_COMMIT_DFLUSH);
	commit_transaction->t_state = T_COMMIT_JFLUSH;
	write_unlock(&journal->j_state_lock);

	/* Synchronous commit: the commit record is only written after
	 * all log blocks above have reached the journal. */
	if (!jbd2_has_feature_async_commit(journal)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						&cbh, crc32_sum);
		if (err)
			__jbd2_journal_abort_hard(journal);
	}
	if (cbh)
		err = journal_wait_on_commit_record(journal, cbh);
	stats.run.rs_blocks_logged++;
	if (jbd2_has_feature_async_commit(journal) &&
	    journal->j_flags & JBD2_BARRIER) {
		blkdev_issue_flush(journal->j_dev, GFP_NOFS, NULL);
	}

	if (err)
		jbd2_journal_abort(journal, err);

	/*
	 * Now disk caches for filesystem device are flushed so we are safe to
	 * erase checkpointed transactions from the log by updating journal
	 * superblock.
	 */
	if (update_tail)
		jbd2_update_log_tail(journal, first_tid, first_block);

	/* End of a transaction!  Finally, we can do checkpoint
	   processing: any buffers committed as a result of this
	   transaction can be removed from any checkpoint list it was on
	   before. */

	jbd_debug(3, "JBD2: commit phase 6\n");

	J_ASSERT(list_empty(&commit_transaction->t_inode_list));
	J_ASSERT(commit_transaction->t_buffers == NULL);
	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
	J_ASSERT(commit_transaction->t_shadow_list == NULL);

restart_loop:
	/*
	 * As there are other places (journal_unmap_buffer()) adding buffers
	 * to this list we have to be careful and hold the j_list_lock.
	 */
	spin_lock(&journal->j_list_lock);
	while (commit_transaction->t_forget) {
		transaction_t *cp_transaction;
		struct buffer_head *bh;
		int try_to_free = 0;

		jh = commit_transaction->t_forget;
		spin_unlock(&journal->j_list_lock);
		bh = jh2bh(jh);
		/*
		 * Get a reference so that bh cannot be freed before we are
		 * done with it.
		 */
		get_bh(bh);
		jbd_lock_bh_state(bh);
		J_ASSERT_JH(jh,	jh->b_transaction == commit_transaction);

		/*
		 * If there is undo-protected committed data against
		 * this buffer, then we can remove it now.  If it is a
		 * buffer needing such protection, the old frozen_data
		 * field now points to a committed version of the
		 * buffer, so rotate that field to the new committed
		 * data.
		 *
		 * Otherwise, we can just throw away the frozen data now.
		 *
		 * We also know that the frozen data has already fired
		 * its triggers if they exist, so we can clear that too.
		 */
		if (jh->b_committed_data) {
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			if (jh->b_frozen_data) {
				jh->b_committed_data = jh->b_frozen_data;
				jh->b_frozen_data = NULL;
				jh->b_frozen_triggers = NULL;
			}
		} else if (jh->b_frozen_data) {
			jbd2_free(jh->b_frozen_data, bh->b_size);
			jh->b_frozen_data = NULL;
			jh->b_frozen_triggers = NULL;
		}

		spin_lock(&journal->j_list_lock);
		cp_transaction = jh->b_cp_transaction;
		if (cp_transaction) {
			JBUFFER_TRACE(jh, "remove from old cp transaction");
			cp_transaction->t_chp_stats.cs_dropped++;
			__jbd2_journal_remove_checkpoint(jh);
		}

		/* Only re-checkpoint the buffer_head if it is marked
		 * dirty.  If the buffer was added to the BJ_Forget list
		 * by jbd2_journal_forget, it may no longer be dirty and
		 * there's no point in keeping a checkpoint record for
		 * it. */

		/*
		 * A buffer which has been freed while still being journaled by
		 * a previous transaction.
		 */
		if (buffer_freed(bh)) {
			/*
			 * If the running transaction is the one containing
			 * "add to orphan" operation (b_next_transaction !=
			 * NULL), we have to wait for that transaction to
			 * commit before we can really get rid of the buffer.
			 * So just clear b_modified to not confuse transaction
			 * credit accounting and refile the buffer to
			 * BJ_Forget of the running transaction. If the just
			 * committed transaction contains "add to orphan"
			 * operation, we can completely invalidate the buffer
			 * now. We are rather through in that since the
			 * buffer may be still accessible when blocksize <
			 * pagesize and it is attached to the last partial
			 * page.
			 */
			jh->b_modified = 0;
			if (!jh->b_next_transaction) {
				clear_buffer_freed(bh);
				clear_buffer_jbddirty(bh);
				clear_buffer_mapped(bh);
				clear_buffer_new(bh);
				clear_buffer_req(bh);
				bh->b_bdev = NULL;
			}
		}

		if (buffer_jbddirty(bh)) {
			JBUFFER_TRACE(jh, "add to new checkpointing trans");
			__jbd2_journal_insert_checkpoint(jh, commit_transaction);
			if (is_journal_aborted(journal))
				clear_buffer_jbddirty(bh);
		} else {
			J_ASSERT_BH(bh, !buffer_dirty(bh));
			/*
			 * The buffer on BJ_Forget list and not jbddirty means
			 * it has been freed by this transaction and hence it
			 * could not have been reallocated until this
			 * transaction has committed. *BUT* it could be
			 * reallocated once we have written all the data to
			 * disk and before we process the buffer on BJ_Forget
			 * list.
			 */
			if (!jh->b_next_transaction)
				try_to_free = 1;
		}
		JBUFFER_TRACE(jh, "refile or unfile buffer");
		__jbd2_journal_refile_buffer(jh);
		jbd_unlock_bh_state(bh);
		if (try_to_free)
			release_buffer_page(bh);	/* Drops bh reference */
		else
			__brelse(bh);
		cond_resched_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);
	/*
	 * This is a bit sleazy.  We use j_list_lock to protect transition
	 * of a transaction into T_FINISHED state and calling
	 * __jbd2_journal_drop_transaction(). Otherwise we could race with
	 * other checkpointing code processing the transaction...
	 */
	write_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	/*
	 * Now recheck if some buffers did not get attached to the transaction
	 * while the lock was dropped...
	 */
	if (commit_transaction->t_forget) {
		spin_unlock(&journal->j_list_lock);
		write_unlock(&journal->j_state_lock);
		goto restart_loop;
	}

	/* Add the transaction to the checkpoint list
	 * __journal_remove_checkpoint() can not destroy transaction
	 * under us because it is not marked as T_FINISHED yet */
	if (journal->j_checkpoint_transactions == NULL) {
		journal->j_checkpoint_transactions = commit_transaction;
		commit_transaction->t_cpnext = commit_transaction;
		commit_transaction->t_cpprev = commit_transaction;
	} else {
		commit_transaction->t_cpnext =
			journal->j_checkpoint_transactions;
		commit_transaction->t_cpprev =
			commit_transaction->t_cpnext->t_cpprev;
		commit_transaction->t_cpnext->t_cpprev =
			commit_transaction;
		commit_transaction->t_cpprev->t_cpnext =
				commit_transaction;
	}
	spin_unlock(&journal->j_list_lock);

	/* Done with this transaction! */

	jbd_debug(3, "JBD2: commit phase 7\n");

	J_ASSERT(commit_transaction->t_state == T_COMMIT_JFLUSH);

	commit_transaction->t_start = jiffies;
	stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging,
					      commit_transaction->t_start);

	/*
	 * File the transaction statistics
	 */
	stats.ts_tid = commit_transaction->t_tid;
	stats.run.rs_handle_count =
		atomic_read(&commit_transaction->t_handle_count);
	trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
			     commit_transaction->t_tid, &stats.run);
	stats.ts_requested = (commit_transaction->t_requested) ? 1 : 0;

	commit_transaction->t_state = T_COMMIT_CALLBACK;
	J_ASSERT(commit_transaction == journal->j_committing_transaction);
	journal->j_commit_sequence = commit_transaction->t_tid;
	journal->j_committing_transaction = NULL;
	commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));

	/*
	 * weight the commit time higher than the average time so we don't
	 * react too strongly to vast changes in the commit time
	 */
	if (likely(journal->j_average_commit_time))
		journal->j_average_commit_time = (commit_time +
				journal->j_average_commit_time*3) / 4;
	else
		journal->j_average_commit_time = commit_time;

	write_unlock(&journal->j_state_lock);

	if (journal->j_commit_callback)
		journal->j_commit_callback(journal, commit_transaction);

	trace_jbd2_end_commit(journal, commit_transaction);
	jbd_debug(1, "JBD2: commit %d complete, head %d\n",
		  journal->j_commit_sequence, journal->j_tail_sequence);

	write_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	commit_transaction->t_state = T_FINISHED;
	/* Check if the transaction can be dropped now that we are finished */
	if (commit_transaction->t_checkpoint_list == NULL &&
	    commit_transaction->t_checkpoint_io_list == NULL) {
		__jbd2_journal_drop_transaction(journal, commit_transaction);
		jbd2_journal_free_transaction(commit_transaction);
	}
	spin_unlock(&journal->j_list_lock);
	write_unlock(&journal->j_state_lock);
	wake_up(&journal->j_wait_done_commit);

	/*
	 * Calculate overall stats
	 */
	spin_lock(&journal->j_history_lock);
	journal->j_stats.ts_tid++;
	journal->j_stats.ts_requested += stats.ts_requested;
	journal->j_stats.run.rs_wait += stats.run.rs_wait;
	journal->j_stats.run.rs_request_delay += stats.run.rs_request_delay;
	journal->j_stats.run.rs_running += stats.run.rs_running;
	journal->j_stats.run.rs_locked += stats.run.rs_locked;
	journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
	journal->j_stats.run.rs_logging += stats.run.rs_logging;
	journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
	journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
	journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
	spin_unlock(&journal->j_history_lock);
}
Java
<?php if (!defined('BASEPATH')) exit('No direct script access allowed');
/**
 * CodeIgniter
 *
 * An open source application development framework for PHP 4.3.2 or newer
 *
 * @package		CodeIgniter
 * @author		ExpressionEngine Dev Team
 * @copyright	Copyright (c) 2006, EllisLab, Inc.
 * @license		http://codeigniter.com/user_guide/license.html
 * @link		http://codeigniter.com
 * @since		Version 1.0
 * @filesource
 */

// ------------------------------------------------------------------------

/**
 * Common Functions
 *
 * Loads the base classes and executes the request.
 *
 * NOTE(review): this file relies on the constants BASEPATH, APPPATH and EXT
 * being defined before it is loaded — presumably by the front controller
 * (index.php); confirm against the bootstrap.
 *
 * @package		CodeIgniter
 * @subpackage	codeigniter
 * @category	Common Functions
 * @author		ExpressionEngine Dev Team
 * @link		http://codeigniter.com/user_guide/
 */

// ------------------------------------------------------------------------

/**
 * Tests for file writability
 *
 * is_writable() returns TRUE on Windows servers
 * when you really can't write to the file
 * as the OS reports to PHP as FALSE only if the
 * read-only attribute is marked.  To get a reliable answer this
 * function actually attempts to open the target for appending.
 *
 * For a directory it creates (and immediately deletes) a probe file
 * with a random name; for a regular file it opens the file itself.
 *
 * @access	private
 * @param	string	$file	path to a file or directory
 * @return	bool	TRUE if the path is genuinely writable
 */
function is_really_writable($file)
{
	if (is_dir($file))
	{
		// Probe the directory by creating a uniquely-named temp file in it.
		$file = rtrim($file, '/').'/'.md5(rand(1,100));

		if (($fp = @fopen($file, 'ab')) === FALSE)
		{
			return FALSE;
		}

		// Clean up the probe file (errors deliberately suppressed:
		// this is a best-effort check, not a hard requirement).
		fclose($fp);
		@chmod($file, 0777);
		@unlink($file);
		return TRUE;
	}
	elseif (($fp = @fopen($file, 'ab')) === FALSE)
	{
		// Could not open the file for appending: not writable.
		return FALSE;
	}

	fclose($fp);
	return TRUE;
}

// ------------------------------------------------------------------------

/**
 * Class registry
 *
 * This function acts as a singleton.  If the requested class does not
 * exist it is instantiated and set to a static variable.  If it has
 * previously been instantiated the variable is returned.
 *
 * An application-level subclass (prefixed with the configured
 * 'subclass_prefix') takes precedence over the framework's native
 * class; in that case BOTH the native class and the subclass are
 * loaded, and the subclass is the one instantiated.
 *
 * @access	public
 * @param	string	$class		the class name being requested
 * @param	bool	$instantiate	optional flag that lets classes get loaded but not instantiated
 * @return	object	reference to the cached instance (or TRUE when $instantiate is FALSE)
 */
function &load_class($class, $instantiate = TRUE)
{
	static $objects = array();

	// Does the class exist?  If so, we're done...
	if (isset($objects[$class]))
	{
		return $objects[$class];
	}

	// If the requested class does not exist in the application/libraries
	// folder we'll load the native class from the system/libraries folder.
	if (file_exists(APPPATH.'libraries/'.config_item('subclass_prefix').$class.EXT))
	{
		// Subclass present: load the parent (native) class first so the
		// subclass can extend it, then the subclass itself.
		require(BASEPATH.'libraries/'.$class.EXT);
		require(APPPATH.'libraries/'.config_item('subclass_prefix').$class.EXT);
		$is_subclass = TRUE;
	}
	else
	{
		if (file_exists(APPPATH.'libraries/'.$class.EXT))
		{
			// Application-local replacement class (no prefix).
			require(APPPATH.'libraries/'.$class.EXT);
			$is_subclass = FALSE;
		}
		else
		{
			// Fall back to the framework's native class.
			require(BASEPATH.'libraries/'.$class.EXT);
			$is_subclass = FALSE;
		}
	}

	if ($instantiate == FALSE)
	{
		// Caller only wanted the class definition loaded; cache a
		// sentinel so a later call does not re-require the file.
		$objects[$class] = TRUE;
		return $objects[$class];
	}

	if ($is_subclass == TRUE)
	{
		$name = config_item('subclass_prefix').$class;

		// NOTE(review): "=& new" is PHP4-style assignment of a new object
		// by reference; it is deprecated in PHP 5 and a fatal error in
		// PHP 7+ — this file targets PHP 4.3.2+ as written.
		$objects[$class] =& new $name();
		return $objects[$class];
	}

	// Native classes carry the CI_ prefix, except the base Controller.
	$name = ($class != 'Controller') ? 'CI_'.$class : $class;

	$objects[$class] =& new $name();

	return $objects[$class];
}

/**
 * Loads the main config.php file
 *
 * Reads application/config/config.php exactly once and caches the
 * resulting $config array in a static; subsequent calls return a
 * reference to the cached array.  Exits with a message if the file is
 * missing or does not define an array named $config.
 *
 * @access	private
 * @return	array	reference to the configuration array
 */
function &get_config()
{
	static $main_conf;

	if ( ! isset($main_conf))
	{
		if ( ! file_exists(APPPATH.'config/config'.EXT))
		{
			exit('The configuration file config'.EXT.' does not exist.');
		}

		// Pulls a $config array into local scope.
		require(APPPATH.'config/config'.EXT);

		if ( ! isset($config) OR ! is_array($config))
		{
			exit('Your config file does not appear to be formatted correctly.');
		}

		// Stored under index 0 so the static can hold a reference.
		$main_conf[0] =& $config;
	}
	return $main_conf[0];
}

/**
 * Gets a config item
 *
 * Per-item memoized lookup into the array returned by get_config().
 *
 * @access	public
 * @param	string	$item	configuration key
 * @return	mixed	the configured value, or FALSE if the key is absent
 *			(note: FALSE is also returned for keys legitimately set to FALSE)
 */
function config_item($item)
{
	static $config_item = array();

	if ( ! isset($config_item[$item]))
	{
		$config =& get_config();

		if ( ! isset($config[$item]))
		{
			return FALSE;
		}

		$config_item[$item] = $config[$item];
	}

	return $config_item[$item];
}

/**
 * Error Handler
 *
 * This function lets us invoke the exception class and
 * display errors using the standard error template located
 * in application/errors/errors.php
 * This function will send the error page directly to the
 * browser and exit.
 *
 * @access	public
 * @param	string	$message	error message to display
 * @return	void	never returns — terminates the request
 */
function show_error($message)
{
	$error =& load_class('Exceptions');
	echo $error->show_error('An Error Was Encountered', $message);
	exit;
}

/**
 * 404 Page Handler
 *
 * This function is similar to the show_error() function above
 * However, instead of the standard error template it displays
 * 404 errors.
 *
 * @access	public
 * @param	string	$page	the URI/page that was not found
 * @return	void	never returns — terminates the request
 */
function show_404($page = '')
{
	$error =& load_class('Exceptions');
	$error->show_404($page);
	exit;
}

/**
 * Error Logging Interface
 *
 * We use this as a simple mechanism to access the logging
 * class and send messages to be logged.
 *
 * NOTE(review): the optional $level parameter precedes the required
 * $message parameter, so callers must always pass both; this ordering
 * is deprecated in later PHP versions.
 *
 * @access	public
 * @param	string	$level		severity level (default 'error')
 * @param	string	$message	text to log
 * @param	bool	$php_error	whether the entry originated from a PHP error
 * @return	void
 */
function log_message($level = 'error', $message, $php_error = FALSE)
{
	static $LOG;

	$config =& get_config();
	// Logging disabled entirely when the threshold is 0.
	if ($config['log_threshold'] == 0)
	{
		return;
	}

	$LOG =& load_class('Log');
	$LOG->write_log($level, $message, $php_error);
}

/**
 * Exception Handler
 *
 * This is the custom exception handler that is declaired at the top
 * of Codeigniter.php.  The main reason we use this is permit
 * PHP errors to be logged in our own log files since we may
 * not have access to server logs. Since this function
 * effectively intercepts PHP errors, however, we also need
 * to display errors based on the current error_reporting level.
 * We do that with the use of a PHP error template.
 *
 * @access	private
 * @param	int	$severity	PHP error level bitmask (E_* constant)
 * @param	string	$message	error message
 * @param	string	$filepath	file in which the error occurred
 * @param	int	$line		line number of the error
 * @return	void
 */
function _exception_handler($severity, $message, $filepath, $line)
{
	// We don't bother with "strict" notices since they will fill up
	// the log file with information that isn't normally very
	// helpful.  For example, if you are running PHP 5 and you
	// use version 4 style class functions (without prefixes
	// like "public", "private", etc.) you'll get notices telling
	// you that these have been deprecated.
	if ($severity == E_STRICT)
	{
		return;
	}

	$error =& load_class('Exceptions');

	// Should we display the error?
	// We'll get the current error_reporting level and add its bits
	// with the severity bits to find out.
	if (($severity & error_reporting()) == $severity)
	{
		$error->show_php_error($severity, $message, $filepath, $line);
	}

	// Should we log the error?  No?  We're done...
	$config =& get_config();
	if ($config['log_threshold'] == 0)
	{
		return;
	}

	$error->log_exception($severity, $message, $filepath, $line);
}
?>
Java
/* $Id: opencore_amr.h 4335 2013-01-29 08:09:15Z ming $ */
/*
 * Copyright (C) 2011-2013 Teluu Inc. (http://www.teluu.com)
 * Copyright (C) 2011 Dan Arrhenius <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#ifndef __PJMEDIA_CODEC_OPENCORE_AMR_H__
#define __PJMEDIA_CODEC_OPENCORE_AMR_H__

#include <pjmedia-codec/types.h>

/**
 * @defgroup PJMED_OC_AMR OpenCORE AMR Codec
 * @ingroup PJMEDIA_CODEC_CODECS
 * @brief AMR codec wrapper for the OpenCORE AMR codec library
 * @{
 */

PJ_BEGIN_DECL

/**
 * Bitmask options to be passed during AMR codec factory initialization
 * (see #pjmedia_codec_opencore_amr_init()).
 */
enum pjmedia_amr_options
{
    PJMEDIA_AMR_NO_NB	    = 1,    /**< Disable narrowband mode.	*/
    PJMEDIA_AMR_NO_WB	    = 2,    /**< Disable wideband mode.		*/
};

/**
 * Runtime settings for the AMR codec.  Use
 * #pjmedia_codec_opencore_amrnb_set_config() or
 * #pjmedia_codec_opencore_amrwb_set_config() to activate.
 */
typedef struct pjmedia_codec_amr_config
{
    /**
     * Control whether to use octet-align packetization mode
     * (as opposed to bandwidth-efficient mode).
     */
    pj_bool_t octet_align;

    /**
     * Set the bitrate, in bits per second.
     */
    unsigned bitrate;

} pjmedia_codec_amr_config;

/** Alias of #pjmedia_codec_amr_config for the narrowband codec. */
typedef pjmedia_codec_amr_config pjmedia_codec_amrnb_config;
/** Alias of #pjmedia_codec_amr_config for the wideband codec. */
typedef pjmedia_codec_amr_config pjmedia_codec_amrwb_config;

/**
 * Initialize and register AMR codec factory to pjmedia endpoint.
 *
 * @param endpt	    The pjmedia endpoint.
 * @param options   Bitmask of pjmedia_amr_options (default=0).
 *
 * @return	    PJ_SUCCESS on success.
 */
PJ_DECL(pj_status_t)
pjmedia_codec_opencore_amr_init(pjmedia_endpt* endpt, unsigned options);

/**
 * Initialize and register AMR codec factory using default settings to
 * pjmedia endpoint.
 *
 * @param endpt	    The pjmedia endpoint.
 *
 * @return	    PJ_SUCCESS on success.
 */
PJ_DECL(pj_status_t)
pjmedia_codec_opencore_amr_init_default(pjmedia_endpt* endpt);

/**
 * Unregister AMR codec factory from pjmedia endpoint and deinitialize
 * the OpenCORE codec library.
 *
 * @return	    PJ_SUCCESS on success.
 */
PJ_DECL(pj_status_t) pjmedia_codec_opencore_amr_deinit(void);

/**
 * Initialize and register AMR-NB codec factory to pjmedia endpoint. Calling
 * this function will automatically initialize AMR codec factory without
 * the wideband mode (i.e. it is equivalent to calling
 * #pjmedia_codec_opencore_amr_init() with PJMEDIA_AMR_NO_WB). Application
 * should call #pjmedia_codec_opencore_amr_init() instead if wishing to use
 * both modes.
 *
 * @param endpt	    The pjmedia endpoint.
 *
 * @return	    PJ_SUCCESS on success.
 */
PJ_DECL(pj_status_t) pjmedia_codec_opencore_amrnb_init(pjmedia_endpt* endpt);

/**
 * Unregister AMR-NB codec factory from pjmedia endpoint and deinitialize
 * the OpenCORE codec library.
 *
 * @return	    PJ_SUCCESS on success.
 */
PJ_DECL(pj_status_t) pjmedia_codec_opencore_amrnb_deinit(void);

/**
 * Set AMR-NB parameters.
 *
 * @param cfg	    The settings.
 *
 * @return	    PJ_SUCCESS on success.
 */
PJ_DECL(pj_status_t) pjmedia_codec_opencore_amrnb_set_config(
				const pjmedia_codec_amrnb_config* cfg);

/**
 * Set AMR-WB parameters.
 *
 * @param cfg	    The settings.
 *
 * @return	    PJ_SUCCESS on success.
 */
PJ_DECL(pj_status_t) pjmedia_codec_opencore_amrwb_set_config(
				const pjmedia_codec_amrwb_config* cfg);

PJ_END_DECL

/**
 * @}
 */

#endif	/* __PJMEDIA_CODEC_OPENCORE_AMR_H__ */
Java
/**************************************************************************** ** ** Copyright (C) 2012 Nokia Corporation and/or its subsidiary(-ies). ** All rights reserved. ** Contact: Nokia Corporation ([email protected]) ** ** This file is part of the QtGui module of the Qt Toolkit. ** ** $QT_BEGIN_LICENSE:LGPL$ ** GNU Lesser General Public License Usage ** This file may be used under the terms of the GNU Lesser General Public ** License version 2.1 as published by the Free Software Foundation and ** appearing in the file LICENSE.LGPL included in the packaging of this ** file. Please review the following information to ensure the GNU Lesser ** General Public License version 2.1 requirements will be met: ** http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html. ** ** In addition, as a special exception, Nokia gives you certain additional ** rights. These rights are described in the Nokia Qt LGPL Exception ** version 1.1, included in the file LGPL_EXCEPTION.txt in this package. ** ** GNU General Public License Usage ** Alternatively, this file may be used under the terms of the GNU General ** Public License version 3.0 as published by the Free Software Foundation ** and appearing in the file LICENSE.GPL included in the packaging of this ** file. Please review the following information to ensure the GNU General ** Public License version 3.0 requirements will be met: ** http://www.gnu.org/copyleft/gpl.html. ** ** Other Usage ** Alternatively, this file may be used in accordance with the terms and ** conditions contained in a signed written agreement between you and Nokia. 
** ** ** ** ** ** $QT_END_LICENSE$ ** ****************************************************************************/ #ifndef QMATRIX4X4_H #define QMATRIX4X4_H #include <QtGui/qvector3d.h> #include <QtGui/qvector4d.h> #include <QtGui/qquaternion.h> #include <QtGui/qgenericmatrix.h> #include <QtCore/qrect.h> QT_BEGIN_HEADER QT_BEGIN_NAMESPACE QT_MODULE(Gui) #ifndef QT_NO_MATRIX4X4 class QMatrix; class QTransform; class QVariant; class Q_GUI_EXPORT QMatrix4x4 { public: inline QMatrix4x4() { setToIdentity(); } explicit QMatrix4x4(const qreal *values); inline QMatrix4x4(qreal m11, qreal m12, qreal m13, qreal m14, qreal m21, qreal m22, qreal m23, qreal m24, qreal m31, qreal m32, qreal m33, qreal m34, qreal m41, qreal m42, qreal m43, qreal m44); template <int N, int M> explicit QMatrix4x4(const QGenericMatrix<N, M, qreal>& matrix); QMatrix4x4(const qreal *values, int cols, int rows); QMatrix4x4(const QTransform& transform); QMatrix4x4(const QMatrix& matrix); inline const qreal& operator()(int row, int column) const; inline qreal& operator()(int row, int column); inline QVector4D column(int index) const; inline void setColumn(int index, const QVector4D& value); inline QVector4D row(int index) const; inline void setRow(int index, const QVector4D& value); inline bool isIdentity() const; inline void setToIdentity(); inline void fill(qreal value); qreal determinant() const; QMatrix4x4 inverted(bool *invertible = 0) const; QMatrix4x4 transposed() const; QMatrix3x3 normalMatrix() const; inline QMatrix4x4& operator+=(const QMatrix4x4& other); inline QMatrix4x4& operator-=(const QMatrix4x4& other); inline QMatrix4x4& operator*=(const QMatrix4x4& other); inline QMatrix4x4& operator*=(qreal factor); QMatrix4x4& operator/=(qreal divisor); inline bool operator==(const QMatrix4x4& other) const; inline bool operator!=(const QMatrix4x4& other) const; friend QMatrix4x4 operator+(const QMatrix4x4& m1, const QMatrix4x4& m2); friend QMatrix4x4 operator-(const QMatrix4x4& m1, const QMatrix4x4& 
m2); friend QMatrix4x4 operator*(const QMatrix4x4& m1, const QMatrix4x4& m2); #ifndef QT_NO_VECTOR3D friend QVector3D operator*(const QMatrix4x4& matrix, const QVector3D& vector); friend QVector3D operator*(const QVector3D& vector, const QMatrix4x4& matrix); #endif #ifndef QT_NO_VECTOR4D friend QVector4D operator*(const QVector4D& vector, const QMatrix4x4& matrix); friend QVector4D operator*(const QMatrix4x4& matrix, const QVector4D& vector); #endif friend QPoint operator*(const QPoint& point, const QMatrix4x4& matrix); friend QPointF operator*(const QPointF& point, const QMatrix4x4& matrix); friend QMatrix4x4 operator-(const QMatrix4x4& matrix); friend QPoint operator*(const QMatrix4x4& matrix, const QPoint& point); friend QPointF operator*(const QMatrix4x4& matrix, const QPointF& point); friend QMatrix4x4 operator*(qreal factor, const QMatrix4x4& matrix); friend QMatrix4x4 operator*(const QMatrix4x4& matrix, qreal factor); friend Q_GUI_EXPORT QMatrix4x4 operator/(const QMatrix4x4& matrix, qreal divisor); friend inline bool qFuzzyCompare(const QMatrix4x4& m1, const QMatrix4x4& m2); #ifndef QT_NO_VECTOR3D void scale(const QVector3D& vector); void translate(const QVector3D& vector); void rotate(qreal angle, const QVector3D& vector); #endif void scale(qreal x, qreal y); void scale(qreal x, qreal y, qreal z); void scale(qreal factor); void translate(qreal x, qreal y); void translate(qreal x, qreal y, qreal z); void rotate(qreal angle, qreal x, qreal y, qreal z = 0.0f); #ifndef QT_NO_QUATERNION void rotate(const QQuaternion& quaternion); #endif void ortho(const QRect& rect); void ortho(const QRectF& rect); void ortho(qreal left, qreal right, qreal bottom, qreal top, qreal nearPlane, qreal farPlane); void frustum(qreal left, qreal right, qreal bottom, qreal top, qreal nearPlane, qreal farPlane); void perspective(qreal angle, qreal aspect, qreal nearPlane, qreal farPlane); #ifndef QT_NO_VECTOR3D void lookAt(const QVector3D& eye, const QVector3D& center, const QVector3D& 
up); #endif void flipCoordinates(); void copyDataTo(qreal *values) const; QMatrix toAffine() const; QTransform toTransform() const; QTransform toTransform(qreal distanceToPlane) const; QPoint map(const QPoint& point) const; QPointF map(const QPointF& point) const; #ifndef QT_NO_VECTOR3D QVector3D map(const QVector3D& point) const; QVector3D mapVector(const QVector3D& vector) const; #endif #ifndef QT_NO_VECTOR4D QVector4D map(const QVector4D& point) const; #endif QRect mapRect(const QRect& rect) const; QRectF mapRect(const QRectF& rect) const; template <int N, int M> QGenericMatrix<N, M, qreal> toGenericMatrix() const; inline qreal *data(); inline const qreal *data() const { return *m; } inline const qreal *constData() const { return *m; } void optimize(); operator QVariant() const; #ifndef QT_NO_DEBUG_STREAM friend Q_GUI_EXPORT QDebug operator<<(QDebug dbg, const QMatrix4x4 &m); #endif private: qreal m[4][4]; // Column-major order to match OpenGL. int flagBits; // Flag bits from the enum below. enum { Identity = 0x0001, // Identity matrix General = 0x0002, // General matrix, unknown contents Translation = 0x0004, // Contains a simple translation Scale = 0x0008, // Contains a simple scale Rotation = 0x0010 // Contains a simple rotation }; // Construct without initializing identity matrix. 
QMatrix4x4(int) { flagBits = General; } QMatrix4x4 orthonormalInverse() const; void projectedRotate(qreal angle, qreal x, qreal y, qreal z); friend class QGraphicsRotation; }; Q_DECLARE_TYPEINFO(QMatrix4x4, Q_MOVABLE_TYPE); inline QMatrix4x4::QMatrix4x4 (qreal m11, qreal m12, qreal m13, qreal m14, qreal m21, qreal m22, qreal m23, qreal m24, qreal m31, qreal m32, qreal m33, qreal m34, qreal m41, qreal m42, qreal m43, qreal m44) { m[0][0] = m11; m[0][1] = m21; m[0][2] = m31; m[0][3] = m41; m[1][0] = m12; m[1][1] = m22; m[1][2] = m32; m[1][3] = m42; m[2][0] = m13; m[2][1] = m23; m[2][2] = m33; m[2][3] = m43; m[3][0] = m14; m[3][1] = m24; m[3][2] = m34; m[3][3] = m44; flagBits = General; } template <int N, int M> Q_INLINE_TEMPLATE QMatrix4x4::QMatrix4x4 (const QGenericMatrix<N, M, qreal>& matrix) { const qreal *values = matrix.constData(); for (int matrixCol = 0; matrixCol < 4; ++matrixCol) { for (int matrixRow = 0; matrixRow < 4; ++matrixRow) { if (matrixCol < N && matrixRow < M) m[matrixCol][matrixRow] = values[matrixCol * M + matrixRow]; else if (matrixCol == matrixRow) m[matrixCol][matrixRow] = 1.0f; else m[matrixCol][matrixRow] = 0.0f; } } flagBits = General; } template <int N, int M> QGenericMatrix<N, M, qreal> QMatrix4x4::toGenericMatrix() const { QGenericMatrix<N, M, qreal> result; qreal *values = result.data(); for (int matrixCol = 0; matrixCol < N; ++matrixCol) { for (int matrixRow = 0; matrixRow < M; ++matrixRow) { if (matrixCol < 4 && matrixRow < 4) values[matrixCol * M + matrixRow] = m[matrixCol][matrixRow]; else if (matrixCol == matrixRow) values[matrixCol * M + matrixRow] = 1.0f; else values[matrixCol * M + matrixRow] = 0.0f; } } return result; } inline const qreal& QMatrix4x4::operator()(int aRow, int aColumn) const { Q_ASSERT(aRow >= 0 && aRow < 4 && aColumn >= 0 && aColumn < 4); return m[aColumn][aRow]; } inline qreal& QMatrix4x4::operator()(int aRow, int aColumn) { Q_ASSERT(aRow >= 0 && aRow < 4 && aColumn >= 0 && aColumn < 4); flagBits = General; 
return m[aColumn][aRow]; } inline QVector4D QMatrix4x4::column(int index) const { Q_ASSERT(index >= 0 && index < 4); return QVector4D(m[index][0], m[index][1], m[index][2], m[index][3]); } inline void QMatrix4x4::setColumn(int index, const QVector4D& value) { Q_ASSERT(index >= 0 && index < 4); m[index][0] = value.x(); m[index][1] = value.y(); m[index][2] = value.z(); m[index][3] = value.w(); flagBits = General; } inline QVector4D QMatrix4x4::row(int index) const { Q_ASSERT(index >= 0 && index < 4); return QVector4D(m[0][index], m[1][index], m[2][index], m[3][index]); } inline void QMatrix4x4::setRow(int index, const QVector4D& value) { Q_ASSERT(index >= 0 && index < 4); m[0][index] = value.x(); m[1][index] = value.y(); m[2][index] = value.z(); m[3][index] = value.w(); flagBits = General; } Q_GUI_EXPORT QMatrix4x4 operator/(const QMatrix4x4& matrix, qreal divisor); inline bool QMatrix4x4::isIdentity() const { if (flagBits == Identity) return true; if (m[0][0] != 1.0f || m[0][1] != 0.0f || m[0][2] != 0.0f) return false; if (m[0][3] != 0.0f || m[1][0] != 0.0f || m[1][1] != 1.0f) return false; if (m[1][2] != 0.0f || m[1][3] != 0.0f || m[2][0] != 0.0f) return false; if (m[2][1] != 0.0f || m[2][2] != 1.0f || m[2][3] != 0.0f) return false; if (m[3][0] != 0.0f || m[3][1] != 0.0f || m[3][2] != 0.0f) return false; return (m[3][3] == 1.0f); } inline void QMatrix4x4::setToIdentity() { m[0][0] = 1.0f; m[0][1] = 0.0f; m[0][2] = 0.0f; m[0][3] = 0.0f; m[1][0] = 0.0f; m[1][1] = 1.0f; m[1][2] = 0.0f; m[1][3] = 0.0f; m[2][0] = 0.0f; m[2][1] = 0.0f; m[2][2] = 1.0f; m[2][3] = 0.0f; m[3][0] = 0.0f; m[3][1] = 0.0f; m[3][2] = 0.0f; m[3][3] = 1.0f; flagBits = Identity; } inline void QMatrix4x4::fill(qreal value) { m[0][0] = value; m[0][1] = value; m[0][2] = value; m[0][3] = value; m[1][0] = value; m[1][1] = value; m[1][2] = value; m[1][3] = value; m[2][0] = value; m[2][1] = value; m[2][2] = value; m[2][3] = value; m[3][0] = value; m[3][1] = value; m[3][2] = value; m[3][3] = value; flagBits 
= General; } inline QMatrix4x4& QMatrix4x4::operator+=(const QMatrix4x4& other) { m[0][0] += other.m[0][0]; m[0][1] += other.m[0][1]; m[0][2] += other.m[0][2]; m[0][3] += other.m[0][3]; m[1][0] += other.m[1][0]; m[1][1] += other.m[1][1]; m[1][2] += other.m[1][2]; m[1][3] += other.m[1][3]; m[2][0] += other.m[2][0]; m[2][1] += other.m[2][1]; m[2][2] += other.m[2][2]; m[2][3] += other.m[2][3]; m[3][0] += other.m[3][0]; m[3][1] += other.m[3][1]; m[3][2] += other.m[3][2]; m[3][3] += other.m[3][3]; flagBits = General; return *this; } inline QMatrix4x4& QMatrix4x4::operator-=(const QMatrix4x4& other) { m[0][0] -= other.m[0][0]; m[0][1] -= other.m[0][1]; m[0][2] -= other.m[0][2]; m[0][3] -= other.m[0][3]; m[1][0] -= other.m[1][0]; m[1][1] -= other.m[1][1]; m[1][2] -= other.m[1][2]; m[1][3] -= other.m[1][3]; m[2][0] -= other.m[2][0]; m[2][1] -= other.m[2][1]; m[2][2] -= other.m[2][2]; m[2][3] -= other.m[2][3]; m[3][0] -= other.m[3][0]; m[3][1] -= other.m[3][1]; m[3][2] -= other.m[3][2]; m[3][3] -= other.m[3][3]; flagBits = General; return *this; } inline QMatrix4x4& QMatrix4x4::operator*=(const QMatrix4x4& other) { if (flagBits == Identity) { *this = other; return *this; } else if (other.flagBits == Identity) { return *this; } else { *this = *this * other; return *this; } } inline QMatrix4x4& QMatrix4x4::operator*=(qreal factor) { m[0][0] *= factor; m[0][1] *= factor; m[0][2] *= factor; m[0][3] *= factor; m[1][0] *= factor; m[1][1] *= factor; m[1][2] *= factor; m[1][3] *= factor; m[2][0] *= factor; m[2][1] *= factor; m[2][2] *= factor; m[2][3] *= factor; m[3][0] *= factor; m[3][1] *= factor; m[3][2] *= factor; m[3][3] *= factor; flagBits = General; return *this; } inline bool QMatrix4x4::operator==(const QMatrix4x4& other) const { return m[0][0] == other.m[0][0] && m[0][1] == other.m[0][1] && m[0][2] == other.m[0][2] && m[0][3] == other.m[0][3] && m[1][0] == other.m[1][0] && m[1][1] == other.m[1][1] && m[1][2] == other.m[1][2] && m[1][3] == other.m[1][3] && m[2][0] == 
other.m[2][0] && m[2][1] == other.m[2][1] && m[2][2] == other.m[2][2] && m[2][3] == other.m[2][3] && m[3][0] == other.m[3][0] && m[3][1] == other.m[3][1] && m[3][2] == other.m[3][2] && m[3][3] == other.m[3][3]; } inline bool QMatrix4x4::operator!=(const QMatrix4x4& other) const { return m[0][0] != other.m[0][0] || m[0][1] != other.m[0][1] || m[0][2] != other.m[0][2] || m[0][3] != other.m[0][3] || m[1][0] != other.m[1][0] || m[1][1] != other.m[1][1] || m[1][2] != other.m[1][2] || m[1][3] != other.m[1][3] || m[2][0] != other.m[2][0] || m[2][1] != other.m[2][1] || m[2][2] != other.m[2][2] || m[2][3] != other.m[2][3] || m[3][0] != other.m[3][0] || m[3][1] != other.m[3][1] || m[3][2] != other.m[3][2] || m[3][3] != other.m[3][3]; } inline QMatrix4x4 operator+(const QMatrix4x4& m1, const QMatrix4x4& m2) { QMatrix4x4 m(1); m.m[0][0] = m1.m[0][0] + m2.m[0][0]; m.m[0][1] = m1.m[0][1] + m2.m[0][1]; m.m[0][2] = m1.m[0][2] + m2.m[0][2]; m.m[0][3] = m1.m[0][3] + m2.m[0][3]; m.m[1][0] = m1.m[1][0] + m2.m[1][0]; m.m[1][1] = m1.m[1][1] + m2.m[1][1]; m.m[1][2] = m1.m[1][2] + m2.m[1][2]; m.m[1][3] = m1.m[1][3] + m2.m[1][3]; m.m[2][0] = m1.m[2][0] + m2.m[2][0]; m.m[2][1] = m1.m[2][1] + m2.m[2][1]; m.m[2][2] = m1.m[2][2] + m2.m[2][2]; m.m[2][3] = m1.m[2][3] + m2.m[2][3]; m.m[3][0] = m1.m[3][0] + m2.m[3][0]; m.m[3][1] = m1.m[3][1] + m2.m[3][1]; m.m[3][2] = m1.m[3][2] + m2.m[3][2]; m.m[3][3] = m1.m[3][3] + m2.m[3][3]; return m; } inline QMatrix4x4 operator-(const QMatrix4x4& m1, const QMatrix4x4& m2) { QMatrix4x4 m(1); m.m[0][0] = m1.m[0][0] - m2.m[0][0]; m.m[0][1] = m1.m[0][1] - m2.m[0][1]; m.m[0][2] = m1.m[0][2] - m2.m[0][2]; m.m[0][3] = m1.m[0][3] - m2.m[0][3]; m.m[1][0] = m1.m[1][0] - m2.m[1][0]; m.m[1][1] = m1.m[1][1] - m2.m[1][1]; m.m[1][2] = m1.m[1][2] - m2.m[1][2]; m.m[1][3] = m1.m[1][3] - m2.m[1][3]; m.m[2][0] = m1.m[2][0] - m2.m[2][0]; m.m[2][1] = m1.m[2][1] - m2.m[2][1]; m.m[2][2] = m1.m[2][2] - m2.m[2][2]; m.m[2][3] = m1.m[2][3] - m2.m[2][3]; m.m[3][0] = m1.m[3][0] - 
m2.m[3][0]; m.m[3][1] = m1.m[3][1] - m2.m[3][1]; m.m[3][2] = m1.m[3][2] - m2.m[3][2]; m.m[3][3] = m1.m[3][3] - m2.m[3][3]; return m; } inline QMatrix4x4 operator*(const QMatrix4x4& m1, const QMatrix4x4& m2) { if (m1.flagBits == QMatrix4x4::Identity) return m2; else if (m2.flagBits == QMatrix4x4::Identity) return m1; QMatrix4x4 m(1); m.m[0][0] = m1.m[0][0] * m2.m[0][0] + m1.m[1][0] * m2.m[0][1] + m1.m[2][0] * m2.m[0][2] + m1.m[3][0] * m2.m[0][3]; m.m[0][1] = m1.m[0][1] * m2.m[0][0] + m1.m[1][1] * m2.m[0][1] + m1.m[2][1] * m2.m[0][2] + m1.m[3][1] * m2.m[0][3]; m.m[0][2] = m1.m[0][2] * m2.m[0][0] + m1.m[1][2] * m2.m[0][1] + m1.m[2][2] * m2.m[0][2] + m1.m[3][2] * m2.m[0][3]; m.m[0][3] = m1.m[0][3] * m2.m[0][0] + m1.m[1][3] * m2.m[0][1] + m1.m[2][3] * m2.m[0][2] + m1.m[3][3] * m2.m[0][3]; m.m[1][0] = m1.m[0][0] * m2.m[1][0] + m1.m[1][0] * m2.m[1][1] + m1.m[2][0] * m2.m[1][2] + m1.m[3][0] * m2.m[1][3]; m.m[1][1] = m1.m[0][1] * m2.m[1][0] + m1.m[1][1] * m2.m[1][1] + m1.m[2][1] * m2.m[1][2] + m1.m[3][1] * m2.m[1][3]; m.m[1][2] = m1.m[0][2] * m2.m[1][0] + m1.m[1][2] * m2.m[1][1] + m1.m[2][2] * m2.m[1][2] + m1.m[3][2] * m2.m[1][3]; m.m[1][3] = m1.m[0][3] * m2.m[1][0] + m1.m[1][3] * m2.m[1][1] + m1.m[2][3] * m2.m[1][2] + m1.m[3][3] * m2.m[1][3]; m.m[2][0] = m1.m[0][0] * m2.m[2][0] + m1.m[1][0] * m2.m[2][1] + m1.m[2][0] * m2.m[2][2] + m1.m[3][0] * m2.m[2][3]; m.m[2][1] = m1.m[0][1] * m2.m[2][0] + m1.m[1][1] * m2.m[2][1] + m1.m[2][1] * m2.m[2][2] + m1.m[3][1] * m2.m[2][3]; m.m[2][2] = m1.m[0][2] * m2.m[2][0] + m1.m[1][2] * m2.m[2][1] + m1.m[2][2] * m2.m[2][2] + m1.m[3][2] * m2.m[2][3]; m.m[2][3] = m1.m[0][3] * m2.m[2][0] + m1.m[1][3] * m2.m[2][1] + m1.m[2][3] * m2.m[2][2] + m1.m[3][3] * m2.m[2][3]; m.m[3][0] = m1.m[0][0] * m2.m[3][0] + m1.m[1][0] * m2.m[3][1] + m1.m[2][0] * m2.m[3][2] + m1.m[3][0] * m2.m[3][3]; m.m[3][1] = m1.m[0][1] * m2.m[3][0] + m1.m[1][1] * m2.m[3][1] + m1.m[2][1] * m2.m[3][2] + m1.m[3][1] * m2.m[3][3]; m.m[3][2] = m1.m[0][2] * m2.m[3][0] + m1.m[1][2] * 
m2.m[3][1] + m1.m[2][2] * m2.m[3][2] + m1.m[3][2] * m2.m[3][3]; m.m[3][3] = m1.m[0][3] * m2.m[3][0] + m1.m[1][3] * m2.m[3][1] + m1.m[2][3] * m2.m[3][2] + m1.m[3][3] * m2.m[3][3]; return m; } #ifndef QT_NO_VECTOR3D inline QVector3D operator*(const QVector3D& vector, const QMatrix4x4& matrix) { qreal x, y, z, w; x = vector.x() * matrix.m[0][0] + vector.y() * matrix.m[0][1] + vector.z() * matrix.m[0][2] + matrix.m[0][3]; y = vector.x() * matrix.m[1][0] + vector.y() * matrix.m[1][1] + vector.z() * matrix.m[1][2] + matrix.m[1][3]; z = vector.x() * matrix.m[2][0] + vector.y() * matrix.m[2][1] + vector.z() * matrix.m[2][2] + matrix.m[2][3]; w = vector.x() * matrix.m[3][0] + vector.y() * matrix.m[3][1] + vector.z() * matrix.m[3][2] + matrix.m[3][3]; if (w == 1.0f) return QVector3D(x, y, z); else return QVector3D(x / w, y / w, z / w); } inline QVector3D operator*(const QMatrix4x4& matrix, const QVector3D& vector) { qreal x, y, z, w; if (matrix.flagBits == QMatrix4x4::Identity) { return vector; } else if (matrix.flagBits == QMatrix4x4::Translation) { return QVector3D(vector.x() + matrix.m[3][0], vector.y() + matrix.m[3][1], vector.z() + matrix.m[3][2]); } else if (matrix.flagBits == (QMatrix4x4::Translation | QMatrix4x4::Scale)) { return QVector3D(vector.x() * matrix.m[0][0] + matrix.m[3][0], vector.y() * matrix.m[1][1] + matrix.m[3][1], vector.z() * matrix.m[2][2] + matrix.m[3][2]); } else if (matrix.flagBits == QMatrix4x4::Scale) { return QVector3D(vector.x() * matrix.m[0][0], vector.y() * matrix.m[1][1], vector.z() * matrix.m[2][2]); } else { x = vector.x() * matrix.m[0][0] + vector.y() * matrix.m[1][0] + vector.z() * matrix.m[2][0] + matrix.m[3][0]; y = vector.x() * matrix.m[0][1] + vector.y() * matrix.m[1][1] + vector.z() * matrix.m[2][1] + matrix.m[3][1]; z = vector.x() * matrix.m[0][2] + vector.y() * matrix.m[1][2] + vector.z() * matrix.m[2][2] + matrix.m[3][2]; w = vector.x() * matrix.m[0][3] + vector.y() * matrix.m[1][3] + vector.z() * matrix.m[2][3] + 
// --- Tail of operator*(const QVector3D&, const QMatrix4x4&); the
// definition opens earlier in the file.  A perspective divide is applied
// unless the computed w component is exactly 1.0f.
matrix.m[3][3];
    if (w == 1.0f)
        return QVector3D(x, y, z);
    else
        return QVector3D(x / w, y / w, z / w);
}

}

#endif

#ifndef QT_NO_VECTOR4D

// Row-vector transform: vector * matrix.  The first subscript of m is held
// fixed per result component — the transposed indexing of the
// matrix * vector overload below.
inline QVector4D operator*(const QVector4D& vector, const QMatrix4x4& matrix)
{
    qreal x, y, z, w;
    x = vector.x() * matrix.m[0][0] +
        vector.y() * matrix.m[0][1] +
        vector.z() * matrix.m[0][2] +
        vector.w() * matrix.m[0][3];
    y = vector.x() * matrix.m[1][0] +
        vector.y() * matrix.m[1][1] +
        vector.z() * matrix.m[1][2] +
        vector.w() * matrix.m[1][3];
    z = vector.x() * matrix.m[2][0] +
        vector.y() * matrix.m[2][1] +
        vector.z() * matrix.m[2][2] +
        vector.w() * matrix.m[2][3];
    w = vector.x() * matrix.m[3][0] +
        vector.y() * matrix.m[3][1] +
        vector.z() * matrix.m[3][2] +
        vector.w() * matrix.m[3][3];
    return QVector4D(x, y, z, w);
}

// Column-vector transform: matrix * vector.
inline QVector4D operator*(const QMatrix4x4& matrix, const QVector4D& vector)
{
    qreal x, y, z, w;
    x = vector.x() * matrix.m[0][0] +
        vector.y() * matrix.m[1][0] +
        vector.z() * matrix.m[2][0] +
        vector.w() * matrix.m[3][0];
    y = vector.x() * matrix.m[0][1] +
        vector.y() * matrix.m[1][1] +
        vector.z() * matrix.m[2][1] +
        vector.w() * matrix.m[3][1];
    z = vector.x() * matrix.m[0][2] +
        vector.y() * matrix.m[1][2] +
        vector.z() * matrix.m[2][2] +
        vector.w() * matrix.m[3][2];
    w = vector.x() * matrix.m[0][3] +
        vector.y() * matrix.m[1][3] +
        vector.z() * matrix.m[2][3] +
        vector.w() * matrix.m[3][3];
    return QVector4D(x, y, z, w);
}

#endif

// Transforms an integer point as a row vector (point * matrix), implicitly
// (x, y, 0, 1); z terms are dropped, results are rounded back to int, and a
// perspective divide is applied when w != 1.0f.
inline QPoint operator*(const QPoint& point, const QMatrix4x4& matrix)
{
    qreal xin, yin;
    qreal x, y, w;
    xin = point.x();
    yin = point.y();
    x = xin * matrix.m[0][0] +
        yin * matrix.m[0][1] +
        matrix.m[0][3];
    y = xin * matrix.m[1][0] +
        yin * matrix.m[1][1] +
        matrix.m[1][3];
    w = xin * matrix.m[3][0] +
        yin * matrix.m[3][1] +
        matrix.m[3][3];
    if (w == 1.0f)
        return QPoint(qRound(x), qRound(y));
    else
        return QPoint(qRound(x / w), qRound(y / w));
}

// Floating-point variant of the row-vector point transform above.
inline QPointF operator*(const QPointF& point, const QMatrix4x4& matrix)
{
    qreal xin, yin;
    qreal x, y, w;
    xin = point.x();
    yin = point.y();
    x = xin * matrix.m[0][0] +
        yin * matrix.m[0][1] +
        matrix.m[0][3];
    y = xin * matrix.m[1][0] +
        yin * matrix.m[1][1] +
        matrix.m[1][3];
    w = xin * matrix.m[3][0] +
        yin * matrix.m[3][1] +
        matrix.m[3][3];
    if (w == 1.0f) {
        return QPointF(qreal(x), qreal(y));
    } else {
        return QPointF(qreal(x / w), qreal(y / w));
    }
}

// Transforms an integer point as a column vector (matrix * point).  The
// flagBits field lets the common Identity / Translation / Scale cases skip
// the full multiply; only the general branch can hit the perspective divide.
inline QPoint operator*(const QMatrix4x4& matrix, const QPoint& point)
{
    qreal xin, yin;
    qreal x, y, w;
    xin = point.x();
    yin = point.y();
    if (matrix.flagBits == QMatrix4x4::Identity) {
        return point;
    } else if (matrix.flagBits == QMatrix4x4::Translation) {
        return QPoint(qRound(xin + matrix.m[3][0]),
                      qRound(yin + matrix.m[3][1]));
    } else if (matrix.flagBits ==
                    (QMatrix4x4::Translation | QMatrix4x4::Scale)) {
        return QPoint(qRound(xin * matrix.m[0][0] + matrix.m[3][0]),
                      qRound(yin * matrix.m[1][1] + matrix.m[3][1]));
    } else if (matrix.flagBits == QMatrix4x4::Scale) {
        return QPoint(qRound(xin * matrix.m[0][0]),
                      qRound(yin * matrix.m[1][1]));
    } else {
        x = xin * matrix.m[0][0] +
            yin * matrix.m[1][0] +
            matrix.m[3][0];
        y = xin * matrix.m[0][1] +
            yin * matrix.m[1][1] +
            matrix.m[3][1];
        w = xin * matrix.m[0][3] +
            yin * matrix.m[1][3] +
            matrix.m[3][3];
        if (w == 1.0f)
            return QPoint(qRound(x), qRound(y));
        else
            return QPoint(qRound(x / w), qRound(y / w));
    }
}

// Floating-point variant of the column-vector point transform above.
inline QPointF operator*(const QMatrix4x4& matrix, const QPointF& point)
{
    qreal xin, yin;
    qreal x, y, w;
    xin = point.x();
    yin = point.y();
    if (matrix.flagBits == QMatrix4x4::Identity) {
        return point;
    } else if (matrix.flagBits == QMatrix4x4::Translation) {
        return QPointF(xin + matrix.m[3][0],
                       yin + matrix.m[3][1]);
    } else if (matrix.flagBits ==
                    (QMatrix4x4::Translation | QMatrix4x4::Scale)) {
        return QPointF(xin * matrix.m[0][0] + matrix.m[3][0],
                       yin * matrix.m[1][1] + matrix.m[3][1]);
    } else if (matrix.flagBits == QMatrix4x4::Scale) {
        return QPointF(xin * matrix.m[0][0],
                       yin * matrix.m[1][1]);
    } else {
        x = xin * matrix.m[0][0] +
            yin * matrix.m[1][0] +
            matrix.m[3][0];
        y = xin * matrix.m[0][1] +
            yin * matrix.m[1][1] +
            matrix.m[3][1];
        w = xin * matrix.m[0][3] +
            yin * matrix.m[1][3] +
            matrix.m[3][3];
        if (w == 1.0f) {
            return QPointF(qreal(x), qreal(y));
        } else {
            return QPointF(qreal(x / w), qreal(y / w));
        }
    }
}

// Component-wise negation.  The result is built with the internal
// QMatrix4x4(int) constructor (presumably the "don't initialise" variant —
// every element is overwritten immediately below).
inline QMatrix4x4 operator-(const QMatrix4x4& matrix)
{
    QMatrix4x4 m(1);
    m.m[0][0] = -matrix.m[0][0];
    m.m[0][1] = -matrix.m[0][1];
    m.m[0][2] = -matrix.m[0][2];
    m.m[0][3] = -matrix.m[0][3];
    m.m[1][0] = -matrix.m[1][0];
    m.m[1][1] = -matrix.m[1][1];
    m.m[1][2] = -matrix.m[1][2];
    m.m[1][3] = -matrix.m[1][3];
    m.m[2][0] = -matrix.m[2][0];
    m.m[2][1] = -matrix.m[2][1];
    m.m[2][2] = -matrix.m[2][2];
    m.m[2][3] = -matrix.m[2][3];
    m.m[3][0] = -matrix.m[3][0];
    m.m[3][1] = -matrix.m[3][1];
    m.m[3][2] = -matrix.m[3][2];
    m.m[3][3] = -matrix.m[3][3];
    return m;
}

// Scalar pre-multiplication: factor * matrix, element-wise.
inline QMatrix4x4 operator*(qreal factor, const QMatrix4x4& matrix)
{
    QMatrix4x4 m(1);
    m.m[0][0] = matrix.m[0][0] * factor;
    m.m[0][1] = matrix.m[0][1] * factor;
    m.m[0][2] = matrix.m[0][2] * factor;
    m.m[0][3] = matrix.m[0][3] * factor;
    m.m[1][0] = matrix.m[1][0] * factor;
    m.m[1][1] = matrix.m[1][1] * factor;
    m.m[1][2] = matrix.m[1][2] * factor;
    m.m[1][3] = matrix.m[1][3] * factor;
    m.m[2][0] = matrix.m[2][0] * factor;
    m.m[2][1] = matrix.m[2][1] * factor;
    m.m[2][2] = matrix.m[2][2] * factor;
    m.m[2][3] = matrix.m[2][3] * factor;
    m.m[3][0] = matrix.m[3][0] * factor;
    m.m[3][1] = matrix.m[3][1] * factor;
    m.m[3][2] = matrix.m[3][2] * factor;
    m.m[3][3] = matrix.m[3][3] * factor;
    return m;
}

// Scalar post-multiplication: matrix * factor, element-wise.
inline QMatrix4x4 operator*(const QMatrix4x4& matrix, qreal factor)
{
    QMatrix4x4 m(1);
    m.m[0][0] = matrix.m[0][0] * factor;
    m.m[0][1] = matrix.m[0][1] * factor;
    m.m[0][2] = matrix.m[0][2] * factor;
    m.m[0][3] = matrix.m[0][3] * factor;
    m.m[1][0] = matrix.m[1][0] * factor;
    m.m[1][1] = matrix.m[1][1] * factor;
    m.m[1][2] = matrix.m[1][2] * factor;
    m.m[1][3] = matrix.m[1][3] * factor;
    m.m[2][0] = matrix.m[2][0] * factor;
    m.m[2][1] = matrix.m[2][1] * factor;
    m.m[2][2] = matrix.m[2][2] * factor;
    m.m[2][3] = matrix.m[2][3] * factor;
    m.m[3][0] = matrix.m[3][0] * factor;
    m.m[3][1] = matrix.m[3][1] * factor;
    m.m[3][2] = matrix.m[3][2] * factor;
    m.m[3][3] = matrix.m[3][3] * factor;
    return m;
}

// Tolerance-based equality over all 16 elements (short-circuits on the
// first mismatching element).
inline bool qFuzzyCompare(const QMatrix4x4& m1, const QMatrix4x4& m2)
{
    return qFuzzyCompare(m1.m[0][0], m2.m[0][0]) &&
           qFuzzyCompare(m1.m[0][1], m2.m[0][1]) &&
           qFuzzyCompare(m1.m[0][2], m2.m[0][2]) &&
           qFuzzyCompare(m1.m[0][3], m2.m[0][3]) &&
           qFuzzyCompare(m1.m[1][0], m2.m[1][0]) &&
           qFuzzyCompare(m1.m[1][1], m2.m[1][1]) &&
           qFuzzyCompare(m1.m[1][2], m2.m[1][2]) &&
           qFuzzyCompare(m1.m[1][3], m2.m[1][3]) &&
           qFuzzyCompare(m1.m[2][0], m2.m[2][0]) &&
           qFuzzyCompare(m1.m[2][1], m2.m[2][1]) &&
           qFuzzyCompare(m1.m[2][2], m2.m[2][2]) &&
           qFuzzyCompare(m1.m[2][3], m2.m[2][3]) &&
           qFuzzyCompare(m1.m[3][0], m2.m[3][0]) &&
           qFuzzyCompare(m1.m[3][1], m2.m[3][1]) &&
           qFuzzyCompare(m1.m[3][2], m2.m[3][2]) &&
           qFuzzyCompare(m1.m[3][3], m2.m[3][3]);
}

// The map() family are thin wrappers over the operator* overloads above.
inline QPoint QMatrix4x4::map(const QPoint& point) const
{
    return *this * point;
}

inline QPointF QMatrix4x4::map(const QPointF& point) const
{
    return *this * point;
}

#ifndef QT_NO_VECTOR3D

inline QVector3D QMatrix4x4::map(const QVector3D& point) const
{
    return *this * point;
}

// Transforms a direction vector: only the upper-left 3x3 portion is used —
// no translation (Translation is treated like Identity) and no perspective
// divide.
inline QVector3D QMatrix4x4::mapVector(const QVector3D& vector) const
{
    if (flagBits == Identity || flagBits == Translation) {
        return vector;
    } else if (flagBits == Scale || flagBits == (Translation | Scale)) {
        return QVector3D(vector.x() * m[0][0],
                         vector.y() * m[1][1],
                         vector.z() * m[2][2]);
    } else {
        return QVector3D(vector.x() * m[0][0] +
                         vector.y() * m[1][0] +
                         vector.z() * m[2][0],
                         vector.x() * m[0][1] +
                         vector.y() * m[1][1] +
                         vector.z() * m[2][1],
                         vector.x() * m[0][2] +
                         vector.y() * m[1][2] +
                         vector.z() * m[2][2]);
    }
}

#endif

#ifndef QT_NO_VECTOR4D

inline QVector4D QMatrix4x4::map(const QVector4D& point) const
{
    return *this * point;
}

#endif

// Mutable access to the raw element array.
inline qreal *QMatrix4x4::data()
{
    // We have to assume that the caller will modify the matrix elements,
    // so we flip it over to "General" mode.
    flagBits = General;
    return *m;
}

#ifndef QT_NO_DEBUG_STREAM
Q_GUI_EXPORT QDebug operator<<(QDebug dbg, const QMatrix4x4 &m);
#endif

#ifndef QT_NO_DATASTREAM
Q_GUI_EXPORT QDataStream &operator<<(QDataStream &, const QMatrix4x4 &);
Q_GUI_EXPORT QDataStream &operator>>(QDataStream &, QMatrix4x4 &);
#endif

#ifdef QT_DEPRECATED
// Deprecated conversion: build a QMatrix4x4 from the top-left NxM block of
// a generic matrix.
template <int N, int M>
QT_DEPRECATED QMatrix4x4 qGenericMatrixToMatrix4x4(const QGenericMatrix<N, M, qreal>& matrix)
{
    return QMatrix4x4(matrix.constData(), N, M);
}

// Deprecated conversion: copy a QMatrix4x4 into an NxM generic matrix,
// filling any cells outside the 4x4 source with identity values (1 on the
// diagonal, 0 elsewhere).
template <int N, int M>
QT_DEPRECATED QGenericMatrix<N, M, qreal> qGenericMatrixFromMatrix4x4(const QMatrix4x4& matrix)
{
    QGenericMatrix<N, M, qreal> result;
    const qreal *m = matrix.constData();
    qreal *values = result.data();
    for (int col = 0; col < N; ++col) {
        for (int row = 0; row < M; ++row) {
            if (col < 4 && row < 4)
                values[col * M + row] = m[col * 4 + row];
            else if (col == row)
                values[col * M + row] = 1.0f;
            else
                values[col * M + row] = 0.0f;
        }
    }
    return result;
}
#endif

#endif

QT_END_NAMESPACE

QT_END_HEADER

#endif
Java
<?php
/**
 * Magento
 *
 * NOTICE OF LICENSE
 *
 * This source file is subject to the Open Software License (OSL 3.0)
 * that is bundled with this package in the file LICENSE.txt.
 * It is also available through the world-wide-web at this URL:
 * http://opensource.org/licenses/osl-3.0.php
 * If you did not receive a copy of the license and are unable to
 * obtain it through the world-wide-web, please send an email
 * to [email protected] so we can send you a copy immediately.
 *
 * DISCLAIMER
 *
 * Do not edit or add to this file if you wish to upgrade Magento to newer
 * versions in the future. If you wish to customize Magento for your
 * needs please refer to http://www.magento.com for more information.
 *
 * @category    Mage
 * @package     Mage_CatalogInventory
 * @copyright   Copyright (c) 2006-2014 X.commerce, Inc. (http://www.magento.com)
 * @license     http://opensource.org/licenses/osl-3.0.php  Open Software License (OSL 3.0)
 */

/*
 * Setup script: adds the "stock_status_changed_automatically" flag column
 * to the cataloginventory_stock_item table (tinyint, NOT NULL, default 0).
 * Executed by Magento's resource-setup machinery, where $this is the
 * module's setup object.
 */

/* @var $installer Mage_Core_Model_Resource_Setup */
$installer = $this;

$installer->startSetup();

// Consistently use $installer (aliasing $this) for both the connection and
// the table-name lookup; getTable() resolves any configured table prefix.
$installer->getConnection()->addColumn(
    $installer->getTable('cataloginventory_stock_item'),
    'stock_status_changed_automatically',
    'tinyint(1) unsigned NOT NULL DEFAULT 0'
);

$installer->endSetup();
Java
/**********************************************************************
 *
 * Copyright (C) Imagination Technologies Ltd. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful but, except
 * as otherwise stated in writing, without any warranty; without even the
 * implied warranty of merchantability or fitness for a particular purpose.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * Imagination Technologies Ltd. <[email protected]>
 * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
 *
 ******************************************************************************/

/*
 * PowerVR services kernel module entry points: device registration
 * (platform / PCI / DRM build variants), character-device open/release,
 * power-management hooks and module init/cleanup.
 */

#include <linux/version.h>

/* linux/config.h only exists on pre-2.6.38 kernels, and only when autoconf
 * has not already been pulled in by the build system. */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
#endif
#endif

/* Select the build flavour: hosted inside a DRM driver (SUPPORT_DRI_DRM),
 * an LDM platform driver (LDM_PLATFORM) or an LDM PCI driver (LDM_PCI).
 * PVR_MOD_STATIC makes the entry points static when they are invoked from
 * DRM glue instead of being registered directly with the kernel. */
#if defined(SUPPORT_DRI_DRM)
#define PVR_MOD_STATIC
#else
#if defined(LDM_PLATFORM)
#define PVR_LDM_PLATFORM_MODULE
#define PVR_LDM_MODULE
#else
#if defined(LDM_PCI)
#define PVR_LDM_PCI_MODULE
#define PVR_LDM_MODULE
#endif
#endif
#define PVR_MOD_STATIC static
#endif

#if defined(PVR_LDM_PLATFORM_PRE_REGISTERED)
#if !defined(NO_HARDWARE)
#define PVR_USE_PRE_REGISTERED_PLATFORM_DEV
#endif
#endif

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>

#if defined(SUPPORT_DRI_DRM)
#include <drm/drmP.h>
#if defined(PVR_SECURE_DRM_AUTH_EXPORT)
#include "env_perproc.h"
#endif
#endif

#if defined(PVR_LDM_PLATFORM_MODULE)
#include <linux/platform_device.h>
#endif

#if defined(PVR_LDM_PCI_MODULE)
#include <linux/pci.h>
#endif

#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL)
#include <asm/uaccess.h>
#endif

#include "img_defs.h"
#include "services.h"
#include "kerneldisplay.h"
#include "kernelbuffer.h"
#include "syscommon.h"
#include "pvrmmap.h"
#include "mutils.h"
#include "mm.h"
#include "mmap.h"
#include "mutex.h"
#include "pvr_debug.h"
#include "srvkm.h"
#include "perproc.h"
#include "handle.h"
#include "pvr_bridge_km.h"
#include "proc.h"
#include "pvrmodule.h"
#include "private_data.h"
#include "lock.h"
#include "linkage.h"

#if defined(SUPPORT_DRI_DRM)
#include "pvr_drm.h"
#endif

#define DRVNAME PVRSRV_MODNAME
#define DEVNAME PVRSRV_MODNAME

/* Per-open-file private data lives in the DRM file when built under DRM,
 * otherwise in the character-device file structure. */
#if defined(SUPPORT_DRI_DRM)
#define PRIVATE_DATA(pFile) ((pFile)->driver_priv)
#else
#define PRIVATE_DATA(pFile) ((pFile)->private_data)
#endif

MODULE_SUPPORTED_DEVICE(DEVNAME);

#if defined(PVRSRV_NEED_PVR_DPF)
#include <linux/moduleparam.h>
extern IMG_UINT32 gPVRDebugLevel;
module_param(gPVRDebugLevel, uint, 0644);
MODULE_PARM_DESC(gPVRDebugLevel, "Sets the level of debug output (default 0x7)");
#endif

/* Jump tables consumed by third-party display/buffer class drivers. */
EXPORT_SYMBOL(PVRGetDisplayClassJTable);
EXPORT_SYMBOL(PVRGetBufferClassJTable);

#if defined(PVR_LDM_MODULE)
static struct class *psPvrClass;
#endif

#if !defined(SUPPORT_DRI_DRM)
static int AssignedMajorNumber;

static int PVRSRVOpen(struct inode* pInode, struct file* pFile);
static int PVRSRVRelease(struct inode* pInode, struct file* pFile);

static struct file_operations pvrsrv_fops =
{
    .owner=THIS_MODULE,
    .unlocked_ioctl = PVRSRV_BridgeDispatchKM,
    .open=PVRSRVOpen,
    .release=PVRSRVRelease,
    .mmap=PVRMMap,
};
#endif

/* Global services lock serialising open/release/bridge calls. */
PVRSRV_LINUX_MUTEX gPVRSRVLock;

/* PID of the process currently inside PVRSRVRelease (0 otherwise). */
IMG_UINT32 gui32ReleasePID;

#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL)
static IMG_UINT32 gPVRPowerLevel;
#endif

#if defined(PVR_LDM_MODULE)

/* Map the generic LDM_DEV/LDM_DRV names onto the platform or PCI types. */
#if defined(PVR_LDM_PLATFORM_MODULE)
#define LDM_DEV struct platform_device
#define LDM_DRV struct platform_driver
#endif

#if defined(PVR_LDM_PCI_MODULE)
#define LDM_DEV struct pci_dev
#define LDM_DRV struct pci_driver
#endif

#if defined(PVR_LDM_PLATFORM_MODULE)
static int PVRSRVDriverRemove(LDM_DEV *device);
static int PVRSRVDriverProbe(LDM_DEV *device);
#endif
#if defined(PVR_LDM_PCI_MODULE)
static void PVRSRVDriverRemove(LDM_DEV *device);
static int PVRSRVDriverProbe(LDM_DEV *device, const struct pci_device_id *id);
#endif
static int PVRSRVDriverSuspend(LDM_DEV *device, pm_message_t state);
static void PVRSRVDriverShutdown(LDM_DEV *device);
static int PVRSRVDriverResume(LDM_DEV *device);

#if defined(PVR_LDM_PCI_MODULE)
/* PCI IDs this driver binds to (optionally a second SGX device ID). */
struct pci_device_id powervr_id_table[] __devinitdata = {
    {PCI_DEVICE(SYS_SGX_DEV_VENDOR_ID, SYS_SGX_DEV_DEVICE_ID)},
#if defined (SYS_SGX_DEV1_DEVICE_ID)
    {PCI_DEVICE(SYS_SGX_DEV_VENDOR_ID, SYS_SGX_DEV1_DEVICE_ID)},
#endif
    {0}
};

MODULE_DEVICE_TABLE(pci, powervr_id_table);
#endif

#if defined(PVR_USE_PRE_REGISTERED_PLATFORM_DEV)
static struct platform_device_id powervr_id_table[] __devinitdata = {
    {SYS_SGX_DEV_NAME, 0},
    {}
};
#endif

static LDM_DRV powervr_driver = {
#if defined(PVR_LDM_PLATFORM_MODULE)
    .driver = {
        .name = DRVNAME,
    },
#endif
#if defined(PVR_LDM_PCI_MODULE)
    .name = DRVNAME,
#endif
#if defined(PVR_LDM_PCI_MODULE) || defined(PVR_USE_PRE_REGISTERED_PLATFORM_DEV)
    .id_table = powervr_id_table,
#endif
    .probe = PVRSRVDriverProbe,
#if defined(PVR_LDM_PLATFORM_MODULE)
    .remove = PVRSRVDriverRemove,
#endif
#if defined(PVR_LDM_PCI_MODULE)
    .remove = __devexit_p(PVRSRVDriverRemove),
#endif
    .suspend = PVRSRVDriverSuspend,
    .resume = PVRSRVDriverResume,
    .shutdown = PVRSRVDriverShutdown,
};

/* The LDM device the driver bound to; recorded in probe for SysInitialise. */
LDM_DEV *gpsPVRLDMDev;

#if defined(MODULE) && defined(PVR_LDM_PLATFORM_MODULE) && \
    !defined(PVR_USE_PRE_REGISTERED_PLATFORM_DEV)
/* Empty release callback: the device is a static object, nothing to free. */
static void PVRSRVDeviceRelease(struct device unref__ *pDevice)
{
}

/* Self-registered platform device used when the board has not
 * pre-registered one. */
static struct platform_device powervr_device = {
    .name = DEVNAME,
    .id = -1,
    .dev = {
        .release = PVRSRVDeviceRelease
    }
};
#endif

/*
 * Bus probe callback (platform or PCI signature, selected by the build).
 * Records the device and runs SysInitialise the first time a device binds
 * (SysAcquireDataNoCheck returning NULL means services are uninitialised).
 */
#if defined(PVR_LDM_PLATFORM_MODULE)
static int PVRSRVDriverProbe(LDM_DEV *pDevice)
#endif
#if defined(PVR_LDM_PCI_MODULE)
static int __devinit PVRSRVDriverProbe(LDM_DEV *pDevice, const struct pci_device_id *id)
#endif
{
    SYS_DATA *psSysData;

    PVR_TRACE(("PVRSRVDriverProbe(pDevice=%p)", pDevice));

#if 0
    if (PerDeviceSysInitialise((IMG_PVOID)pDevice) != PVRSRV_OK)
    {
        return -EINVAL;
    }
#endif

    psSysData = SysAcquireDataNoCheck();
    if ( psSysData == IMG_NULL)
    {
        gpsPVRLDMDev = pDevice;

        if (SysInitialise() != PVRSRV_OK)
        {
            return -ENODEV;
        }
    }

    return 0;
}

/*
 * Bus remove callback.  Forces power back to D0 if manual power control
 * left the device powered down, then tears down the system layer.  The
 * platform variant returns int, the PCI variant void.
 */
#if defined (PVR_LDM_PLATFORM_MODULE)
static int PVRSRVDriverRemove(LDM_DEV *pDevice)
#endif
#if defined(PVR_LDM_PCI_MODULE)
static void __devexit PVRSRVDriverRemove(LDM_DEV *pDevice)
#endif
{
    SYS_DATA *psSysData;

    PVR_TRACE(("PVRSRVDriverRemove(pDevice=%p)", pDevice));

    SysAcquireData(&psSysData);

#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL)
    if (gPVRPowerLevel != 0)
    {
        if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0) == PVRSRV_OK)
        {
            gPVRPowerLevel = 0;
        }
    }
#endif
    (void) SysDeinitialise(psSysData);

    gpsPVRLDMDev = IMG_NULL;

#if 0
    if (PerDeviceSysDeInitialise((IMG_PVOID)pDevice) != PVRSRV_OK)
    {
        return -EINVAL;
    }
#endif

#if defined (PVR_LDM_PLATFORM_MODULE)
    return 0;
#endif
#if defined (PVR_LDM_PCI_MODULE)
    return;
#endif
}
#endif

#if defined(PVR_LDM_MODULE) || defined(PVR_DRI_DRM_PLATFORM_DEV)
/* Shutdown hook: put the hardware into D3 before the system powers off. */
PVR_MOD_STATIC void PVRSRVDriverShutdown(LDM_DEV *pDevice)
{
    PVR_TRACE(("PVRSRVDriverShutdown(pDevice=%p)", pDevice));

    (void) PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D3);
}

#endif

#if defined(PVR_LDM_MODULE) || defined(SUPPORT_DRI_DRM)
/* Suspend hook (D3).  A no-op when manual power control owns the power
 * state in non-DRM debug builds. */
#if defined(SUPPORT_DRI_DRM) && !defined(PVR_DRI_DRM_PLATFORM_DEV)
int PVRSRVDriverSuspend(struct drm_device *pDevice, pm_message_t state)
#else
PVR_MOD_STATIC int PVRSRVDriverSuspend(LDM_DEV *pDevice, pm_message_t state)
#endif
{
#if !(defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL) && !defined(SUPPORT_DRI_DRM))
    PVR_TRACE(( "PVRSRVDriverSuspend(pDevice=%p)", pDevice));

    if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D3) != PVRSRV_OK)
    {
        return -EINVAL;
    }
#endif
    return 0;
}

/* Resume hook (back to D0); mirrors PVRSRVDriverSuspend. */
#if defined(SUPPORT_DRI_DRM) && !defined(PVR_DRI_DRM_PLATFORM_DEV)
int PVRSRVDriverResume(struct drm_device *pDevice)
#else
PVR_MOD_STATIC int PVRSRVDriverResume(LDM_DEV *pDevice)
#endif
{
#if !(defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL) && !defined(SUPPORT_DRI_DRM))
    PVR_TRACE(("PVRSRVDriverResume(pDevice=%p)", pDevice));

    if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0) != PVRSRV_OK)
    {
        return -EINVAL;
    }
#endif
    return 0;
}
#endif

#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL) && !defined(SUPPORT_DRI_DRM)
/*
 * /proc write handler for manual power control.  Expects exactly one digit
 * followed by '\n'; a non-zero level forces D3, zero restores D0.
 */
IMG_INT PVRProcSetPowerLevel(struct file *file, const IMG_CHAR *buffer, IMG_UINT32 count, IMG_VOID *data)
{
    IMG_CHAR data_buffer[2];
    IMG_UINT32 PVRPowerLevel;

    if (count != sizeof(data_buffer))
    {
        return -EINVAL;
    }
    else
    {
        if (copy_from_user(data_buffer, buffer, count))
            return -EINVAL;
        if (data_buffer[count - 1] != '\n')
            return -EINVAL;
        PVRPowerLevel = data_buffer[0] - '0';
        if (PVRPowerLevel != gPVRPowerLevel)
        {
            if (PVRPowerLevel != 0)
            {
                if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D3) != PVRSRV_OK)
                {
                    return -EINVAL;
                }
            }
            else
            {
                if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0) != PVRSRV_OK)
                {
                    return -EINVAL;
                }
            }

            gPVRPowerLevel = PVRPowerLevel;
        }
    }

    return (count);
}

/* /proc read handler: prints the current manual power level.
 * NOTE(review): "%lu" with an IMG_UINT32 argument — format/width mismatch
 * on some architectures; confirm IMG_UINT32's definition. */
void ProcSeqShowPowerLevel(struct seq_file *sfile,void* el)
{
    seq_printf(sfile, "%lu\n", gPVRPowerLevel);
}

#endif

/*
 * Open entry point (char device or DRM file).  Connects the calling
 * process to services and allocates per-file private data under the
 * global services lock.
 */
#if defined(SUPPORT_DRI_DRM)
int PVRSRVOpen(struct drm_device unref__ *dev, struct drm_file *pFile)
#else
static int PVRSRVOpen(struct inode unref__ * pInode, struct file *pFile)
#endif
{
    PVRSRV_FILE_PRIVATE_DATA *psPrivateData;
    IMG_HANDLE hBlockAlloc;
    int iRet = -ENOMEM;
    PVRSRV_ERROR eError;
    IMG_UINT32 ui32PID;
#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
    PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc;
#endif

    LinuxLockMutex(&gPVRSRVLock);

    ui32PID = OSGetCurrentProcessIDKM();

    if (PVRSRVProcessConnect(ui32PID, 0) != PVRSRV_OK)
        goto err_unlock;

#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
    psEnvPerProc = PVRSRVPerProcessPrivateData(ui32PID);
    if (psEnvPerProc == IMG_NULL)
    {
        PVR_DPF((PVR_DBG_ERROR, "%s: No per-process private data", __FUNCTION__));
        goto err_unlock;
    }
#endif

    eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
                        sizeof(PVRSRV_FILE_PRIVATE_DATA),
                        (IMG_PVOID *)&psPrivateData,
                        &hBlockAlloc,
                        "File Private Data");

    if(eError != PVRSRV_OK)
        goto err_unlock;

#if defined (SUPPORT_SID_INTERFACE)
    psPrivateData->hKernelMemInfo = 0;
#else
    psPrivateData->hKernelMemInfo = NULL;
#endif
#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
    psPrivateData->psDRMFile = pFile;

    list_add_tail(&psPrivateData->sDRMAuthListItem, &psEnvPerProc->sDRMAuthListHead);
#endif
    psPrivateData->ui32OpenPID = ui32PID;
    psPrivateData->hBlockAlloc = hBlockAlloc;
    PRIVATE_DATA(pFile) = psPrivateData;
    iRet = 0;
err_unlock:
    LinuxUnLockMutex(&gPVRSRVLock);
    return iRet;
}

/*
 * Release entry point.  Disconnects the opening process from services and
 * frees the per-file private data.  gui32ReleasePID exposes the owning PID
 * to the disconnect path while it runs.
 */
#if defined(SUPPORT_DRI_DRM)
void PVRSRVRelease(void *pvPrivData)
#else
static int PVRSRVRelease(struct inode unref__ * pInode, struct file *pFile)
#endif
{
    PVRSRV_FILE_PRIVATE_DATA *psPrivateData;

    LinuxLockMutex(&gPVRSRVLock);

#if defined(SUPPORT_DRI_DRM)
    psPrivateData = (PVRSRV_FILE_PRIVATE_DATA *)pvPrivData;
#else
    psPrivateData = PRIVATE_DATA(pFile);
#endif
    if (psPrivateData != IMG_NULL)
    {
#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
        list_del(&psPrivateData->sDRMAuthListItem);
#endif

        gui32ReleasePID = psPrivateData->ui32OpenPID;
        PVRSRVProcessDisconnect(psPrivateData->ui32OpenPID);
        gui32ReleasePID = 0;

        OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
                  sizeof(PVRSRV_FILE_PRIVATE_DATA),
                  psPrivateData, psPrivateData->hBlockAlloc);

#if !defined(SUPPORT_DRI_DRM)
        PRIVATE_DATA(pFile) = IMG_NULL;
#endif
    }

    LinuxUnLockMutex(&gPVRSRVLock);

#if !defined(SUPPORT_DRI_DRM)
    return 0;
#endif
}

/*
 * Module initialisation: sets up proc entries, OS helpers, memory
 * management and the bridge, then either registers an LDM driver (which
 * triggers probe/SysInitialise) or calls SysInitialise directly, and
 * finally creates the character device and sysfs class.  Unwinds in
 * reverse order via the labelled error paths.
 */
#if defined(SUPPORT_DRI_DRM)
int PVRCore_Init(void)
#else
static int __init PVRCore_Init(void)
#endif
{
    int error;
#if !defined(PVR_LDM_MODULE)
    PVRSRV_ERROR eError;
#else
    struct device *psDev;
#endif

#if !defined(SUPPORT_DRI_DRM)
    PVRDPFInit();
#endif
    PVR_TRACE(("PVRCore_Init"));

    LinuxInitMutex(&gPVRSRVLock);

    if (CreateProcEntries ())
    {
        error = -ENOMEM;
        return error;
    }

    if (PVROSFuncInit() != PVRSRV_OK)
    {
        error = -ENOMEM;
        goto init_failed;
    }

    PVRLinuxMUtilsInit();

    if(LinuxMMInit() != PVRSRV_OK)
    {
        error = -ENOMEM;
        goto init_failed;
    }

    LinuxBridgeInit();

    PVRMMapInit();

#if defined(PVR_LDM_MODULE)

#if defined(PVR_LDM_PLATFORM_MODULE)
    if ((error = platform_driver_register(&powervr_driver)) != 0)
    {
        PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register platform driver (%d)", error));
        goto init_failed;
    }

#if defined(MODULE) && !defined(PVR_USE_PRE_REGISTERED_PLATFORM_DEV)
    if ((error = platform_device_register(&powervr_device)) != 0)
    {
        platform_driver_unregister(&powervr_driver);
        PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register platform device (%d)", error));
        goto init_failed;
    }
#endif
#endif

#if defined(PVR_LDM_PCI_MODULE)
    if ((error = pci_register_driver(&powervr_driver)) != 0)
    {
        PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register PCI driver (%d)", error));
        goto init_failed;
    }
#endif

#else
    /* No LDM driver model: initialise the system layer directly. */
    if ((eError = SysInitialise()) != PVRSRV_OK)
    {
        error = -ENODEV;
#if defined(TCF_REV) && (TCF_REV == 110)
        if(eError == PVRSRV_ERROR_NOT_SUPPORTED)
        {
            printk("\nAtlas wrapper (FPGA image) version mismatch");
            error = -ENODEV;
        }
#endif
        goto init_failed;
    }
#endif

#if !defined(SUPPORT_DRI_DRM)
    /* Dynamically allocate a char-device major number. */
    AssignedMajorNumber = register_chrdev(0, DEVNAME, &pvrsrv_fops);

    if (AssignedMajorNumber <= 0)
    {
        PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to get major number"));
        error = -EBUSY;
        goto sys_deinit;
    }

    PVR_TRACE(("PVRCore_Init: major device %d", AssignedMajorNumber));
#endif

#if defined(PVR_LDM_MODULE)
    psPvrClass = class_create(THIS_MODULE, "pvr");
    if (IS_ERR(psPvrClass))
    {
        PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to create class (%ld)", PTR_ERR(psPvrClass)));
        error = -EBUSY;
        goto unregister_device;
    }

    /* device_create gained an extra drvdata argument after 2.6.26. */
    psDev = device_create(psPvrClass, NULL, MKDEV(AssignedMajorNumber, 0),
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))
                          NULL,
#endif
                          DEVNAME);
    if (IS_ERR(psDev))
    {
        PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to create device (%ld)", PTR_ERR(psDev)));
        error = -EBUSY;
        goto destroy_class;
    }
#endif

    return 0;

#if defined(PVR_LDM_MODULE)
destroy_class:
    class_destroy(psPvrClass);
unregister_device:
    unregister_chrdev((IMG_UINT)AssignedMajorNumber, DRVNAME);
#endif
#if !defined(SUPPORT_DRI_DRM)
sys_deinit:
#endif
#if defined(PVR_LDM_MODULE)
#if defined(PVR_LDM_PCI_MODULE)
    pci_unregister_driver(&powervr_driver);
#endif

#if defined (PVR_LDM_PLATFORM_MODULE)
#if defined(MODULE) && !defined(PVR_USE_PRE_REGISTERED_PLATFORM_DEV)
    platform_device_unregister(&powervr_device);
#endif
    platform_driver_unregister(&powervr_driver);
#endif

#else
    {
        SYS_DATA *psSysData;

        psSysData = SysAcquireDataNoCheck();
        if (psSysData != IMG_NULL)
        {
            (void) SysDeinitialise(psSysData);
        }
    }
#endif
init_failed:
    PVRMMapCleanup();
    LinuxMMCleanup();
    LinuxBridgeDeInit();
    PVROSFuncDeInit();
    RemoveProcEntries();

    return error;

}

/*
 * Module cleanup: reverse of PVRCore_Init.  Destroys the device node and
 * class, unregisters the char device, unregisters the LDM driver (or
 * deinitialises the system layer directly), and tears down the support
 * layers.
 */
#if defined(SUPPORT_DRI_DRM)
void PVRCore_Cleanup(void)
#else
static void __exit PVRCore_Cleanup(void)
#endif
{
#if !defined(PVR_LDM_MODULE)
    SYS_DATA *psSysData;
#endif
    PVR_TRACE(("PVRCore_Cleanup"));

#if !defined(PVR_LDM_MODULE)
    SysAcquireData(&psSysData);
#endif

#if defined(PVR_LDM_MODULE)
    device_destroy(psPvrClass, MKDEV(AssignedMajorNumber, 0));
    class_destroy(psPvrClass);
#endif

#if !defined(SUPPORT_DRI_DRM)
    /* unregister_chrdev returned an int (checkable) on kernels <= 2.6.22
     * and void afterwards; the preprocessor stitches the right statement
     * shape together for each case. */
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22))
    if (
#endif
        unregister_chrdev((IMG_UINT)AssignedMajorNumber, DRVNAME)
#if !(LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22))
                                    ;
#else
                                )
    {
        PVR_DPF((PVR_DBG_ERROR," can't unregister device major %d", AssignedMajorNumber));
    }
#endif
#endif

#if defined(PVR_LDM_MODULE)

#if defined(PVR_LDM_PCI_MODULE)
    pci_unregister_driver(&powervr_driver);
#endif

#if defined (PVR_LDM_PLATFORM_MODULE)
#if defined(MODULE) && !defined(PVR_USE_PRE_REGISTERED_PLATFORM_DEV)
    platform_device_unregister(&powervr_device);
#endif
    platform_driver_unregister(&powervr_driver);
#endif

#else
#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL)
    if (gPVRPowerLevel != 0)
    {
        if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0) == PVRSRV_OK)
        {
            gPVRPowerLevel = 0;
        }
    }
#endif
    (void) SysDeinitialise(psSysData);
#endif

    PVRMMapCleanup();

    LinuxMMCleanup();

    LinuxBridgeDeInit();

    PVROSFuncDeInit();

    RemoveProcEntries();

    PVR_TRACE(("PVRCore_Cleanup: unloading"));
}

#if !defined(SUPPORT_DRI_DRM)
module_init(PVRCore_Init);
module_exit(PVRCore_Cleanup);
#endif
Java
/* driver/i2c/chip/tap6185.c
 *
 * TI rt5501 Speaker Amp
 *
 * Copyright (C) 2010 HTC Corporation
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * RT5501 headphone amplifier I2C driver: headset plug/unplug handling,
 * impedance detection work, and raw register access helpers.
 */

#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/miscdevice.h>
#include <asm/uaccess.h>
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/workqueue.h>
#include <linux/freezer.h>
#include <mach/rt5501.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/mfd/pm8xxx/pm8921.h>
#include <linux/mfd/pm8xxx/pm8921.h> /* NOTE(review): duplicate include */
#include <mach/htc_headset_mgr.h>
#include <linux/wakelock.h>
#include <mach/htc_acoustic_pmic.h>
#include <linux/jiffies.h>

/* Route this driver's pr_info/pr_err through the HTC audio log macros. */
#undef pr_info
#undef pr_err
#define pr_info(fmt, ...) pr_aud_info(fmt, ##__VA_ARGS__)
#define pr_err(fmt, ...) pr_aud_err(fmt, ##__VA_ARGS__)

/* NOTE(review): both branches define DEBUG to 1, so verbose I2C logging is
 * always compiled in regardless of CONFIG_AMP_RT5501_ON_GPIO. */
#ifdef CONFIG_AMP_RT5501_ON_GPIO
#define DEBUG (1)
#else
#define DEBUG (1)
#endif
#define AMP_ON_CMD_LEN 7
#define RETRY_CNT 5

#define DRIVER_NAME "RT5501"

/* All mutable driver state shared between the headset-manager callbacks
 * and the deferred work items.  gpiolock guards the enable GPIO / S4
 * regulator mode; mlock guards the status fields. */
struct headset_query {
    struct mutex mlock;
    struct mutex gpiolock;
    struct delayed_work hs_imp_detec_work;
    struct wake_lock hs_wake_lock;
    struct wake_lock gpio_wake_lock;
    enum HEADSET_QUERY_STATUS hs_qstatus;
    enum RT5501_STATUS rt5501_status;
    enum HEADSET_OM headsetom;
    enum RT5501_Mode curmode;
    enum AMP_GPIO_STATUS gpiostatus;
    enum AMP_S4_STATUS s4status;
    int action_on;
    int gpio_off_cancel;
    struct mutex actionlock;
    struct delayed_work volume_ramp_work;
    struct delayed_work gpio_off_work;
};

static struct i2c_client *this_client;
static struct rt5501_platform_data *pdata;
static int rt5501Connect = 0;
static int MFG_MODE = 0;

struct rt5501_config_data rt5501_config_data;
static struct mutex hp_amp_lock;
static int rt5501_opened;
static int last_spkamp_state;
/* Canned register sequences: {count, {{reg, val}, ...}}. */
struct rt5501_config RT5501_AMP_ON = {6,{{0x1,0x1c},{0x2,0x00},{0x7,0x7f},{0x9,0x1},{0xa,0x0},{0xb,0xc7},}};
struct rt5501_config RT5501_AMP_INIT = {11,{{0,0xc0},{0x81,0x30},{0x87,0xf6},{0x93,0x8d},{0x95,0x7d},{0xa4,0x52},\
{0x96,0xae},{0x97,0x13},{0x99,0x35},{0x9b,0x68},{0x9d,0x68},}};
struct rt5501_config RT5501_AMP_MUTE = {1,{{0x1,0xC7},}};;
struct rt5501_config RT5501_AMP_OFF = {1,{{0x0,0x1},}};

static int rt5501_write_reg(u8 reg, u8 val);
static int rt5501_i2c_write_for_read(char *txData, int length);
static int rt5501_i2c_read(char *rxData, int length);
static void hs_imp_detec_func(struct work_struct *work);
static int rt5501_i2c_read_addr(char *rxData, unsigned char addr);
static int rt5501_i2c_write(struct rt5501_reg_data *txData, int length);
static void set_amp(int on, struct rt5501_config *i2c_command);
struct headset_query rt5501_query;
static struct workqueue_struct *hs_wq;

static struct workqueue_struct *ramp_wq;
static struct workqueue_struct *gpio_wq;
static int high_imp = 0;

#if 0
static int query_playback(void *pdata)
{
    return 0;
}
#endif

/*
 * Headset-manager plug notification.  on != 0: mute/suspend the amp if it
 * was playing and queue impedance detection.  on == 0: same mute path,
 * then — for high-impedance headsets — power the chip (via GPIO / S4 PWM
 * if needed) long enough to reset it before powering back down.
 * Lock order is always gpiolock then mlock.
 */
static int rt5501_headset_detect(int on)
{
    if(on) {

        pr_info("%s: headset in ++\n",__func__);
        mutex_lock(&rt5501_query.mlock);
        rt5501_query.hs_qstatus = RT5501_QUERY_HEADSET;
        rt5501_query.headsetom = HEADSET_OM_UNDER_DETECT;
        mutex_unlock(&rt5501_query.mlock);

        cancel_delayed_work_sync(&rt5501_query.hs_imp_detec_work);
        mutex_lock(&rt5501_query.gpiolock);
        mutex_lock(&rt5501_query.mlock);

        if(rt5501_query.rt5501_status == RT5501_PLAYBACK) {

            if(high_imp) {
                rt5501_write_reg(1,0x7);
                rt5501_write_reg(0xb1,0x81);
            } else {
                rt5501_write_reg(1,0xc7);
            }
            last_spkamp_state = 0;
            pr_info("%s: OFF\n", __func__);
            rt5501_query.rt5501_status = RT5501_SUSPEND;
        }
        pr_info("%s: headset in --\n",__func__);
        mutex_unlock(&rt5501_query.mlock);
        mutex_unlock(&rt5501_query.gpiolock);
        queue_delayed_work(hs_wq,&rt5501_query.hs_imp_detec_work,msecs_to_jiffies(5));
        pr_info("%s: headset in --2\n",__func__);
    } else {

        pr_info("%s: headset remove ++\n",__func__);
        flush_work_sync(&rt5501_query.volume_ramp_work.work);
        mutex_lock(&rt5501_query.mlock);
        rt5501_query.hs_qstatus = RT5501_QUERY_OFF;
        rt5501_query.headsetom = HEADSET_OM_UNDER_DETECT;
        mutex_unlock(&rt5501_query.mlock);

        cancel_delayed_work_sync(&rt5501_query.hs_imp_detec_work);
        mutex_lock(&rt5501_query.gpiolock);
        mutex_lock(&rt5501_query.mlock);

        if(rt5501_query.rt5501_status == RT5501_PLAYBACK) {

            if(high_imp) {
                rt5501_write_reg(1,0x7);
                rt5501_write_reg(0xb1,0x81);
            } else {
                rt5501_write_reg(1,0xc7);
            }
            last_spkamp_state = 0;
            pr_info("%s: OFF\n", __func__);
            rt5501_query.rt5501_status = RT5501_SUSPEND;
        }

        rt5501_query.curmode = RT5501_MODE_OFF;
        pr_info("%s: headset remove --1\n",__func__);

        if(high_imp) {
            int closegpio = 0;
            /* Chip may be unpowered: raise the enable GPIO (switching the
             * S4 supply to PWM first) just for the reset writes below. */
            if((rt5501_query.gpiostatus == AMP_GPIO_OFF) && pdata->gpio_rt5501_spk_en) {

                if(rt5501_query.s4status == AMP_S4_AUTO) {
                    pm8921_aud_set_s4_pwm();
                    rt5501_query.s4status = AMP_S4_PWM;
                    msleep(1);
                }

                pr_info("%s: enable gpio %d\n",__func__,pdata->gpio_rt5501_spk_en);
                gpio_direction_output(pdata->gpio_rt5501_spk_en, 1);
                rt5501_query.gpiostatus = AMP_GPIO_ON;
                closegpio = 1;
                msleep(1);
            }
            pr_info("%s: reset rt5501\n",__func__);
            rt5501_write_reg(0x0,0x4);
            mdelay(1);

            rt5501_write_reg(0x1,0xc7);
            high_imp = 0;

            /* Only drop the GPIO again if this function raised it. */
            if(closegpio && (rt5501_query.gpiostatus == AMP_GPIO_ON) && pdata->gpio_rt5501_spk_en) {
                pr_info("%s: disable gpio %d\n",__func__,pdata->gpio_rt5501_spk_en);
                gpio_direction_output(pdata->gpio_rt5501_spk_en, 0);
                rt5501_query.gpiostatus = AMP_GPIO_OFF;

                if(rt5501_query.s4status == AMP_S4_PWM) {
                    pm8921_aud_set_s4_auto();
                    rt5501_query.s4status = AMP_S4_AUTO;
                }
            }
        }

        mutex_unlock(&rt5501_query.mlock);
        mutex_unlock(&rt5501_query.gpiolock);
        pr_info("%s: headset remove --2\n",__func__);

    }

    return 0;
}

/* Writes a single register over I2C; returns 0 on success, or the negative
 * i2c_transfer error code. */
static int rt5501_write_reg(u8 reg, u8 val)
{
    int err;
    struct i2c_msg msg[1];
    unsigned char data[2];

    msg->addr = this_client->addr;
    msg->flags = 0;
    msg->len = 2;
    msg->buf = data;
    data[0] = reg;
    data[1] = val;
    pr_info("%s: write reg 0x%x val 0x%x\n",__func__,data[0],data[1]);
    err = i2c_transfer(this_client->adapter, msg, 1);
    if (err >= 0)
        return 0;
    else {

        pr_info("%s: write error error %d\n",__func__,err);
        return err;
    }
}

/* Writes a list of {addr, val} register pairs, retrying each write up to
 * RETRY_CNT times (20 ms between attempts) before giving up with -EIO. */
static int rt5501_i2c_write(struct rt5501_reg_data *txData, int length)
{
    int i, retry, pass = 0;
    char buf[2];
    struct i2c_msg msg[] = {
        {
            .addr = this_client->addr,
            .flags = 0,
            .len = 2,
            .buf = buf,
        },
    };

    for (i = 0; i < length; i++) {

        buf[0] = txData[i].addr;
        buf[1] = txData[i].val;

#if DEBUG
        pr_info("%s:i2c_write addr 0x%x val 0x%x\n", __func__,buf[0], buf[1]);
#endif
        msg->buf = buf;
        retry = RETRY_CNT;
        pass = 0;
        while (retry--) {
            if (i2c_transfer(this_client->adapter, msg, 1) < 0) {
                pr_err("%s: I2C transfer error %d retry %d\n",
                        __func__, i, retry);
                msleep(20);
            } else {
                pass = 1;
                break;
            }
        }
        if (pass == 0) {
            pr_err("I2C transfer error, retry fail\n");
            return -EIO;
        }
    }
    return 0;
}

/* Writes txData[i] to register i for i in [0, length), with the same
 * retry policy as rt5501_i2c_write. */
static int rt5501_i2c_write_for_read(char *txData, int length)
{
    int i, retry, pass = 0;
    char buf[2];
    struct i2c_msg msg[] = {
        {
            .addr = this_client->addr,
            .flags = 0,
            .len = 2,
            .buf = buf,
        },
    };

    for (i = 0; i < length; i++) {
        buf[0] = i;
        buf[1] = txData[i];
#if DEBUG
        pr_info("i2c_write %d=%x\n", i, buf[1]);
#endif
        msg->buf = buf;
        retry = RETRY_CNT;
        pass = 0;
        while (retry--) {
            if (i2c_transfer(this_client->adapter, msg, 1) < 0) {
                pr_err("%s: I2C transfer error %d retry %d\n",
                        __func__, i, retry);
                msleep(20);
            } else {
                pass = 1;
                break;
            }
        }
        if (pass == 0) {
            pr_err("I2C transfer error, retry fail\n");
            return -EIO;
        }
    }
    return 0;
}

/* Reads "length" bytes from the device's current address into rxData and
 * logs every byte; returns 0 or the negative transfer error. */
static int rt5501_i2c_read(char *rxData, int length)
{
    int rc;
    struct i2c_msg msgs[] = {
        {
            .addr = this_client->addr,
            .flags = I2C_M_RD,
            .len = length,
            .buf = rxData,
        },
    };

    rc = i2c_transfer(this_client->adapter, msgs, 1);
    if (rc < 0) {
        pr_err("%s: transfer error %d\n", __func__, rc);
        return rc;
    }

    {
        int i = 0;
        for (i = 0; i < length; i++)
            pr_info("i2c_read %s: rx[%d] = 0x%x\n", __func__, i, \
                rxData[i]);
    }

    return 0;
}

/* Register read: writes the register address, then reads one byte back
 * into *rxData (two separate transfers, not a combined transaction). */
static int rt5501_i2c_read_addr(char *rxData, unsigned char addr)
{
    int rc;
    struct i2c_msg msgs[] = {
        {
            .addr = this_client->addr,
            .flags = 0,
            .len = 1,
            .buf = rxData,
        },
        {
            .addr = this_client->addr,
            .flags = I2C_M_RD,
            .len = 1,
            .buf = rxData,
        },
    };

    if(!rxData)
        return -1;

    *rxData = addr;

    rc = i2c_transfer(this_client->adapter, &msgs[0], 1);
    if (rc < 0) {
        pr_err("%s: transfer error %d\n", __func__, rc);
        return rc;
    }

    rc = i2c_transfer(this_client->adapter, &msgs[1], 1);
    if (rc < 0) {
        pr_err("%s: transfer error %d\n", __func__, rc);
        return rc;
    }

    pr_info("%s:i2c_read addr 0x%x value = 0x%x\n", __func__, addr, *rxData);
    return 0;
}

/* Misc-device open: enforces a single opener via rt5501_opened. */
static int rt5501_open(struct inode *inode, struct file *file)
{
    int rc = 0;

    mutex_lock(&hp_amp_lock);

    if (rt5501_opened) {
        pr_err("%s: busy\n", __func__);
        rc = -EBUSY;
        goto done;
    }
    rt5501_opened = 1;
done:
    mutex_unlock(&hp_amp_lock);
    return rc;
}

/* Misc-device release: clears the single-opener flag. */
static int rt5501_release(struct inode *inode, struct file *file)
{
    mutex_lock(&hp_amp_lock);
    rt5501_opened = 0;
    mutex_unlock(&hp_amp_lock);

    return 0;
}

#if 0
/* Compiled out: would replay the INIT (and optionally ON/MUTE/OFF)
 * register sequences. */
static int init_rt5501(void)
{
    int ret;
    ret = rt5501_i2c_write(RT5501_AMP_INIT.reg, RT5501_AMP_INIT.reg_len);
    if(ret < 0) {
        pr_err("init rt5501 error %d\n",ret);
        return ret;
    }

#if 0
    ret = rt5501_i2c_write(RT5501_AMP_ON.reg, RT5501_AMP_ON.reg_len);
    if(ret < 0) {
        pr_err("init rt5501 to playback error %d\n",ret);
        return ret;
    }

    ret = rt5501_i2c_write(RT5501_AMP_MUTE.reg, RT5501_AMP_MUTE.reg_len);
    if(ret < 0) {
        pr_err("init rt5501 to mute error %d\n",ret);
        return ret;
    }

    ret = rt5501_i2c_write(RT5501_AMP_OFF.reg, RT5501_AMP_OFF.reg_len);
    if(ret < 0) {
        pr_err("init rt5501 to off error %d\n",ret);
        return ret;
    }
#endif
    return ret;
}
#endif

/*
 * Deferred GPIO power-down.  Polls for up to 5 seconds so a concurrent
 * detection run can cancel it (gpio_off_cancel); if not cancelled, drops
 * the enable GPIO and returns the S4 supply to auto mode.  Holds a wake
 * lock for the whole wait.
 */
static void hs_imp_gpio_off(struct work_struct *work)
{
    u64 timeout = get_jiffies_64() + 5*HZ;

    wake_lock(&rt5501_query.gpio_wake_lock);

    while(1) {
        if(time_after64(get_jiffies_64(),timeout))
            break;
        else if(rt5501_query.gpio_off_cancel) {
            wake_unlock(&rt5501_query.gpio_wake_lock);
            return;
        } else
            msleep(10);
    }

    mutex_lock(&rt5501_query.gpiolock);
    pr_info("%s: disable gpio %d\n",__func__,pdata->gpio_rt5501_spk_en);
    gpio_direction_output(pdata->gpio_rt5501_spk_en, 0);
    rt5501_query.gpiostatus = AMP_GPIO_OFF;

    if(rt5501_query.s4status == AMP_S4_PWM) {
        pm8921_aud_set_s4_auto();
        rt5501_query.s4status = AMP_S4_AUTO;
    }
    mutex_unlock(&rt5501_query.gpiolock);
    wake_unlock(&rt5501_query.gpio_wake_lock);
}

/*
 * Headset impedance detection work.  Powers the chip, triggers a sense
 * cycle (reg 0x3 = 0x81, ~100 ms), reads the sense result from reg 0x4,
 * reprograms the amp's baseline registers, then decodes the impedance
 * class from the sense byte.
 * NOTE: this function continues past the end of this chunk; the trailing
 * portion is outside the visible region.
 */
static void hs_imp_detec_func(struct work_struct *work)
{
    struct headset_query *hs;
    char temp[8]={0x1,};
    int ret;
    int rt5501_status;  /* NOTE(review): unused in the visible portion */
    pr_info("%s: read rt5501 hs imp \n",__func__);

    hs = container_of(work, struct headset_query, hs_imp_detec_work.work);
    wake_lock(&hs->hs_wake_lock);

    rt5501_query.gpio_off_cancel = 1;
    cancel_delayed_work_sync(&rt5501_query.gpio_off_work);
    mutex_lock(&hs->gpiolock);
    mutex_lock(&hs->mlock);

    if(hs->hs_qstatus != RT5501_QUERY_HEADSET) {
        mutex_unlock(&hs->mlock);
        mutex_unlock(&hs->gpiolock);
        wake_unlock(&hs->hs_wake_lock);
        return;
    }

    if((hs->gpiostatus == AMP_GPIO_OFF) && pdata->gpio_rt5501_spk_en) {

        if(rt5501_query.s4status == AMP_S4_AUTO) {
            pm8921_aud_set_s4_pwm();
            rt5501_query.s4status = AMP_S4_PWM;
            msleep(1);
        }

        pr_info("%s: enable gpio %d\n",__func__,pdata->gpio_rt5501_spk_en);
        gpio_direction_output(pdata->gpio_rt5501_spk_en, 1);
        rt5501_query.gpiostatus = AMP_GPIO_ON;
    }

    msleep(1);

    /* Start an impedance sense cycle and give it ~100 ms to finish. */
    rt5501_write_reg(0,0x04);
    rt5501_write_reg(0xa4,0x52);
    rt5501_write_reg(1,0x7);
    msleep(10);

    rt5501_write_reg(0x3,0x81);

    msleep(101);
#if 0
    rt5501_i2c_read_addr(temp,0x0);
    rt5501_i2c_read_addr(temp,0x1);
    rt5501_i2c_read_addr(temp,0x2);
    rt5501_i2c_read_addr(temp,0x3);
    rt5501_i2c_read_addr(temp,0x5);
    rt5501_i2c_read_addr(temp,0x6);
#endif
    ret = rt5501_i2c_read_addr(temp,0x4);

    if(ret < 0) {
        pr_err("%s: read rt5501 status error %d\n",__func__,ret);

        /* Sense read failed: schedule the deferred GPIO power-down. */
        if((hs->gpiostatus == AMP_GPIO_ON) && pdata->gpio_rt5501_spk_en) {

            rt5501_query.gpio_off_cancel = 0;
            queue_delayed_work(gpio_wq, &rt5501_query.gpio_off_work, msecs_to_jiffies(0));
        }

        mutex_unlock(&hs->mlock);
        mutex_unlock(&hs->gpiolock);
        wake_unlock(&hs->hs_wake_lock);
        return;
    }

    rt5501_write_reg(0x0,0x4);
    mdelay(1);

#if 0
    init_rt5501();
#endif
    /* Reprogram the amp's baseline register set after the sense cycle. */
    rt5501_write_reg(0x0,0xc0);
    rt5501_write_reg(0x81,0x30);
    rt5501_write_reg(0x87,0xf6);
    rt5501_write_reg(0x90,0xd0);
    rt5501_write_reg(0x93,0x9d);
    rt5501_write_reg(0x95,0x7b);
    rt5501_write_reg(0xa4,0x01);
    rt5501_write_reg(0x96,0xae);
    rt5501_write_reg(0x97,0x11);
    rt5501_write_reg(0x98,0x22);
    rt5501_write_reg(0x99,0x44);
    rt5501_write_reg(0x9a,0x55);
    rt5501_write_reg(0x9b,0x66);
    rt5501_write_reg(0x9c,0x99);
    rt5501_write_reg(0x9d,0x66);
    rt5501_write_reg(0x9e,0x99);

    high_imp = 0;

    if(temp[0] & RT5501_SENSE_READY) {

        unsigned char om, hsmode;
        enum HEADSET_OM hsom;

        /* Decode the sense byte: bits 5:4 = mode, bits 3:1 = impedance
         * class; 0xc0/0xc1 indicate a mono headset. */
        hsmode = (temp[0] & 0x30) >> 4;
        om = (temp[0] & 0xe) >> 1;

        if(temp[0] == 0xc0 || temp[0] == 0xc1) {
            hsom = HEADSET_MONO;
        } else {

            switch(om) {
                case 0:
                    hsom = HEADSET_8OM;
                    break;
                case 1:
                    hsom = HEADSET_16OM;
                    break;
                case 2:
                    hsom = HEADSET_32OM;
                    break;
                case 3:
                    hsom = HEADSET_64OM;
                    break;
                case 4:
                    hsom = HEADSET_128OM;
                    break;
                case 5:
                    hsom = HEADSET_256OM;
                    break;
                case 6:
                    hsom = HEADSET_500OM;
                    break;
                case 7:
                    hsom = HEADSET_1KOM;
                    break;
                default:
                    hsom = HEADSET_OM_UNDER_DETECT;
break; } } hs->hs_qstatus = RT5501_QUERY_FINISH; hs->headsetom = hsom; if(om >= HEADSET_256OM && om <= HEADSET_1KOM) high_imp = 1; pr_info("rt5501 hs imp value 0x%x hsmode %d om 0x%x hsom %d high_imp %d\n",temp[0] & 0xf,hsmode,om,hsom,high_imp); if((hs->gpiostatus == AMP_GPIO_ON) && pdata->gpio_rt5501_spk_en) { rt5501_query.gpio_off_cancel = 0; queue_delayed_work(gpio_wq, &rt5501_query.gpio_off_work, msecs_to_jiffies(0)); } } else { if(hs->hs_qstatus == RT5501_QUERY_HEADSET) queue_delayed_work(hs_wq,&rt5501_query.hs_imp_detec_work,QUERY_LATTER); } rt5501_status = hs->rt5501_status; if(high_imp) { rt5501_write_reg(0xb1,0x81); rt5501_write_reg(0x80,0x87); rt5501_write_reg(0x83,0xc3); rt5501_write_reg(0x84,0x63); rt5501_write_reg(0x89,0x7); mdelay(9); rt5501_write_reg(0x83,0xcf); rt5501_write_reg(0x89,0x1d); mdelay(1); rt5501_write_reg(1,0x7); rt5501_write_reg(0xb1,0x81); } else { rt5501_write_reg(1,0xc7); } mutex_unlock(&hs->mlock); mutex_unlock(&hs->gpiolock); if(rt5501_status == RT5501_SUSPEND) set_rt5501_amp(1); wake_unlock(&hs->hs_wake_lock); } static void volume_ramp_func(struct work_struct *work) { mutex_lock(&rt5501_query.actionlock); if(rt5501_query.rt5501_status != RT5501_PLAYBACK) { u8 val; pr_info("%s: ramping-------------------------\n",__func__); mdelay(1); if(high_imp) rt5501_write_reg(0xb1,0x80); rt5501_write_reg(0x2,0x0); mdelay(1); val = 0x7; if (MFG_MODE) { pr_info("Skip volume ramp for MFG build"); val += 15; rt5501_write_reg(1,val); } else { #if 1 int i; for(i=0; i<15; i++) { if(!rt5501_query.action_on) { mutex_unlock(&rt5501_query.actionlock); return; } msleep(1); rt5501_write_reg(1,val); val++; } #else for(i=0; i<8; i++) { msleep(10); rt5501_write_reg(1,val); val += 2; } #endif } } set_amp(1, &RT5501_AMP_ON); mutex_unlock(&rt5501_query.actionlock); } static void set_amp(int on, struct rt5501_config *i2c_command) { pr_info("%s: %d\n", __func__, on); mutex_lock(&rt5501_query.mlock); mutex_lock(&hp_amp_lock); if(rt5501_query.hs_qstatus == 
RT5501_QUERY_HEADSET) rt5501_query.hs_qstatus = RT5501_QUERY_FINISH; if (on) { rt5501_query.rt5501_status = RT5501_PLAYBACK; if (rt5501_i2c_write(i2c_command->reg, i2c_command->reg_len) == 0) { last_spkamp_state = 1; pr_info("%s: ON \n", __func__); } } else { if(high_imp) { rt5501_write_reg(1,0x7); rt5501_write_reg(0xb1,0x81); } else { rt5501_write_reg(1,0xc7); } if(rt5501_query.rt5501_status == RT5501_PLAYBACK) { last_spkamp_state = 0; pr_info("%s: OFF\n", __func__); } rt5501_query.rt5501_status = RT5501_OFF; rt5501_query.curmode = RT5501_MODE_OFF; } mutex_unlock(&hp_amp_lock); mutex_unlock(&rt5501_query.mlock); } int query_rt5501(void) { return rt5501Connect; } void set_rt5501_amp(int on) { pr_info("%s: %d\n", __func__, on); rt5501_query.gpio_off_cancel = 1; if(!on) rt5501_query.action_on = 0; cancel_delayed_work_sync(&rt5501_query.gpio_off_work); cancel_delayed_work_sync(&rt5501_query.volume_ramp_work); flush_work_sync(&rt5501_query.volume_ramp_work.work); mutex_lock(&rt5501_query.gpiolock); if(on) { if((rt5501_query.gpiostatus == AMP_GPIO_OFF) && pdata->gpio_rt5501_spk_en) { if(rt5501_query.s4status == AMP_S4_AUTO) { pm8921_aud_set_s4_pwm(); rt5501_query.s4status = AMP_S4_PWM; msleep(1); } #ifdef CONFIG_AMP_RT5501_DELAY msleep(50); #endif pr_info("%s: enable gpio %d\n",__func__,pdata->gpio_rt5501_spk_en); gpio_direction_output(pdata->gpio_rt5501_spk_en, 1); rt5501_query.gpiostatus = AMP_GPIO_ON; msleep(1); } rt5501_query.action_on = 1; queue_delayed_work(ramp_wq, &rt5501_query.volume_ramp_work, msecs_to_jiffies(0)); } else { set_amp(0, &RT5501_AMP_ON); if((rt5501_query.gpiostatus == AMP_GPIO_ON) && pdata->gpio_rt5501_spk_en) { rt5501_query.gpio_off_cancel = 0; queue_delayed_work(gpio_wq, &rt5501_query.gpio_off_work, msecs_to_jiffies(0)); } } mutex_unlock(&rt5501_query.gpiolock); } static int update_amp_parameter(int mode) { if (mode >= rt5501_config_data.mode_num) return -EINVAL; pr_info("%s: set mode %d\n", __func__, mode); if (mode == RT5501_MODE_OFF) 
memcpy(&RT5501_AMP_OFF, &rt5501_config_data.cmd_data[mode].config, sizeof(struct rt5501_config)); else if (mode == RT5501_INIT) memcpy(&RT5501_AMP_INIT, &rt5501_config_data.cmd_data[mode].config, sizeof(struct rt5501_config)); else if (mode == RT5501_MUTE) memcpy(&RT5501_AMP_MUTE, &rt5501_config_data.cmd_data[mode].config, sizeof(struct rt5501_config)); else { memcpy(&RT5501_AMP_ON, &rt5501_config_data.cmd_data[mode].config, sizeof(struct rt5501_config)); } return 0; } static long rt5501_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; int rc = 0, modeid = 0; unsigned char tmp[7]; unsigned char reg_idx[1] = {0x01}; struct rt5501_comm_data spk_cfg; unsigned char reg_value[2]; int premode = 0; int rt5501_status = 0; switch (cmd) { case RT5501_WRITE_REG: pr_info("%s: RT5501_WRITE_REG\n", __func__); mutex_lock(&hp_amp_lock); if (!last_spkamp_state) { mdelay(30); } if (copy_from_user(reg_value, argp, sizeof(reg_value))) goto err1; pr_info("%s: reg_value[0]=%2x, reg_value[1]=%2x\n", __func__, \ reg_value[0], reg_value[1]); rc = rt5501_write_reg(reg_value[0], reg_value[1]); err1: mutex_unlock(&hp_amp_lock); break; case RT5501_SET_CONFIG: if (copy_from_user(&spk_cfg, argp, sizeof(struct rt5501_comm_data))) return -EFAULT; memcpy(&RT5501_AMP_ON, &spk_cfg.config, sizeof(struct rt5501_config)); break; case RT5501_READ_CONFIG: mutex_lock(&hp_amp_lock); if (!last_spkamp_state) { mdelay(30); } rc = rt5501_i2c_write_for_read(reg_idx, sizeof(reg_idx)); if (rc < 0) goto err2; rc = rt5501_i2c_read(tmp, sizeof(tmp)); if (rc < 0) goto err2; if (copy_to_user(argp, &tmp, sizeof(tmp))) rc = -EFAULT; err2: mutex_unlock(&hp_amp_lock); break; case RT5501_SET_MODE: if (copy_from_user(&modeid, argp, sizeof(modeid))) return -EFAULT; if (modeid >= rt5501_config_data.mode_num || modeid <= 0) { pr_err("unsupported rt5501 mode %d\n", modeid); return -EINVAL; } mutex_lock(&hp_amp_lock); premode = rt5501_query.curmode; rt5501_query.curmode = 
modeid; rc = update_amp_parameter(modeid); rt5501_status = rt5501_query.rt5501_status; mutex_unlock(&hp_amp_lock); pr_info("%s:set rt5501 mode to %d curstatus %d\n", __func__,modeid,rt5501_status); if(rt5501_status == RT5501_SUSPEND || (rt5501_status == RT5501_PLAYBACK && premode != rt5501_query.curmode)) { flush_work_sync(&rt5501_query.volume_ramp_work.work); mutex_lock(&rt5501_query.actionlock); rt5501_query.action_on = 1; mutex_unlock(&rt5501_query.actionlock); queue_delayed_work(ramp_wq, &rt5501_query.volume_ramp_work, msecs_to_jiffies(280)); } break; case RT5501_SET_PARAM: if (copy_from_user(&rt5501_config_data.mode_num, argp, sizeof(unsigned int))) { pr_err("%s: copy from user failed.\n", __func__); return -EFAULT; } if (rt5501_config_data.mode_num <= 0) { pr_err("%s: invalid mode number %d\n", __func__, rt5501_config_data.mode_num); return -EINVAL; } if (rt5501_config_data.cmd_data == NULL) rt5501_config_data.cmd_data = kzalloc(sizeof(struct rt5501_comm_data)*rt5501_config_data.mode_num, GFP_KERNEL); if (!rt5501_config_data.cmd_data) { pr_err("%s: out of memory\n", __func__); return -ENOMEM; } if (copy_from_user(rt5501_config_data.cmd_data, ((struct rt5501_config_data*)argp)->cmd_data \ ,sizeof(struct rt5501_comm_data)*rt5501_config_data.mode_num)) { pr_err("%s: copy data from user failed.\n", __func__); kfree(rt5501_config_data.cmd_data); rt5501_config_data.cmd_data = NULL; return -EFAULT; } pr_info("%s: update rt5501 i2c commands #%d success.\n", __func__, rt5501_config_data.mode_num); mutex_lock(&hp_amp_lock); update_amp_parameter(RT5501_MODE_OFF); update_amp_parameter(RT5501_MUTE); update_amp_parameter(RT5501_INIT); mutex_unlock(&hp_amp_lock); rc = 0; break; case RT5501_QUERY_OM: mutex_lock(&rt5501_query.mlock); rc = rt5501_query.headsetom; mutex_unlock(&rt5501_query.mlock); pr_info("%s: query headset om %d\n", __func__,rc); if (copy_to_user(argp, &rc, sizeof(rc))) rc = -EFAULT; else rc = 0; break; default: pr_err("%s: Invalid command\n", __func__); rc = 
-EINVAL; break; } return rc; } static struct file_operations rt5501_fops = { .owner = THIS_MODULE, .open = rt5501_open, .release = rt5501_release, .unlocked_ioctl = rt5501_ioctl, }; static struct miscdevice rt5501_device = { .minor = MISC_DYNAMIC_MINOR, .name = "rt5501", .fops = &rt5501_fops, }; int rt5501_probe(struct i2c_client *client, const struct i2c_device_id *id) { int ret = 0; int err = 0; MFG_MODE = board_mfg_mode(); pdata = client->dev.platform_data; if (pdata == NULL) { pr_info("%s: platform data null\n", __func__); pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); if (pdata == NULL) { ret = -ENOMEM; pr_err("%s: platform data is NULL\n", __func__); goto err_alloc_data_failed; } } this_client = client; if (ret < 0) { pr_err("%s: pmic request aud_spk_en pin failed\n", __func__); goto err_free_gpio_all; } if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { pr_err("%s: i2c check functionality error\n", __func__); ret = -ENODEV; goto err_free_gpio_all; } if(pdata->gpio_rt5501_spk_en) { char temp[2]; err = gpio_request(pdata->gpio_rt5501_spk_en, "hp_en"); ret = gpio_direction_output(pdata->gpio_rt5501_spk_en, 1); if(ret < 0) { pr_err("%s: gpio %d on error %d\n", __func__,pdata->gpio_rt5501_spk_en,ret); } mdelay(1); ret = rt5501_i2c_read(temp, 2); if(ret < 0) { pr_info("rt5501 is not connected\n"); rt5501Connect = 0; } else { pr_info("rt5501 is connected\n"); rt5501Connect = 1; } rt5501_write_reg(0x0,0x4); mdelay(1); rt5501_write_reg(0x0,0xc0); rt5501_write_reg(0x81,0x30); rt5501_write_reg(0x87,0xf6); rt5501_write_reg(0x90,0xd0); rt5501_write_reg(0x93,0x9d); rt5501_write_reg(0x95,0x7b); rt5501_write_reg(0xa4,0x01); rt5501_write_reg(0x96,0xae); rt5501_write_reg(0x97,0x11); rt5501_write_reg(0x98,0x22); rt5501_write_reg(0x99,0x44); rt5501_write_reg(0x9a,0x55); rt5501_write_reg(0x9b,0x66); rt5501_write_reg(0x9c,0x99); rt5501_write_reg(0x9d,0x66); rt5501_write_reg(0x9e,0x99); rt5501_write_reg(0x1,0xc7); gpio_direction_output(pdata->gpio_rt5501_spk_en, 0); 
if(!err) gpio_free(pdata->gpio_rt5501_spk_en); if(ret < 0) { pr_err("%s: gpio %d off error %d\n", __func__,pdata->gpio_rt5501_spk_en,ret); } } if(rt5501Connect) { struct headset_notifier notifier; ret = misc_register(&rt5501_device); if (ret) { pr_err("%s: rt5501_device register failed\n", __func__); goto err_free_gpio_all; } hs_wq = create_workqueue("rt5501_hsdetect"); INIT_DELAYED_WORK(&rt5501_query.hs_imp_detec_work,hs_imp_detec_func); wake_lock_init(&rt5501_query.hs_wake_lock, WAKE_LOCK_SUSPEND, DRIVER_NAME); wake_lock_init(&rt5501_query.gpio_wake_lock, WAKE_LOCK_SUSPEND, DRIVER_NAME); ramp_wq = create_workqueue("rt5501_volume_ramp"); INIT_DELAYED_WORK(&rt5501_query.volume_ramp_work, volume_ramp_func); gpio_wq = create_workqueue("rt5501_gpio_off"); INIT_DELAYED_WORK(&rt5501_query.gpio_off_work, hs_imp_gpio_off); notifier.id = HEADSET_REG_HS_INSERT; notifier.func = rt5501_headset_detect; headset_notifier_register(&notifier); } return 0; err_free_gpio_all: rt5501Connect = 0; return ret; err_alloc_data_failed: rt5501Connect = 0; return ret; } static int rt5501_remove(struct i2c_client *client) { struct rt5501_platform_data *p6185data = i2c_get_clientdata(client); kfree(p6185data); if(rt5501Connect) { misc_deregister(&rt5501_device); cancel_delayed_work_sync(&rt5501_query.hs_imp_detec_work); destroy_workqueue(hs_wq); } return 0; } static void rt5501_shutdown(struct i2c_client *client) { rt5501_query.gpio_off_cancel = 1; cancel_delayed_work_sync(&rt5501_query.gpio_off_work); cancel_delayed_work_sync(&rt5501_query.volume_ramp_work); mutex_lock(&rt5501_query.gpiolock); mutex_lock(&hp_amp_lock); mutex_lock(&rt5501_query.mlock); if((rt5501_query.gpiostatus == AMP_GPIO_OFF) && pdata->gpio_rt5501_spk_en) { if(rt5501_query.s4status == AMP_S4_AUTO) { pm8921_aud_set_s4_pwm(); rt5501_query.s4status = AMP_S4_PWM; msleep(1); } pr_info("%s: enable gpio %d\n",__func__,pdata->gpio_rt5501_spk_en); gpio_direction_output(pdata->gpio_rt5501_spk_en, 1); rt5501_query.gpiostatus = 
AMP_GPIO_ON; msleep(1); } pr_info("%s: reset rt5501\n",__func__); rt5501_write_reg(0x0,0x4); mdelay(1); high_imp = 0; if((rt5501_query.gpiostatus == AMP_GPIO_ON) && pdata->gpio_rt5501_spk_en) { pr_info("%s: disable gpio %d\n",__func__,pdata->gpio_rt5501_spk_en); gpio_direction_output(pdata->gpio_rt5501_spk_en, 0); rt5501_query.gpiostatus = AMP_GPIO_OFF; if(rt5501_query.s4status == AMP_S4_PWM) { pm8921_aud_set_s4_auto(); rt5501_query.s4status = AMP_S4_AUTO; } } mutex_unlock(&rt5501_query.mlock); mutex_unlock(&hp_amp_lock); mutex_unlock(&rt5501_query.gpiolock); } static int rt5501_suspend(struct i2c_client *client, pm_message_t mesg) { return 0; } static int rt5501_resume(struct i2c_client *client) { return 0; } static const struct i2c_device_id rt5501_id[] = { { RT5501_I2C_NAME, 0 }, { } }; static struct i2c_driver rt5501_driver = { .probe = rt5501_probe, .remove = rt5501_remove, .shutdown = rt5501_shutdown, .suspend = rt5501_suspend, .resume = rt5501_resume, .id_table = rt5501_id, .driver = { .name = RT5501_I2C_NAME, }, }; static int __init rt5501_init(void) { pr_info("%s\n", __func__); mutex_init(&hp_amp_lock); mutex_init(&rt5501_query.mlock); mutex_init(&rt5501_query.gpiolock); mutex_init(&rt5501_query.actionlock); rt5501_query.rt5501_status = RT5501_OFF; rt5501_query.hs_qstatus = RT5501_QUERY_OFF; rt5501_query.headsetom = HEADSET_8OM; rt5501_query.curmode = RT5501_MODE_OFF; rt5501_query.gpiostatus = AMP_GPIO_OFF; rt5501_query.s4status = AMP_S4_AUTO; return i2c_add_driver(&rt5501_driver); } static void __exit rt5501_exit(void) { i2c_del_driver(&rt5501_driver); } module_init(rt5501_init); module_exit(rt5501_exit); MODULE_DESCRIPTION("rt5501 Speaker Amp driver"); MODULE_LICENSE("GPL");
Java
<?php
/**
 * Redux Framework is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * any later version.
 * Redux Framework is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 * You should have received a copy of the GNU General Public License
 * along with Redux Framework. If not, see <http://www.gnu.org/licenses/>.
 *
 * @package    ReduxFramework
 * @subpackage Field_thinkup_section
 * @author     Tobias Karnetze (athoss.de)
 * @version    1.0.0
 */

// Exit if accessed directly
if ( ! defined( 'ABSPATH' ) ) {
    exit;
}

// Don't duplicate me!
if ( ! class_exists( 'ReduxFramework_thinkup_section' ) ) {

    /**
     * Main ReduxFramework_thinkup_section class.
     *
     * A "section" field: it produces no markup of its own — the default Redux
     * title field is used to output the section title.
     *
     * @since 1.0.0
     */
    class ReduxFramework_thinkup_section {

        /**
         * Owning ReduxFramework instance.
         *
         * @var object
         */
        public $parent;

        /**
         * Field configuration array.
         *
         * @var array
         */
        public $field;

        /**
         * Current field value.
         *
         * @var string
         */
        public $value;

        /**
         * Filesystem path of this extension directory (trailing slash).
         * Declared (instead of created dynamically) for PHP 8.2+ compatibility.
         *
         * @var string
         */
        public $_extension_dir = '';

        /**
         * URL of this extension directory.
         *
         * @var string
         */
        public $_extension_url = '';

        /**
         * Field Constructor.
         *
         * Assigns field, value and parent, and resolves the extension
         * directory/URL from this file's location.
         *
         * Note: `$parent` now carries a default of `null` because a required
         * parameter may not follow optional ones (deprecated since PHP 8.0);
         * callers that pass all three arguments are unaffected.
         *
         * @since 1.0.0
         * @access public
         *
         * @param array  $field  Field configuration.
         * @param string $value  Current field value.
         * @param object $parent ReduxFramework instance.
         *
         * @return void
         */
        public function __construct( $field = array(), $value = '', $parent = null ) {
            $this->parent = $parent;
            $this->field  = $field;
            $this->value  = $value;

            if ( empty( $this->_extension_dir ) ) {
                // Normalise Windows backslashes so the path/URL maths below works on any OS.
                $this->_extension_dir = trailingslashit( str_replace( '\\', '/', dirname( __FILE__ ) ) );
                // Derive the public URL by swapping the ABSPATH prefix for the site URL.
                $this->_extension_url = site_url( str_replace( trailingslashit( str_replace( '\\', '/', ABSPATH ) ), '/', $this->_extension_dir ) );
            }
        }

        /**
         * Field Render Function.
         *
         * Intentionally empty: the default Redux title field is used to output
         * the section title (the wrapping `tr` is deleted afterwards).
         *
         * @since 1.0.0
         * @access public
         * @return void
         */
        public function render() {
            // Default Redux title field is used to output section title
            // delete the tr afterwards
        }

        /**
         * Enqueue scripts/styles for this field.
         *
         * No assets are required by this field, so this is a no-op.
         *
         * @since 1.0.0
         * @access public
         * @return void
         */
        public function enqueue() {
        }
    }
}
Java
""" accounts.test_views =================== Tests the REST API calls. Add more specific social registration tests """ import responses from django.core.urlresolvers import reverse from django.core import mail from django.contrib.sites.models import Site from django.contrib.auth import get_user_model from django.test.utils import override_settings from rest_framework import status from rest_framework.test import APIClient, APITestCase from allauth.account import app_settings from allauth.socialaccount.models import SocialApp from allauth.socialaccount.providers.facebook.provider import GRAPH_API_URL from .serializers import LoginSerializer class TestAccounts(APITestCase): """ Tests normal use - non social login. """ def setUp(self): self.login_url = reverse('accounts:rest_login') self.logout_url = reverse('accounts:rest_logout') self.register_url = reverse('accounts:rest_register') self.password_reset_url = reverse('accounts:rest_password_reset') self.rest_password_reset_confirm_url = reverse('accounts:rest_password_reset_confirm') self.password_change_url = reverse('accounts:rest_password_change') self.verify_url = reverse('accounts:rest_verify_email') self.user_url = reverse('accounts:rest_user_details') self.client = APIClient() self.reusable_user_data = {'username': 'admin', 'email': '[email protected]', 'password': 'password12'} self.reusable_user_data_change_password = {'username': 'admin', 'email': '[email protected]', 'password': 'password_same'} self.reusable_register_user_data = {'username': 'admin', 'email': '[email protected]', 'password1': 'password12', 'password2': 'password12'} self.reusable_register_user_data1 = {'username': 'admin1', 'email': '[email protected]', 'password1': 'password12', 'password2': 'password12'} self.reusable_register_user_data_no_username = {'email': '[email protected]', 'password1': 'password12', 'password2': 'password12'} self.reusable_register_user_data_no_email = {'username': 'admin', 'password1': 'password12', 'password2': 
'password12'} self.change_password_data_incorrect = {"new_password1": "password_not_same", "new_password2": "password_same"} self.change_password_data = {"new_password1": "password_same", "new_password2": "password_same"} self.change_password_data_old_password_field_enabled = {"old_password": "password12", "new_password1": "password_same", "new_password2": "password_same"} def create_user_and_login(self): """ Helper function to create a basic user, login and assign token credentials. """ get_user_model().objects.create_user('admin', '[email protected]', 'password12') response = self.client.post(self.login_url, self.reusable_user_data, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK, "Snap! Basic Login has failed with a helper function 'create_user_and_login'. Something is really wrong here.") self.client.credentials(HTTP_AUTHORIZATION='Token ' + response.data['key']) def _generate_uid_and_token(self, user): result = {} from django.utils.encoding import force_bytes from django.contrib.auth.tokens import default_token_generator from django import VERSION if VERSION[1] == 5: from django.utils.http import int_to_base36 result['uid'] = int_to_base36(user.pk) else: from django.utils.http import urlsafe_base64_encode result['uid'] = urlsafe_base64_encode(force_bytes(user.pk)) result['token'] = default_token_generator.make_token(user) return result def cleanUp(self): pass @override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME) def test_login_basic_username_auth_method(self): """ Tests basic functionality of login with authentication method of username. 
""" # Assumes you provide username,password and returns a token get_user_model().objects.create_user('admin3', '', 'password12') data = {"username": 'admin3', "email": "", "password": 'password12'} response = self.client.post(self.login_url, data, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertIn('key', response.content) @override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL, ACCOUNT_EMAIL_REQUIRED=True) def test_login_basic_email_auth_method(self): """ Tests basic functionality of login with authentication method of email. """ # Assumes you provide username,password and returns a token get_user_model().objects.create_user('admin', '[email protected]', 'password12') data = {"username": '', "email": "[email protected]", "password": 'password12'} response = self.client.post(self.login_url, data, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertIn('key', response.content) @override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL) def test_login_basic_username_email_auth_method(self): """ Tests basic functionality of login with authentication method of username or email. 
""" # Assumes you provide username,password and returns a token get_user_model().objects.create_user('admin', '[email protected]', 'password12') # Check email data = {"username": '', "email": "[email protected]", "password": 'password12'} response = self.client.post(self.login_url, data, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) # Check username data = {"username": 'admin', "email": '', "password": 'password12'} response = self.client.post(self.login_url, data, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertIn('key', response.content) @override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME) def test_login_auth_method_username_fail_no_users_in_db(self): """ Tests login fails with a 400 when no users in db for login auth method of 'username'. """ serializer = LoginSerializer({'username': 'admin', 'password': 'password12'}) response = self.client.post(self.login_url, serializer.data, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST) @override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL) def test_login_email_auth_method_fail_no_users_in_db(self): """ Tests login fails with a 400 when no users in db for login auth method of 'email'. """ serializer = LoginSerializer({'username': 'admin', 'password': 'password12'}) response = self.client.post(self.login_url, serializer.data, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST) @override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL) def test_login_username_email_auth_method_fail_no_users_in_db(self): """ Tests login fails with a 400 when no users in db for login auth method of 'username_email'. 
""" serializer = LoginSerializer({'username': 'admin', 'password': 'password12'}) response = self.client.post(self.login_url, serializer.data, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST) def common_test_login_fail_incorrect_change(self): # Create user, login and try and change password INCORRECTLY self.create_user_and_login() self.client.post(self.password_change_url, data=self.change_password_data_incorrect, format='json') # Remove credentials self.client.credentials() response = self.client.post(self.login_url, self.reusable_user_data, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertIn('key', response.content) @override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME) def test_login_username_auth_method_fail_incorrect_password_change(self): """ Tests login fails with an incorrect/invalid password change (login auth username). """ self.common_test_login_fail_incorrect_change() @override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL) def test_login_email_auth_method_fail_incorrect_password_change(self): """ Tests login fails with an incorrect/invalid password change (login auth email). """ self.common_test_login_fail_incorrect_change() @override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL) def test_login_username_email_auth_method_fail_incorrect_password_change(self): """ Tests login fails with an incorrect/invalid password change (login auth username_email). 
""" self.common_test_login_fail_incorrect_change() def common_test_login_correct_password_change(self): # Create user, login and try and change password successfully self.create_user_and_login() response = self.client.post(self.password_change_url, data=self.change_password_data, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) # Remove credentials self.client.credentials() response = self.client.post(self.login_url, self.reusable_user_data_change_password, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertIn('key', response.content) @override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME) def test_login_username_auth_method_correct_password_change(self): """ Tests login is succesful with a correct password change (login auth username). """ self.common_test_login_correct_password_change() @override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL) def test_login_email_auth_method_correct_password_change(self): """ Tests login is succesful with a correct password change (login auth email). """ self.common_test_login_correct_password_change() @override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL) def test_login_username_email_auth_method_correct_password_change(self): """ Tests login is succesful with a correct password change (login auth username_email). """ self.common_test_login_correct_password_change() def test_login_fail_no_input(self): """ Tests login fails when you provide no username and no email (login auth username_email). 
""" get_user_model().objects.create_user('admin', '[email protected]', 'password12') data = {"username": '', "email": '', "password": ''} response = self.client.post(self.login_url, data, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST) @override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME) def test_login_username_auth_method_fail_no_input(self): """ Tests login fails when you provide no username (login auth username). """ get_user_model().objects.create_user('admin', '[email protected]', 'password12') data = {"username": '', "email": "[email protected]", "password": 'password12'} response = self.client.post(self.login_url, data, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST) @override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL) def test_login_email_auth_method_fail_no_input(self): """ Tests login fails when you provide no username (login auth email). """ get_user_model().objects.create_user('admin', '[email protected]', 'password12') data = {"username": "admin", "email": '', "password": 'password12'} response = self.client.post(self.login_url, data, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST) @override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL) def test_login_username_email_auth_method_fail_no_input(self): """ Tests login fails when you provide no username and no email (login auth username_email). 
""" get_user_model().objects.create_user('admin', '[email protected]', 'password12') data = {"username": '', "email": '', "password": 'password12'} response = self.client.post(self.login_url, data, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST) # need to check for token # test login with password change # test login with wrong password chaneg if fails def test_logout(self): """ Tests basic logout functionality. """ self.create_user_and_login() response = self.client.post(self.logout_url, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertEquals(response.content, '{"success":"Successfully logged out."}') def test_logout_but_already_logged_out(self): """ Tests logout when already logged out. """ self.create_user_and_login() response = self.client.post(self.logout_url, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertEquals(response.content, '{"success":"Successfully logged out."}') self.client.credentials() # remember to remove manual token credential response = self.client.post(self.logout_url, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK, response.content) self.assertEquals(response.content, '{"success":"Successfully logged out."}') def test_change_password_basic(self): """ Tests basic functionality of 'change of password'. """ self.create_user_and_login() response = self.client.post(self.password_change_url, data=self.change_password_data, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertEquals(response.content, '{"success":"New password has been saved."}') def test_change_password_basic_fails_not_authorised(self): """ Tests basic functionality of 'change of password' fails if not authorised. 
""" get_user_model().objects.create_user('admin', '[email protected]', 'password12') response = self.client.post(self.password_change_url, data=self.change_password_data, format='json') self.assertEquals(response.status_code, status.HTTP_401_UNAUTHORIZED) self.assertEquals(response.content, '{"detail":"Authentication credentials were not provided."}') def common_change_password_login_fail_with_old_password(self, password_change_data): self.create_user_and_login() response = self.client.post(self.password_change_url, data=password_change_data, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.client.credentials() # Remove credentials response = self.client.post(self.login_url, self.reusable_user_data, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST) def common_change_password_login_pass_with_new_password(self, password_change_data): self.create_user_and_login() response = self.client.post(self.password_change_url, password_change_data, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.client.credentials() # Remove credentials response = self.client.post(self.login_url, self.reusable_user_data_change_password, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) def common_change_password_login_fail_with_old_password_pass_with_new_password(self, password_change_data): """ Tests change of password with old password fails but new password successes. 
""" self.create_user_and_login() response = self.client.post(self.password_change_url, password_change_data, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK, response.content) self.client.credentials() # Remove credentials response = self.client.post(self.login_url, self.reusable_user_data, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST) response = self.client.post(self.login_url, self.reusable_user_data_change_password, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK, response.content) def test_change_password_login_fail_with_old_password(self): """ Tests change of password with old password. """ self.common_change_password_login_fail_with_old_password(self.change_password_data) def test_change_password_login_pass_with_new_password(self): """ Tests change of password with new password. """ self.common_change_password_login_pass_with_new_password(self.change_password_data) def test_change_password_login_fail_with_old_password_pass_with_new_password(self): """ Tests change of password with old password fails but new password successes. """ self.common_change_password_login_fail_with_old_password_pass_with_new_password(self.change_password_data) @override_settings(OLD_PASSWORD_FIELD_ENABLED=True) def test_change_password_old_password_field_required_old_password_field_enabled(self): """ Tests basic functionality of 'change of password' fails if old password not given as part of input (old password field enabled). 
""" self.create_user_and_login() response = self.client.post(self.password_change_url, data=self.change_password_data, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST) self.assertEquals(response.content, '{"old_password":["This field is required."]}') @override_settings(OLD_PASSWORD_FIELD_ENABLED=True) def test_change_password_basic_old_password_field_enabled(self): """ Tests basic functionality of 'change of password' (old password enabled). """ self.create_user_and_login() response = self.client.post(self.password_change_url, data=self.change_password_data_old_password_field_enabled, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertEquals(response.content, '{"success":"New password has been saved."}') @override_settings(OLD_PASSWORD_FIELD_ENABLED=True) def test_change_password_basic_fails_not_authorised_old_password_field_enabled(self): """ Tests basic functionality of 'change of password' fails if not authorised (old password field enabled). """ get_user_model().objects.create_user('admin', '[email protected]', 'password12') response = self.client.post(self.password_change_url, data=self.change_password_data_old_password_field_enabled, format='json') self.assertEquals(response.status_code, status.HTTP_401_UNAUTHORIZED) self.assertEquals(response.content, '{"detail":"Authentication credentials were not provided."}') @override_settings(OLD_PASSWORD_FIELD_ENABLED=True) def test_change_password_login_fail_with_old_password_old_password_field_enabled(self): """ Tests change of password with old password (old password field enabled). """ self.common_change_password_login_fail_with_old_password(self.change_password_data_old_password_field_enabled) @override_settings(OLD_PASSWORD_FIELD_ENABLED=True) def test_change_password_login_pass_with_new_password_old_password_field_enabled(self): """ Tests change of password with new password (old password field enabled). 
""" self.common_change_password_login_pass_with_new_password(self.change_password_data_old_password_field_enabled) @override_settings(OLD_PASSWORD_FIELD_ENABLED=True) def test_change_password_login_fail_with_old_password_pass_with_new_password_old_password_field_enabled(self): """ Tests change of password with old password fails but new password successes (old password field enabled). """ self.common_change_password_login_fail_with_old_password_pass_with_new_password(self.change_password_data_old_password_field_enabled) """ Registrations Tests =================== """ def common_test_registration_basic(self, data): response = self.client.post(self.register_url, data, format='json') self.assertEquals(response.status_code, status.HTTP_201_CREATED, response.content) return response @override_settings(ACCOUNT_EMAIL_REQUIRED=True, ACCOUNT_USERNAME_REQUIRED=True) def test_registration_basic(self): """ Tests basic functionality of registration. """ self.common_test_registration_basic(self.reusable_register_user_data) @override_settings(ACCOUNT_EMAIL_REQUIRED=True, ACCOUNT_USERNAME_REQUIRED=False) def test_registration_basic_no_username(self): """ Tests basic functionality of registration (no username required). """ self.common_test_registration_basic(self.reusable_register_user_data_no_username) @override_settings(ACCOUNT_EMAIL_REQUIRED=False, ACCOUNT_USERNAME_REQUIRED=True) def test_registration_basic_no_email(self): """ Tests basic functionality of registration (no username required). """ self.common_test_registration_basic(self.reusable_register_user_data_no_email) @override_settings(ACCOUNTS_REGISTRATION_OPEN=False) def test_registration_basic_registration_not_open(self): """ Tests basic registration fails if registration is closed. 
""" response = self.client.post(self.register_url, self.reusable_register_user_data, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST, response.content) @override_settings(ACCOUNT_EMAIL_VERIFICATION="none") def test_registration_email_verification_not_necessary(self): """ Tests you can log in without email verification """ self.common_test_registration_basic(self.reusable_register_user_data) response = self.client.post(self.login_url, self.reusable_user_data, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) @override_settings(ACCOUNT_EMAIL_VERIFICATION="optional") def test_registration_email_verification_neccessary(self): """ Tests you can log in without email verification """ self.common_test_registration_basic(self.reusable_register_user_data) response = self.client.post(self.login_url, self.reusable_user_data, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) def common_test_registration(self): self.common_test_registration_basic(self.reusable_register_user_data1) response = self.client.post(self.login_url, {'email': '[email protected]', 'password': 'password12'}, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST) def common_test_registration_email_verification_not_necessary_email(self): self.common_test_registration_basic(self.reusable_register_user_data1) response = self.client.post(self.login_url, {'email': '[email protected]', 'password': 'password12'}, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) def common_test_registration_email_verification_not_necessary_username(self): self.common_test_registration_basic(self.reusable_register_user_data1) response = self.client.post(self.login_url, {'username': 'admin1', 'password': 'password12'}, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) @override_settings(ACCOUNT_EMAIL_VERIFICATION="none", 
ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL) def test_registration_email_verification_neccessary_email(self): """ Tests you can log in without email verification """ self.common_test_registration_email_verification_not_necessary_email() @override_settings(ACCOUNT_EMAIL_VERIFICATION="optional", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL) def test_registration_email_verification_neccessary_optional_email(self): """ Tests you can log in without email verification """ self.common_test_registration_email_verification_not_necessary_email() @override_settings(ACCOUNT_EMAIL_VERIFICATION="none", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME) def test_registration_email_verification_neccessary_username(self): """ Tests you can log in without email verification """ self.common_test_registration_email_verification_not_necessary_username() @override_settings(ACCOUNT_EMAIL_VERIFICATION="optional", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME) def test_registration_email_verification_neccessary_optional_username(self): """ Tests you can log in without email verification """ self.common_test_registration_email_verification_not_necessary_username() @override_settings(ACCOUNT_EMAIL_VERIFICATION="none", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL) def test_registration_email_verification_neccessary_username_email(self): """ Tests you canT log in without email verification for username & email auth. 
""" self.common_test_registration_basic(self.reusable_register_user_data1) response = self.client.post(self.login_url, {'username': 'admin1', 'email': '[email protected]', 'password': 'password12'}, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) @override_settings(ACCOUNT_EMAIL_VERIFICATION="optional", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL) def test_registration_email_verification_neccessary_optional_username_email(self): """ Tests you canT log in without email verification for username & email auth. """ self.common_test_registration_basic(self.reusable_register_user_data1) response = self.client.post(self.login_url, {'username': 'admin1', 'email': '[email protected]', 'password': 'password12'}, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) @override_settings(ACCOUNT_EMAIL_VERIFICATION="mandatory", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME) def test_registration_email_verification_necessary_login_fail_username(self): """ Tests you can log in without email verification """ self.common_test_registration_basic(self.reusable_register_user_data1) response = self.client.post(self.login_url, {'username': 'admin1', 'password': 'password12'}, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST, response.content) @override_settings(ACCOUNT_EMAIL_VERIFICATION="mandatory", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL) def test_registration_email_verification_necessary_login_fail_email(self): """ Tests you can log in without email verification """ self.common_test_registration_basic(self.reusable_register_user_data1) response = self.client.post(self.login_url, {'email': '[email protected]', 'password': 'password12'}, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST, response.content) @override_settings(ACCOUNT_EMAIL_VERIFICATION="mandatory", 
ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL) def test_registration_email_verification_necessary_login_fail_username_email(self): """ Tests you can log in without email verification """ self.common_test_registration_basic({'username': 'admin_man', 'email': '[email protected]', 'password1': 'password12', 'password2': 'password12'}) response = self.client.post(self.login_url, {'username': 'admin_man', 'password': 'password12'}, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST) def common_registration_email_verification_neccessary_verified_login(self, login_data): mail_count = len(mail.outbox) reg_response = self.common_test_registration_basic(self.reusable_register_user_data1) self.assertEquals(len(mail.outbox), mail_count + 1) new_user = get_user_model().objects.latest('id') login_response = self.client.post(self.login_url, login_data, format='json') self.assertEquals(login_response.status_code, status.HTTP_400_BAD_REQUEST) # verify email email_confirmation = new_user.emailaddress_set.get(email=self.reusable_register_user_data1['email']).emailconfirmation_set.order_by('-created')[0] verify_response = self.client.post(self.verify_url, {'key': email_confirmation.key}, format='json') self.assertEquals(verify_response.status_code, status.HTTP_200_OK) login_response = self.client.post(self.login_url, login_data, format='json') self.assertEquals(login_response.status_code, status.HTTP_200_OK) @override_settings(ACCOUNT_EMAIL_VERIFICATION="mandatory", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME) def test_registration_email_verification_neccessary_verified_login_username(self): """ Tests you can log in without email verification """ self.common_registration_email_verification_neccessary_verified_login({'username': 'admin1', 'password': 'password12'}) @override_settings(ACCOUNT_EMAIL_VERIFICATION="mandatory", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL) 
def test_registration_email_verification_neccessary_verified_login_email(self): """ Tests you can log in without email verification """ self.common_registration_email_verification_neccessary_verified_login({'email': '[email protected]', 'password': 'password12'}) @override_settings(ACCOUNT_EMAIL_VERIFICATION="mandatory", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL) def test_registration_email_verification_neccessary_verified_login_username_email(self): """ Tests you can log in without email verification """ self.common_registration_email_verification_neccessary_verified_login({'username': 'admin1', 'password': 'password12'}) """ Password Reset Tests ==================== """ def test_password_reset(self): """ Test basic functionality of password reset. """ get_user_model().objects.create_user('admin', '[email protected]', 'password12') payload = {'email': '[email protected]'} response = self.client.post(self.password_reset_url, payload, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertEquals(response.content, '{"success":"Password reset e-mail has been sent."}') @override_settings(ACCOUNTS_PASSWORD_RESET_NOTIFY_EMAIL_NOT_IN_SYSTEM=True) def test_password_reset_fail_no_user_with_email_no_notify_not_in_system(self): """ Test basic functionality of password reset fails when there is no email on record (notify email not in system). """ payload = {'email': '[email protected]'} response = self.client.post(self.password_reset_url, payload, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST) self.assertEquals(response.content, '{"error":"User with email doesn\'t exist. Did not send reset email."}') @override_settings(ACCOUNTS_PASSWORD_RESET_NOTIFY_EMAIL_NOT_IN_SYSTEM=False) def test_password_reset_no_user_with_email_no_notify_not_in_system(self): """ Test basic functionality of password reset fails when there is no email on record. 
""" payload = {'email': '[email protected]'} response = self.client.post(self.password_reset_url, payload, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertEquals(response.content, '{"success":"Password reset e-mail has been sent."}') def test_password_reset_confirm_fail_invalid_token(self): """ Test password reset confirm fails if token is invalid. """ user = get_user_model().objects.create_user('admin', '[email protected]', 'password12') url_kwargs = self._generate_uid_and_token(user) data = { 'new_password1': 'new_password', 'new_password2': 'new_password', 'uid': url_kwargs['uid'], 'token': '-wrong-token-' } response = self.client.post(self.rest_password_reset_confirm_url, data, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST) self.assertEquals(response.content, '{"token":["Invalid value"]}') def test_password_reset_confirm_fail_invalid_uid(self): """ Test password reset confirm fails if uid is invalid. """ user = get_user_model().objects.create_user('admin', '[email protected]', 'password12') url_kwargs = self._generate_uid_and_token(user) data = { 'new_password1': 'new_password', 'new_password2': 'new_password', 'uid': 0, 'token': url_kwargs['token'] } response = self.client.post(self.rest_password_reset_confirm_url, data, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST) self.assertEquals(response.content, '{"uid":["Invalid value"]}') def test_password_reset_confirm_fail_passwords_not_the_same(self): """ Test password reset confirm fails if uid is invalid. 
""" user = get_user_model().objects.create_user('admin', '[email protected]', 'password12') url_kwargs = self._generate_uid_and_token(user) data = { 'new_password1': 'new_password', 'new_password2': 'new_not_the_same_password', 'uid': url_kwargs['uid'], 'token': url_kwargs['token'] } response = self.client.post(self.rest_password_reset_confirm_url, data, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST) self.assertEquals(response.content, '{"new_password2":["The two password fields didn\'t match."]}') def test_password_reset_confirm_login(self): """ Tests password reset confirm works -> can login afterwards. """ user = get_user_model().objects.create_user('admin', '[email protected]', 'password12') url_kwargs = self._generate_uid_and_token(user) data = { 'new_password1': 'new_password', 'new_password2': 'new_password', 'uid': url_kwargs['uid'], 'token': url_kwargs['token'] } response = self.client.post(self.rest_password_reset_confirm_url, data, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) response = self.client.post(self.login_url, {'username': 'admin', 'email': '[email protected]', 'password': 'new_password'}, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) def test_password_reset_confirm_login_fails_with_old_password(self): """ Tests password reset confirm fails with old password. 
""" user = get_user_model().objects.create_user('admin', '[email protected]', 'password12') url_kwargs = self._generate_uid_and_token(user) data = { 'new_password1': 'new_password', 'new_password2': 'new_password', 'uid': url_kwargs['uid'], 'token': url_kwargs['token'] } response = self.client.post(self.rest_password_reset_confirm_url, data, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) response = self.client.post(self.login_url, {'username': 'admin', 'email': '[email protected]', 'password': 'password12'}, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST) """ User Detail Tests ================= """ def test_user_details_get(self): """ Test to retrieve user details. """ self.create_user_and_login() response = self.client.get(self.user_url, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertEquals(response.content, '{"username":"admin","email":"[email protected]","first_name":"","last_name":""}') def test_user_details_put(self): """ Test to put update user details. """ self.create_user_and_login() response = self.client.put(self.user_url, {"username":"changed","email":"[email protected]","first_name":"changed","last_name":"name"}, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertEquals(response.content, '{"username":"changed","email":"[email protected]","first_name":"changed","last_name":"name"}') def test_user_details_patch(self): """ Test to patch update user details. """ self.create_user_and_login() response = self.client.patch(self.user_url, {'username': 'changed_username', 'email': '[email protected]'}, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertEquals(response.content, '{"username":"changed_username","email":"[email protected]","first_name":"","last_name":""}') def test_user_details_put_not_authenticated(self): """ Test to put update user details. 
""" get_user_model().objects.create_user('admin', '[email protected]', 'password12') response = self.client.put(self.user_url, {"username":"changed","email":"[email protected]","first_name":"changed","last_name":"name"}, format='json') self.assertEquals(response.status_code, status.HTTP_401_UNAUTHORIZED) def test_user_details_patch_not_authenticated(self): """ Test to patch update user details. """ get_user_model().objects.create_user('admin', '[email protected]', 'password12') response = self.client.patch(self.user_url, {'username': 'changed_username', 'email': '[email protected]'}, format='json') self.assertEquals(response.status_code, status.HTTP_401_UNAUTHORIZED) def test_user_details_get_not_authenticated(self): """ Test to retrieve user details. """ get_user_model().objects.create_user('admin', '[email protected]', 'password12') response = self.client.get(self.user_url, format='json') self.assertEquals(response.status_code, status.HTTP_401_UNAUTHORIZED) class TestAccountsSocial(APITestCase): """ Tests normal for social login. """ urls = 'accounts.test_social_urls' def setUp(self): self.fb_login_url = reverse('fb_login') social_app = SocialApp.objects.create( provider='facebook', name='Facebook', client_id='123123123', secret='321321321', ) site = Site.objects.get_current() social_app.sites.add(site) self.graph_api_url = GRAPH_API_URL + '/me' @responses.activate def test_social_auth(self): """ Tests Social Login. 
""" resp_body = '{"id":"123123123123","first_name":"John","gender":"male","last_name":"Smith","link":"https:\\/\\/www.facebook.com\\/john.smith","locale":"en_US","name":"John Smith","timezone":2,"updated_time":"2014-08-13T10:14:38+0000","username":"john.smith","verified":true}' # noqa responses.add( responses.GET, self.graph_api_url, body=resp_body, status=200, content_type='application/json' ) users_count = get_user_model().objects.all().count() response = self.client.post(self.fb_login_url, {'access_token': 'abc123'}, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertIn('key', response.data) self.assertEqual(get_user_model().objects.all().count(), users_count + 1) @responses.activate def test_social_auth_only_one_user_created(self): """ Tests Social Login. """ resp_body = '{"id":"123123123123","first_name":"John","gender":"male","last_name":"Smith","link":"https:\\/\\/www.facebook.com\\/john.smith","locale":"en_US","name":"John Smith","timezone":2,"updated_time":"2014-08-13T10:14:38+0000","username":"john.smith","verified":true}' # noqa responses.add( responses.GET, self.graph_api_url, body=resp_body, status=200, content_type='application/json' ) users_count = get_user_model().objects.all().count() response = self.client.post(self.fb_login_url, {'access_token': 'abc123'}, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertIn('key', response.data) self.assertEqual(get_user_model().objects.all().count(), users_count + 1) # make sure that second request will not create a new user response = self.client.post(self.fb_login_url, {'access_token': 'abc123'}, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertIn('key', response.data) self.assertEqual(get_user_model().objects.all().count(), users_count + 1) @responses.activate def test_failed_social_auth(self): # fake response responses.add( responses.GET, self.graph_api_url, body='', status=400, 
content_type='application/json' ) response = self.client.post(self.fb_login_url, {'access_token': 'abc123'}, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
Java
/************************************************************************ * $Id: minibidi.c 6910 2006-11-18 15:10:48Z simon $ * * ------------ * Description: * ------------ * This is an implemention of Unicode's Bidirectional Algorithm * (known as UAX #9). * * http://www.unicode.org/reports/tr9/ * * Author: Ahmad Khalifa * * ----------------- * Revision Details: (Updated by Revision Control System) * ----------------- * $Date: 2006-11-18 15:10:48 +0000 (Sat, 18 Nov 2006) $ * $Author: simon $ * $Revision: 6910 $ * * (www.arabeyes.org - under MIT license) * ************************************************************************/ /* * TODO: * ===== * - Explicit marks need to be handled (they are not 100% now) * - Ligatures */ #include <stdlib.h> /* definition of wchar_t*/ #include "misc.h" #define LMASK 0x3F /* Embedding Level mask */ #define OMASK 0xC0 /* Override mask */ #define OISL 0x80 /* Override is L */ #define OISR 0x40 /* Override is R */ /* For standalone compilation in a testing mode. * Still depends on the PuTTY headers for snewn and sfree, but can avoid * _linking_ with any other PuTTY code. */ #ifdef TEST_GETTYPE #define safemalloc malloc #define safefree free #endif /* Shaping Helpers */ #define STYPE(xh) ((((xh) >= SHAPE_FIRST) && ((xh) <= SHAPE_LAST)) ? 
\ shapetypes[(xh)-SHAPE_FIRST].type : SU) /*))*/ #define SISOLATED(xh) (shapetypes[(xh)-SHAPE_FIRST].form_b) #define SFINAL(xh) ((xh)+1) #define SINITIAL(xh) ((xh)+2) #define SMEDIAL(ch) ((ch)+3) #define leastGreaterOdd(x) ( ((x)+1) | 1 ) #define leastGreaterEven(x) ( ((x)+2) &~ 1 ) typedef struct bidi_char { wchar_t origwc, wc; unsigned short index; } bidi_char; /* function declarations */ void flipThisRun(bidi_char *from, unsigned char* level, int max, int count); int findIndexOfRun(unsigned char* level , int start, int count, int tlevel); unsigned char getType(int ch); unsigned char setOverrideBits(unsigned char level, unsigned char override); int getPreviousLevel(unsigned char* level, int from); int do_shape(bidi_char *line, bidi_char *to, int count); int do_bidi(bidi_char *line, int count); void doMirror(wchar_t* ch); /* character types */ enum { L, LRE, LRO, R, AL, RLE, RLO, PDF, EN, ES, ET, AN, CS, NSM, BN, B, S, WS, ON }; /* Shaping Types */ enum { SL, /* Left-Joining, doesnt exist in U+0600 - U+06FF */ SR, /* Right-Joining, ie has Isolated, Final */ SD, /* Dual-Joining, ie has Isolated, Final, Initial, Medial */ SU, /* Non-Joining */ SC /* Join-Causing, like U+0640 (TATWEEL) */ }; typedef struct { char type; wchar_t form_b; } shape_node; /* Kept near the actual table, for verification. 
*/ #define SHAPE_FIRST 0x621 #define SHAPE_LAST 0x64A const shape_node shapetypes[] = { /* index, Typ, Iso, Ligature Index*/ /* 621 */ {SU, 0xFE80}, /* 622 */ {SR, 0xFE81}, /* 623 */ {SR, 0xFE83}, /* 624 */ {SR, 0xFE85}, /* 625 */ {SR, 0xFE87}, /* 626 */ {SD, 0xFE89}, /* 627 */ {SR, 0xFE8D}, /* 628 */ {SD, 0xFE8F}, /* 629 */ {SR, 0xFE93}, /* 62A */ {SD, 0xFE95}, /* 62B */ {SD, 0xFE99}, /* 62C */ {SD, 0xFE9D}, /* 62D */ {SD, 0xFEA1}, /* 62E */ {SD, 0xFEA5}, /* 62F */ {SR, 0xFEA9}, /* 630 */ {SR, 0xFEAB}, /* 631 */ {SR, 0xFEAD}, /* 632 */ {SR, 0xFEAF}, /* 633 */ {SD, 0xFEB1}, /* 634 */ {SD, 0xFEB5}, /* 635 */ {SD, 0xFEB9}, /* 636 */ {SD, 0xFEBD}, /* 637 */ {SD, 0xFEC1}, /* 638 */ {SD, 0xFEC5}, /* 639 */ {SD, 0xFEC9}, /* 63A */ {SD, 0xFECD}, /* 63B */ {SU, 0x0}, /* 63C */ {SU, 0x0}, /* 63D */ {SU, 0x0}, /* 63E */ {SU, 0x0}, /* 63F */ {SU, 0x0}, /* 640 */ {SC, 0x0}, /* 641 */ {SD, 0xFED1}, /* 642 */ {SD, 0xFED5}, /* 643 */ {SD, 0xFED9}, /* 644 */ {SD, 0xFEDD}, /* 645 */ {SD, 0xFEE1}, /* 646 */ {SD, 0xFEE5}, /* 647 */ {SD, 0xFEE9}, /* 648 */ {SR, 0xFEED}, /* 649 */ {SR, 0xFEEF}, /* SD */ /* 64A */ {SD, 0xFEF1} }; /* * Flips the text buffer, according to max level, and * all higher levels * * Input: * from: text buffer, on which to apply flipping * level: resolved levels buffer * max: the maximum level found in this line (should be unsigned char) * count: line size in bidi_char */ void flipThisRun(bidi_char *from, unsigned char *level, int max, int count) { int i, j, k, tlevel; bidi_char temp; j = i = 0; while (i<count && j<count) { /* find the start of the run of level=max */ tlevel = max; i = j = findIndexOfRun(level, i, count, max); /* find the end of the run */ while (i<count && tlevel <= level[i]) { i++; } for (k = i - 1; k > j; k--, j++) { temp = from[k]; from[k] = from[j]; from[j] = temp; } } } /* * Finds the index of a run with level equals tlevel */ int findIndexOfRun(unsigned char* level , int start, int count, int tlevel) { int i; for (i=start; i<count; i++) { 
if (tlevel == level[i]) { return i; } } return count; } /* * Returns the bidi character type of ch. * * The data table in this function is constructed from the Unicode * Character Database, downloadable from unicode.org at the URL * * http://www.unicode.org/Public/UNIDATA/UnicodeData.txt * * by the following fragment of Perl: perl -ne 'split ";"; $num = hex $_[0]; $type = $_[4];' \ -e '$fl = ($_[1] =~ /First/ ? 1 : $_[1] =~ /Last/ ? 2 : 0);' \ -e 'if ($type eq $runtype and ($runend == $num-1 or ' \ -e ' ($fl==2 and $pfl==1))) {$runend = $num;} else { &reset; }' \ -e '$pfl=$fl; END { &reset }; sub reset {' \ -e 'printf" {0x%04x, 0x%04x, %s},\n",$runstart,$runend,$runtype' \ -e ' if defined $runstart and $runtype ne "ON";' \ -e '$runstart=$runend=$num; $runtype=$type;}' \ UnicodeData.txt */ unsigned char getType(int ch) { static const struct { int first, last, type; } lookup[] = { {0x0000, 0x0008, BN}, {0x0009, 0x0009, S}, {0x000a, 0x000a, B}, {0x000b, 0x000b, S}, {0x000c, 0x000c, WS}, {0x000d, 0x000d, B}, {0x000e, 0x001b, BN}, {0x001c, 0x001e, B}, {0x001f, 0x001f, S}, {0x0020, 0x0020, WS}, {0x0023, 0x0025, ET}, {0x002b, 0x002b, ES}, {0x002c, 0x002c, CS}, {0x002d, 0x002d, ES}, {0x002e, 0x002f, CS}, {0x0030, 0x0039, EN}, {0x003a, 0x003a, CS}, {0x0041, 0x005a, L}, {0x0061, 0x007a, L}, {0x007f, 0x0084, BN}, {0x0085, 0x0085, B}, {0x0086, 0x009f, BN}, {0x00a0, 0x00a0, CS}, {0x00a2, 0x00a5, ET}, {0x00aa, 0x00aa, L}, {0x00ad, 0x00ad, BN}, {0x00b0, 0x00b1, ET}, {0x00b2, 0x00b3, EN}, {0x00b5, 0x00b5, L}, {0x00b9, 0x00b9, EN}, {0x00ba, 0x00ba, L}, {0x00c0, 0x00d6, L}, {0x00d8, 0x00f6, L}, {0x00f8, 0x0236, L}, {0x0250, 0x02b8, L}, {0x02bb, 0x02c1, L}, {0x02d0, 0x02d1, L}, {0x02e0, 0x02e4, L}, {0x02ee, 0x02ee, L}, {0x0300, 0x0357, NSM}, {0x035d, 0x036f, NSM}, {0x037a, 0x037a, L}, {0x0386, 0x0386, L}, {0x0388, 0x038a, L}, {0x038c, 0x038c, L}, {0x038e, 0x03a1, L}, {0x03a3, 0x03ce, L}, {0x03d0, 0x03f5, L}, {0x03f7, 0x03fb, L}, {0x0400, 0x0482, L}, {0x0483, 0x0486, NSM}, {0x0488, 
0x0489, NSM}, {0x048a, 0x04ce, L}, {0x04d0, 0x04f5, L}, {0x04f8, 0x04f9, L}, {0x0500, 0x050f, L}, {0x0531, 0x0556, L}, {0x0559, 0x055f, L}, {0x0561, 0x0587, L}, {0x0589, 0x0589, L}, {0x0591, 0x05a1, NSM}, {0x05a3, 0x05b9, NSM}, {0x05bb, 0x05bd, NSM}, {0x05be, 0x05be, R}, {0x05bf, 0x05bf, NSM}, {0x05c0, 0x05c0, R}, {0x05c1, 0x05c2, NSM}, {0x05c3, 0x05c3, R}, {0x05c4, 0x05c4, NSM}, {0x05d0, 0x05ea, R}, {0x05f0, 0x05f4, R}, {0x0600, 0x0603, AL}, {0x060c, 0x060c, CS}, {0x060d, 0x060d, AL}, {0x0610, 0x0615, NSM}, {0x061b, 0x061b, AL}, {0x061f, 0x061f, AL}, {0x0621, 0x063a, AL}, {0x0640, 0x064a, AL}, {0x064b, 0x0658, NSM}, {0x0660, 0x0669, AN}, {0x066a, 0x066a, ET}, {0x066b, 0x066c, AN}, {0x066d, 0x066f, AL}, {0x0670, 0x0670, NSM}, {0x0671, 0x06d5, AL}, {0x06d6, 0x06dc, NSM}, {0x06dd, 0x06dd, AL}, {0x06de, 0x06e4, NSM}, {0x06e5, 0x06e6, AL}, {0x06e7, 0x06e8, NSM}, {0x06ea, 0x06ed, NSM}, {0x06ee, 0x06ef, AL}, {0x06f0, 0x06f9, EN}, {0x06fa, 0x070d, AL}, {0x070f, 0x070f, BN}, {0x0710, 0x0710, AL}, {0x0711, 0x0711, NSM}, {0x0712, 0x072f, AL}, {0x0730, 0x074a, NSM}, {0x074d, 0x074f, AL}, {0x0780, 0x07a5, AL}, {0x07a6, 0x07b0, NSM}, {0x07b1, 0x07b1, AL}, {0x0901, 0x0902, NSM}, {0x0903, 0x0939, L}, {0x093c, 0x093c, NSM}, {0x093d, 0x0940, L}, {0x0941, 0x0948, NSM}, {0x0949, 0x094c, L}, {0x094d, 0x094d, NSM}, {0x0950, 0x0950, L}, {0x0951, 0x0954, NSM}, {0x0958, 0x0961, L}, {0x0962, 0x0963, NSM}, {0x0964, 0x0970, L}, {0x0981, 0x0981, NSM}, {0x0982, 0x0983, L}, {0x0985, 0x098c, L}, {0x098f, 0x0990, L}, {0x0993, 0x09a8, L}, {0x09aa, 0x09b0, L}, {0x09b2, 0x09b2, L}, {0x09b6, 0x09b9, L}, {0x09bc, 0x09bc, NSM}, {0x09bd, 0x09c0, L}, {0x09c1, 0x09c4, NSM}, {0x09c7, 0x09c8, L}, {0x09cb, 0x09cc, L}, {0x09cd, 0x09cd, NSM}, {0x09d7, 0x09d7, L}, {0x09dc, 0x09dd, L}, {0x09df, 0x09e1, L}, {0x09e2, 0x09e3, NSM}, {0x09e6, 0x09f1, L}, {0x09f2, 0x09f3, ET}, {0x09f4, 0x09fa, L}, {0x0a01, 0x0a02, NSM}, {0x0a03, 0x0a03, L}, {0x0a05, 0x0a0a, L}, {0x0a0f, 0x0a10, L}, {0x0a13, 0x0a28, L}, {0x0a2a, 
0x0a30, L}, {0x0a32, 0x0a33, L}, {0x0a35, 0x0a36, L}, {0x0a38, 0x0a39, L}, {0x0a3c, 0x0a3c, NSM}, {0x0a3e, 0x0a40, L}, {0x0a41, 0x0a42, NSM}, {0x0a47, 0x0a48, NSM}, {0x0a4b, 0x0a4d, NSM}, {0x0a59, 0x0a5c, L}, {0x0a5e, 0x0a5e, L}, {0x0a66, 0x0a6f, L}, {0x0a70, 0x0a71, NSM}, {0x0a72, 0x0a74, L}, {0x0a81, 0x0a82, NSM}, {0x0a83, 0x0a83, L}, {0x0a85, 0x0a8d, L}, {0x0a8f, 0x0a91, L}, {0x0a93, 0x0aa8, L}, {0x0aaa, 0x0ab0, L}, {0x0ab2, 0x0ab3, L}, {0x0ab5, 0x0ab9, L}, {0x0abc, 0x0abc, NSM}, {0x0abd, 0x0ac0, L}, {0x0ac1, 0x0ac5, NSM}, {0x0ac7, 0x0ac8, NSM}, {0x0ac9, 0x0ac9, L}, {0x0acb, 0x0acc, L}, {0x0acd, 0x0acd, NSM}, {0x0ad0, 0x0ad0, L}, {0x0ae0, 0x0ae1, L}, {0x0ae2, 0x0ae3, NSM}, {0x0ae6, 0x0aef, L}, {0x0af1, 0x0af1, ET}, {0x0b01, 0x0b01, NSM}, {0x0b02, 0x0b03, L}, {0x0b05, 0x0b0c, L}, {0x0b0f, 0x0b10, L}, {0x0b13, 0x0b28, L}, {0x0b2a, 0x0b30, L}, {0x0b32, 0x0b33, L}, {0x0b35, 0x0b39, L}, {0x0b3c, 0x0b3c, NSM}, {0x0b3d, 0x0b3e, L}, {0x0b3f, 0x0b3f, NSM}, {0x0b40, 0x0b40, L}, {0x0b41, 0x0b43, NSM}, {0x0b47, 0x0b48, L}, {0x0b4b, 0x0b4c, L}, {0x0b4d, 0x0b4d, NSM}, {0x0b56, 0x0b56, NSM}, {0x0b57, 0x0b57, L}, {0x0b5c, 0x0b5d, L}, {0x0b5f, 0x0b61, L}, {0x0b66, 0x0b71, L}, {0x0b82, 0x0b82, NSM}, {0x0b83, 0x0b83, L}, {0x0b85, 0x0b8a, L}, {0x0b8e, 0x0b90, L}, {0x0b92, 0x0b95, L}, {0x0b99, 0x0b9a, L}, {0x0b9c, 0x0b9c, L}, {0x0b9e, 0x0b9f, L}, {0x0ba3, 0x0ba4, L}, {0x0ba8, 0x0baa, L}, {0x0bae, 0x0bb5, L}, {0x0bb7, 0x0bb9, L}, {0x0bbe, 0x0bbf, L}, {0x0bc0, 0x0bc0, NSM}, {0x0bc1, 0x0bc2, L}, {0x0bc6, 0x0bc8, L}, {0x0bca, 0x0bcc, L}, {0x0bcd, 0x0bcd, NSM}, {0x0bd7, 0x0bd7, L}, {0x0be7, 0x0bf2, L}, {0x0bf9, 0x0bf9, ET}, {0x0c01, 0x0c03, L}, {0x0c05, 0x0c0c, L}, {0x0c0e, 0x0c10, L}, {0x0c12, 0x0c28, L}, {0x0c2a, 0x0c33, L}, {0x0c35, 0x0c39, L}, {0x0c3e, 0x0c40, NSM}, {0x0c41, 0x0c44, L}, {0x0c46, 0x0c48, NSM}, {0x0c4a, 0x0c4d, NSM}, {0x0c55, 0x0c56, NSM}, {0x0c60, 0x0c61, L}, {0x0c66, 0x0c6f, L}, {0x0c82, 0x0c83, L}, {0x0c85, 0x0c8c, L}, {0x0c8e, 0x0c90, L}, {0x0c92, 0x0ca8, L}, 
{0x0caa, 0x0cb3, L}, {0x0cb5, 0x0cb9, L}, {0x0cbc, 0x0cbc, NSM}, {0x0cbd, 0x0cc4, L}, {0x0cc6, 0x0cc8, L}, {0x0cca, 0x0ccb, L}, {0x0ccc, 0x0ccd, NSM}, {0x0cd5, 0x0cd6, L}, {0x0cde, 0x0cde, L}, {0x0ce0, 0x0ce1, L}, {0x0ce6, 0x0cef, L}, {0x0d02, 0x0d03, L}, {0x0d05, 0x0d0c, L}, {0x0d0e, 0x0d10, L}, {0x0d12, 0x0d28, L}, {0x0d2a, 0x0d39, L}, {0x0d3e, 0x0d40, L}, {0x0d41, 0x0d43, NSM}, {0x0d46, 0x0d48, L}, {0x0d4a, 0x0d4c, L}, {0x0d4d, 0x0d4d, NSM}, {0x0d57, 0x0d57, L}, {0x0d60, 0x0d61, L}, {0x0d66, 0x0d6f, L}, {0x0d82, 0x0d83, L}, {0x0d85, 0x0d96, L}, {0x0d9a, 0x0db1, L}, {0x0db3, 0x0dbb, L}, {0x0dbd, 0x0dbd, L}, {0x0dc0, 0x0dc6, L}, {0x0dca, 0x0dca, NSM}, {0x0dcf, 0x0dd1, L}, {0x0dd2, 0x0dd4, NSM}, {0x0dd6, 0x0dd6, NSM}, {0x0dd8, 0x0ddf, L}, {0x0df2, 0x0df4, L}, {0x0e01, 0x0e30, L}, {0x0e31, 0x0e31, NSM}, {0x0e32, 0x0e33, L}, {0x0e34, 0x0e3a, NSM}, {0x0e3f, 0x0e3f, ET}, {0x0e40, 0x0e46, L}, {0x0e47, 0x0e4e, NSM}, {0x0e4f, 0x0e5b, L}, {0x0e81, 0x0e82, L}, {0x0e84, 0x0e84, L}, {0x0e87, 0x0e88, L}, {0x0e8a, 0x0e8a, L}, {0x0e8d, 0x0e8d, L}, {0x0e94, 0x0e97, L}, {0x0e99, 0x0e9f, L}, {0x0ea1, 0x0ea3, L}, {0x0ea5, 0x0ea5, L}, {0x0ea7, 0x0ea7, L}, {0x0eaa, 0x0eab, L}, {0x0ead, 0x0eb0, L}, {0x0eb1, 0x0eb1, NSM}, {0x0eb2, 0x0eb3, L}, {0x0eb4, 0x0eb9, NSM}, {0x0ebb, 0x0ebc, NSM}, {0x0ebd, 0x0ebd, L}, {0x0ec0, 0x0ec4, L}, {0x0ec6, 0x0ec6, L}, {0x0ec8, 0x0ecd, NSM}, {0x0ed0, 0x0ed9, L}, {0x0edc, 0x0edd, L}, {0x0f00, 0x0f17, L}, {0x0f18, 0x0f19, NSM}, {0x0f1a, 0x0f34, L}, {0x0f35, 0x0f35, NSM}, {0x0f36, 0x0f36, L}, {0x0f37, 0x0f37, NSM}, {0x0f38, 0x0f38, L}, {0x0f39, 0x0f39, NSM}, {0x0f3e, 0x0f47, L}, {0x0f49, 0x0f6a, L}, {0x0f71, 0x0f7e, NSM}, {0x0f7f, 0x0f7f, L}, {0x0f80, 0x0f84, NSM}, {0x0f85, 0x0f85, L}, {0x0f86, 0x0f87, NSM}, {0x0f88, 0x0f8b, L}, {0x0f90, 0x0f97, NSM}, {0x0f99, 0x0fbc, NSM}, {0x0fbe, 0x0fc5, L}, {0x0fc6, 0x0fc6, NSM}, {0x0fc7, 0x0fcc, L}, {0x0fcf, 0x0fcf, L}, {0x1000, 0x1021, L}, {0x1023, 0x1027, L}, {0x1029, 0x102a, L}, {0x102c, 0x102c, L}, {0x102d, 0x1030, 
NSM}, {0x1031, 0x1031, L}, {0x1032, 0x1032, NSM}, {0x1036, 0x1037, NSM}, {0x1038, 0x1038, L}, {0x1039, 0x1039, NSM}, {0x1040, 0x1057, L}, {0x1058, 0x1059, NSM}, {0x10a0, 0x10c5, L}, {0x10d0, 0x10f8, L}, {0x10fb, 0x10fb, L}, {0x1100, 0x1159, L}, {0x115f, 0x11a2, L}, {0x11a8, 0x11f9, L}, {0x1200, 0x1206, L}, {0x1208, 0x1246, L}, {0x1248, 0x1248, L}, {0x124a, 0x124d, L}, {0x1250, 0x1256, L}, {0x1258, 0x1258, L}, {0x125a, 0x125d, L}, {0x1260, 0x1286, L}, {0x1288, 0x1288, L}, {0x128a, 0x128d, L}, {0x1290, 0x12ae, L}, {0x12b0, 0x12b0, L}, {0x12b2, 0x12b5, L}, {0x12b8, 0x12be, L}, {0x12c0, 0x12c0, L}, {0x12c2, 0x12c5, L}, {0x12c8, 0x12ce, L}, {0x12d0, 0x12d6, L}, {0x12d8, 0x12ee, L}, {0x12f0, 0x130e, L}, {0x1310, 0x1310, L}, {0x1312, 0x1315, L}, {0x1318, 0x131e, L}, {0x1320, 0x1346, L}, {0x1348, 0x135a, L}, {0x1361, 0x137c, L}, {0x13a0, 0x13f4, L}, {0x1401, 0x1676, L}, {0x1680, 0x1680, WS}, {0x1681, 0x169a, L}, {0x16a0, 0x16f0, L}, {0x1700, 0x170c, L}, {0x170e, 0x1711, L}, {0x1712, 0x1714, NSM}, {0x1720, 0x1731, L}, {0x1732, 0x1734, NSM}, {0x1735, 0x1736, L}, {0x1740, 0x1751, L}, {0x1752, 0x1753, NSM}, {0x1760, 0x176c, L}, {0x176e, 0x1770, L}, {0x1772, 0x1773, NSM}, {0x1780, 0x17b6, L}, {0x17b7, 0x17bd, NSM}, {0x17be, 0x17c5, L}, {0x17c6, 0x17c6, NSM}, {0x17c7, 0x17c8, L}, {0x17c9, 0x17d3, NSM}, {0x17d4, 0x17da, L}, {0x17db, 0x17db, ET}, {0x17dc, 0x17dc, L}, {0x17dd, 0x17dd, NSM}, {0x17e0, 0x17e9, L}, {0x180b, 0x180d, NSM}, {0x180e, 0x180e, WS}, {0x1810, 0x1819, L}, {0x1820, 0x1877, L}, {0x1880, 0x18a8, L}, {0x18a9, 0x18a9, NSM}, {0x1900, 0x191c, L}, {0x1920, 0x1922, NSM}, {0x1923, 0x1926, L}, {0x1927, 0x192b, NSM}, {0x1930, 0x1931, L}, {0x1932, 0x1932, NSM}, {0x1933, 0x1938, L}, {0x1939, 0x193b, NSM}, {0x1946, 0x196d, L}, {0x1970, 0x1974, L}, {0x1d00, 0x1d6b, L}, {0x1e00, 0x1e9b, L}, {0x1ea0, 0x1ef9, L}, {0x1f00, 0x1f15, L}, {0x1f18, 0x1f1d, L}, {0x1f20, 0x1f45, L}, {0x1f48, 0x1f4d, L}, {0x1f50, 0x1f57, L}, {0x1f59, 0x1f59, L}, {0x1f5b, 0x1f5b, L}, {0x1f5d, 0x1f5d, L}, 
{0x1f5f, 0x1f7d, L}, {0x1f80, 0x1fb4, L}, {0x1fb6, 0x1fbc, L}, {0x1fbe, 0x1fbe, L}, {0x1fc2, 0x1fc4, L}, {0x1fc6, 0x1fcc, L}, {0x1fd0, 0x1fd3, L}, {0x1fd6, 0x1fdb, L}, {0x1fe0, 0x1fec, L}, {0x1ff2, 0x1ff4, L}, {0x1ff6, 0x1ffc, L}, {0x2000, 0x200a, WS}, {0x200b, 0x200d, BN}, {0x200e, 0x200e, L}, {0x200f, 0x200f, R}, {0x2028, 0x2028, WS}, {0x2029, 0x2029, B}, {0x202a, 0x202a, LRE}, {0x202b, 0x202b, RLE}, {0x202c, 0x202c, PDF}, {0x202d, 0x202d, LRO}, {0x202e, 0x202e, RLO}, {0x202f, 0x202f, WS}, {0x2030, 0x2034, ET}, {0x2044, 0x2044, CS}, {0x205f, 0x205f, WS}, {0x2060, 0x2063, BN}, {0x206a, 0x206f, BN}, {0x2070, 0x2070, EN}, {0x2071, 0x2071, L}, {0x2074, 0x2079, EN}, {0x207a, 0x207b, ET}, {0x207f, 0x207f, L}, {0x2080, 0x2089, EN}, {0x208a, 0x208b, ET}, {0x20a0, 0x20b1, ET}, {0x20d0, 0x20ea, NSM}, {0x2102, 0x2102, L}, {0x2107, 0x2107, L}, {0x210a, 0x2113, L}, {0x2115, 0x2115, L}, {0x2119, 0x211d, L}, {0x2124, 0x2124, L}, {0x2126, 0x2126, L}, {0x2128, 0x2128, L}, {0x212a, 0x212d, L}, {0x212e, 0x212e, ET}, {0x212f, 0x2131, L}, {0x2133, 0x2139, L}, {0x213d, 0x213f, L}, {0x2145, 0x2149, L}, {0x2160, 0x2183, L}, {0x2212, 0x2213, ET}, {0x2336, 0x237a, L}, {0x2395, 0x2395, L}, {0x2488, 0x249b, EN}, {0x249c, 0x24e9, L}, {0x2800, 0x28ff, L}, {0x3000, 0x3000, WS}, {0x3005, 0x3007, L}, {0x3021, 0x3029, L}, {0x302a, 0x302f, NSM}, {0x3031, 0x3035, L}, {0x3038, 0x303c, L}, {0x3041, 0x3096, L}, {0x3099, 0x309a, NSM}, {0x309d, 0x309f, L}, {0x30a1, 0x30fa, L}, {0x30fc, 0x30ff, L}, {0x3105, 0x312c, L}, {0x3131, 0x318e, L}, {0x3190, 0x31b7, L}, {0x31f0, 0x321c, L}, {0x3220, 0x3243, L}, {0x3260, 0x327b, L}, {0x327f, 0x32b0, L}, {0x32c0, 0x32cb, L}, {0x32d0, 0x32fe, L}, {0x3300, 0x3376, L}, {0x337b, 0x33dd, L}, {0x33e0, 0x33fe, L}, {0x3400, 0x4db5, L}, {0x4e00, 0x9fa5, L}, {0xa000, 0xa48c, L}, {0xac00, 0xd7a3, L}, {0xd800, 0xfa2d, L}, {0xfa30, 0xfa6a, L}, {0xfb00, 0xfb06, L}, {0xfb13, 0xfb17, L}, {0xfb1d, 0xfb1d, R}, {0xfb1e, 0xfb1e, NSM}, {0xfb1f, 0xfb28, R}, {0xfb29, 0xfb29, ET}, {0xfb2a, 
0xfb36, R}, {0xfb38, 0xfb3c, R}, {0xfb3e, 0xfb3e, R}, {0xfb40, 0xfb41, R}, {0xfb43, 0xfb44, R}, {0xfb46, 0xfb4f, R}, {0xfb50, 0xfbb1, AL}, {0xfbd3, 0xfd3d, AL}, {0xfd50, 0xfd8f, AL}, {0xfd92, 0xfdc7, AL}, {0xfdf0, 0xfdfc, AL}, {0xfe00, 0xfe0f, NSM}, {0xfe20, 0xfe23, NSM}, {0xfe50, 0xfe50, CS}, {0xfe52, 0xfe52, CS}, {0xfe55, 0xfe55, CS}, {0xfe5f, 0xfe5f, ET}, {0xfe62, 0xfe63, ET}, {0xfe69, 0xfe6a, ET}, {0xfe70, 0xfe74, AL}, {0xfe76, 0xfefc, AL}, {0xfeff, 0xfeff, BN}, {0xff03, 0xff05, ET}, {0xff0b, 0xff0b, ET}, {0xff0c, 0xff0c, CS}, {0xff0d, 0xff0d, ET}, {0xff0e, 0xff0e, CS}, {0xff0f, 0xff0f, ES}, {0xff10, 0xff19, EN}, {0xff1a, 0xff1a, CS}, {0xff21, 0xff3a, L}, {0xff41, 0xff5a, L}, {0xff66, 0xffbe, L}, {0xffc2, 0xffc7, L}, {0xffca, 0xffcf, L}, {0xffd2, 0xffd7, L}, {0xffda, 0xffdc, L}, {0xffe0, 0xffe1, ET}, {0xffe5, 0xffe6, ET}, {0x10000, 0x1000b, L}, {0x1000d, 0x10026, L}, {0x10028, 0x1003a, L}, {0x1003c, 0x1003d, L}, {0x1003f, 0x1004d, L}, {0x10050, 0x1005d, L}, {0x10080, 0x100fa, L}, {0x10100, 0x10100, L}, {0x10102, 0x10102, L}, {0x10107, 0x10133, L}, {0x10137, 0x1013f, L}, {0x10300, 0x1031e, L}, {0x10320, 0x10323, L}, {0x10330, 0x1034a, L}, {0x10380, 0x1039d, L}, {0x1039f, 0x1039f, L}, {0x10400, 0x1049d, L}, {0x104a0, 0x104a9, L}, {0x10800, 0x10805, R}, {0x10808, 0x10808, R}, {0x1080a, 0x10835, R}, {0x10837, 0x10838, R}, {0x1083c, 0x1083c, R}, {0x1083f, 0x1083f, R}, {0x1d000, 0x1d0f5, L}, {0x1d100, 0x1d126, L}, {0x1d12a, 0x1d166, L}, {0x1d167, 0x1d169, NSM}, {0x1d16a, 0x1d172, L}, {0x1d173, 0x1d17a, BN}, {0x1d17b, 0x1d182, NSM}, {0x1d183, 0x1d184, L}, {0x1d185, 0x1d18b, NSM}, {0x1d18c, 0x1d1a9, L}, {0x1d1aa, 0x1d1ad, NSM}, {0x1d1ae, 0x1d1dd, L}, {0x1d400, 0x1d454, L}, {0x1d456, 0x1d49c, L}, {0x1d49e, 0x1d49f, L}, {0x1d4a2, 0x1d4a2, L}, {0x1d4a5, 0x1d4a6, L}, {0x1d4a9, 0x1d4ac, L}, {0x1d4ae, 0x1d4b9, L}, {0x1d4bb, 0x1d4bb, L}, {0x1d4bd, 0x1d4c3, L}, {0x1d4c5, 0x1d505, L}, {0x1d507, 0x1d50a, L}, {0x1d50d, 0x1d514, L}, {0x1d516, 0x1d51c, L}, {0x1d51e, 0x1d539, L}, 
{0x1d53b, 0x1d53e, L}, {0x1d540, 0x1d544, L}, {0x1d546, 0x1d546, L}, {0x1d54a, 0x1d550, L}, {0x1d552, 0x1d6a3, L}, {0x1d6a8, 0x1d7c9, L}, {0x1d7ce, 0x1d7ff, EN}, {0x20000, 0x2a6d6, L}, {0x2f800, 0x2fa1d, L}, {0xe0001, 0xe0001, BN}, {0xe0020, 0xe007f, BN}, {0xe0100, 0xe01ef, NSM}, {0xf0000, 0xffffd, L}, {0x100000, 0x10fffd, L} }; int i, j, k; i = -1; j = lenof(lookup); while (j - i > 1) { k = (i + j) / 2; if (ch < lookup[k].first) j = k; else if (ch > lookup[k].last) i = k; else return lookup[k].type; } /* * If we reach here, the character was not in any of the * intervals listed in the lookup table. This means we return * ON (`Other Neutrals'). This is the appropriate code for any * character genuinely not listed in the Unicode table, and * also the table above has deliberately left out any * characters _explicitly_ listed as ON (to save space!). */ return ON; } /* * Function exported to front ends to allow them to identify * bidi-active characters (in case, for example, the platform's * text display function can't conveniently be prevented from doing * its own bidi and so special treatment is required for characters * that would cause the bidi algorithm to activate). * * This function is passed a single Unicode code point, and returns * nonzero if the presence of this code point can possibly cause * the bidi algorithm to do any reordering. Thus, any string * composed entirely of characters for which is_rtl() returns zero * should be safe to pass to a bidi-active platform display * function without fear. * * (is_rtl() must therefore also return true for any character * which would be affected by Arabic shaping, but this isn't * important because all such characters are right-to-left so it * would have flagged them anyway.) */ int is_rtl(int c) { /* * After careful reading of the Unicode bidi algorithm (URL as * given at the top of this file) I believe that the only * character classes which can possibly cause trouble are R, * AL, RLE and RLO. 
I think that any string containing no * character in any of those classes will be displayed * uniformly left-to-right by the Unicode bidi algorithm. */ const int mask = (1<<R) | (1<<AL) | (1<<RLE) | (1<<RLO); return mask & (1 << (getType(c))); } /* * The most significant 2 bits of each level are used to store * Override status of each character * This function sets the override bits of level according * to the value in override, and reurns the new byte. */ unsigned char setOverrideBits(unsigned char level, unsigned char override) { if (override == ON) return level; else if (override == R) return level | OISR; else if (override == L) return level | OISL; return level; } /* * Find the most recent run of the same value in `level', and * return the value _before_ it. Used to process U+202C POP * DIRECTIONAL FORMATTING. */ int getPreviousLevel(unsigned char* level, int from) { if (from > 0) { unsigned char current = level[--from]; while (from >= 0 && level[from] == current) from--; if (from >= 0) return level[from]; return -1; } else return -1; } /* The Main shaping function, and the only one to be used * by the outside world. * * line: buffer to apply shaping to. this must be passed by doBidi() first * to: output buffer for the shaped data * count: number of characters in line */ int do_shape(bidi_char *line, bidi_char *to, int count) { int i, tempShape, ligFlag; for (ligFlag=i=0; i<count; i++) { to[i] = line[i]; tempShape = STYPE(line[i].wc); switch (tempShape) { case SC: break; case SU: break; case SR: tempShape = (i+1 < count ? STYPE(line[i+1].wc) : SU); if ((tempShape == SL) || (tempShape == SD) || (tempShape == SC)) to[i].wc = SFINAL((SISOLATED(line[i].wc))); else to[i].wc = SISOLATED(line[i].wc); break; case SD: /* Make Ligatures */ tempShape = (i+1 < count ? 
STYPE(line[i+1].wc) : SU); if (line[i].wc == 0x644) { if (i > 0) switch (line[i-1].wc) { case 0x622: ligFlag = 1; if ((tempShape == SL) || (tempShape == SD) || (tempShape == SC)) to[i].wc = 0xFEF6; else to[i].wc = 0xFEF5; break; case 0x623: ligFlag = 1; if ((tempShape == SL) || (tempShape == SD) || (tempShape == SC)) to[i].wc = 0xFEF8; else to[i].wc = 0xFEF7; break; case 0x625: ligFlag = 1; if ((tempShape == SL) || (tempShape == SD) || (tempShape == SC)) to[i].wc = 0xFEFA; else to[i].wc = 0xFEF9; break; case 0x627: ligFlag = 1; if ((tempShape == SL) || (tempShape == SD) || (tempShape == SC)) to[i].wc = 0xFEFC; else to[i].wc = 0xFEFB; break; } if (ligFlag) { to[i-1].wc = 0x20; ligFlag = 0; break; } } if ((tempShape == SL) || (tempShape == SD) || (tempShape == SC)) { tempShape = (i > 0 ? STYPE(line[i-1].wc) : SU); if ((tempShape == SR) || (tempShape == SD) || (tempShape == SC)) to[i].wc = SMEDIAL((SISOLATED(line[i].wc))); else to[i].wc = SFINAL((SISOLATED(line[i].wc))); break; } tempShape = (i > 0 ? STYPE(line[i-1].wc) : SU); if ((tempShape == SR) || (tempShape == SD) || (tempShape == SC)) to[i].wc = SINITIAL((SISOLATED(line[i].wc))); else to[i].wc = SISOLATED(line[i].wc); break; } } return 1; } /* * The Main Bidi Function, and the only function that should * be used by the outside world. * * line: a buffer of size count containing text to apply * the Bidirectional algorithm to. */ int do_bidi(bidi_char *line, int count) { unsigned char* types; unsigned char* levels; unsigned char paragraphLevel; unsigned char currentEmbedding; unsigned char currentOverride; unsigned char tempType; int i, j, imax, yes, bover; /* Check the presence of R or AL types as optimization */ yes = 0; for (i=0; i<count; i++) { int type = getType(line[i].wc); if (type == R || type == AL) { yes = 1; break; } } if (yes == 0) return L; /* Initialize types, levels */ types = snewn(count, unsigned char); levels = snewn(count, unsigned char); /* Rule (P1) NOT IMPLEMENTED * P1. 
Split the text into separate paragraphs. A paragraph separator is * kept with the previous paragraph. Within each paragraph, apply all the * other rules of this algorithm. */ /* Rule (P2), (P3) * P2. In each paragraph, find the first character of type L, AL, or R. * P3. If a character is found in P2 and it is of type AL or R, then set * the paragraph embedding level to one; otherwise, set it to zero. */ paragraphLevel = 0; for (i=0; i<count ; i++) { int type = getType(line[i].wc); if (type == R || type == AL) { paragraphLevel = 1; break; } else if (type == L) break; } /* Rule (X1) * X1. Begin by setting the current embedding level to the paragraph * embedding level. Set the directional override status to neutral. */ currentEmbedding = paragraphLevel; currentOverride = ON; /* Rule (X2), (X3), (X4), (X5), (X6), (X7), (X8) * X2. With each RLE, compute the least greater odd embedding level. * X3. With each LRE, compute the least greater even embedding level. * X4. With each RLO, compute the least greater odd embedding level. * X5. With each LRO, compute the least greater even embedding level. * X6. For all types besides RLE, LRE, RLO, LRO, and PDF: * a. Set the level of the current character to the current * embedding level. * b. Whenever the directional override status is not neutral, * reset the current character type to the directional * override status. * X7. With each PDF, determine the matching embedding or override code. * If there was a valid matching code, restore (pop) the last * remembered (pushed) embedding level and directional override. * X8. All explicit directional embeddings and overrides are completely * terminated at the end of each paragraph. Paragraph separators are not * included in the embedding. 
(Useless here) NOT IMPLEMENTED */ bover = 0; for (i=0; i<count; i++) { tempType = getType(line[i].wc); switch (tempType) { case RLE: currentEmbedding = levels[i] = leastGreaterOdd(currentEmbedding); levels[i] = setOverrideBits(levels[i], currentOverride); currentOverride = ON; break; case LRE: currentEmbedding = levels[i] = leastGreaterEven(currentEmbedding); levels[i] = setOverrideBits(levels[i], currentOverride); currentOverride = ON; break; case RLO: currentEmbedding = levels[i] = leastGreaterOdd(currentEmbedding); tempType = currentOverride = R; bover = 1; break; case LRO: currentEmbedding = levels[i] = leastGreaterEven(currentEmbedding); tempType = currentOverride = L; bover = 1; break; case PDF: { int prevlevel = getPreviousLevel(levels, i); if (prevlevel == -1) { currentEmbedding = paragraphLevel; currentOverride = ON; } else { currentOverride = currentEmbedding & OMASK; currentEmbedding = currentEmbedding & ~OMASK; } } levels[i] = currentEmbedding; break; /* Whitespace is treated as neutral for now */ case WS: case S: levels[i] = currentEmbedding; tempType = ON; if (currentOverride != ON) tempType = currentOverride; break; default: levels[i] = currentEmbedding; if (currentOverride != ON) tempType = currentOverride; break; } types[i] = tempType; } /* this clears out all overrides, so we can use levels safely... */ /* checks bover first */ if (bover) for (i=0; i<count; i++) levels[i] = levels[i] & LMASK; /* Rule (X9) * X9. Remove all RLE, LRE, RLO, LRO, PDF, and BN codes. * Here, they're converted to BN. */ for (i=0; i<count; i++) { switch (types[i]) { case RLE: case LRE: case RLO: case LRO: case PDF: types[i] = BN; break; } } /* Rule (W1) * W1. Examine each non-spacing mark (NSM) in the level run, and change * the type of the NSM to the type of the previous character. If the NSM * is at the start of the level run, it will get the type of sor. 
*/ if (types[0] == NSM) types[0] = paragraphLevel; for (i=1; i<count; i++) { if (types[i] == NSM) types[i] = types[i-1]; /* Is this a safe assumption? * I assumed the previous, IS a character. */ } /* Rule (W2) * W2. Search backwards from each instance of a European number until the * first strong type (R, L, AL, or sor) is found. If an AL is found, * change the type of the European number to Arabic number. */ for (i=0; i<count; i++) { if (types[i] == EN) { j=i; while (j >= 0) { if (types[j] == AL) { types[i] = AN; break; } else if (types[j] == R || types[j] == L) { break; } j--; } } } /* Rule (W3) * W3. Change all ALs to R. * * Optimization: on Rule Xn, we might set a flag on AL type * to prevent this loop in L R lines only... */ for (i=0; i<count; i++) { if (types[i] == AL) types[i] = R; } /* Rule (W4) * W4. A single European separator between two European numbers changes * to a European number. A single common separator between two numbers * of the same type changes to that type. */ for (i=1; i<(count-1); i++) { if (types[i] == ES) { if (types[i-1] == EN && types[i+1] == EN) types[i] = EN; } else if (types[i] == CS) { if (types[i-1] == EN && types[i+1] == EN) types[i] = EN; else if (types[i-1] == AN && types[i+1] == AN) types[i] = AN; } } /* Rule (W5) * W5. A sequence of European terminators adjacent to European numbers * changes to all European numbers. * * Optimization: lots here... else ifs need rearrangement */ for (i=0; i<count; i++) { if (types[i] == ET) { if (i > 0 && types[i-1] == EN) { types[i] = EN; continue; } else if (i < count-1 && types[i+1] == EN) { types[i] = EN; continue; } else if (i < count-1 && types[i+1] == ET) { j=i; while (j <count && types[j] == ET) { j++; } if (types[j] == EN) types[i] = EN; } } } /* Rule (W6) * W6. Otherwise, separators and terminators change to Other Neutral: */ for (i=0; i<count; i++) { switch (types[i]) { case ES: case ET: case CS: types[i] = ON; break; } } /* Rule (W7) * W7. 
Search backwards from each instance of a European number until * the first strong type (R, L, or sor) is found. If an L is found, * then change the type of the European number to L. */ for (i=0; i<count; i++) { if (types[i] == EN) { j=i; while (j >= 0) { if (types[j] == L) { types[i] = L; break; } else if (types[j] == R || types[j] == AL) { break; } j--; } } } /* Rule (N1) * N1. A sequence of neutrals takes the direction of the surrounding * strong text if the text on both sides has the same direction. European * and Arabic numbers are treated as though they were R. */ if (count >= 2 && types[0] == ON) { if ((types[1] == R) || (types[1] == EN) || (types[1] == AN)) types[0] = R; else if (types[1] == L) types[0] = L; } for (i=1; i<(count-1); i++) { if (types[i] == ON) { if (types[i-1] == L) { j=i; while (j<(count-1) && types[j] == ON) { j++; } if (types[j] == L) { while (i<j) { types[i] = L; i++; } } } else if ((types[i-1] == R) || (types[i-1] == EN) || (types[i-1] == AN)) { j=i; while (j<(count-1) && types[j] == ON) { j++; } if ((types[j] == R) || (types[j] == EN) || (types[j] == AN)) { while (i<j) { types[i] = R; i++; } } } } } if (count >= 2 && types[count-1] == ON) { if (types[count-2] == R || types[count-2] == EN || types[count-2] == AN) types[count-1] = R; else if (types[count-2] == L) types[count-1] = L; } /* Rule (N2) * N2. Any remaining neutrals take the embedding direction. */ for (i=0; i<count; i++) { if (types[i] == ON) { if ((levels[i] % 2) == 0) types[i] = L; else types[i] = R; } } /* Rule (I1) * I1. For all characters with an even (left-to-right) embedding * direction, those of type R go up one level and those of type AN or * EN go up two levels. */ for (i=0; i<count; i++) { if ((levels[i] % 2) == 0) { if (types[i] == R) levels[i] += 1; else if (types[i] == AN || types[i] == EN) levels[i] += 2; } } /* Rule (I2) * I2. For all characters with an odd (right-to-left) embedding direction, * those of type L, EN or AN go up one level. 
*/ for (i=0; i<count; i++) { if ((levels[i] % 2) == 1) { if (types[i] == L || types[i] == EN || types[i] == AN) levels[i] += 1; } } /* Rule (L1) * L1. On each line, reset the embedding level of the following characters * to the paragraph embedding level: * (1)segment separators, (2)paragraph separators, * (3)any sequence of whitespace characters preceding * a segment separator or paragraph separator, * (4)and any sequence of white space characters * at the end of the line. * The types of characters used here are the original types, not those * modified by the previous phase. */ j=count-1; while (j>0 && (getType(line[j].wc) == WS)) { j--; } if (j < (count-1)) { for (j++; j<count; j++) levels[j] = paragraphLevel; } for (i=0; i<count; i++) { tempType = getType(line[i].wc); if (tempType == WS) { j=i; while (j<count && (getType(line[j].wc) == WS)) { j++; } if (j==count || getType(line[j].wc) == B || getType(line[j].wc) == S) { for (j--; j>=i ; j--) { levels[j] = paragraphLevel; } } } else if (tempType == B || tempType == S) { levels[i] = paragraphLevel; } } /* Rule (L4) NOT IMPLEMENTED * L4. A character that possesses the mirrored property as specified by * Section 4.7, Mirrored, must be depicted by a mirrored glyph if the * resolved directionality of that character is R. */ /* Note: this is implemented before L2 for efficiency */ for (i=0; i<count; i++) if ((levels[i] % 2) == 1) doMirror(&line[i].wc); /* Rule (L2) * L2. From the highest level found in the text to the lowest odd level on * each line, including intermediate levels not actually present in the * text, reverse any contiguous sequence of characters that are at that * level or higher */ /* we flip the character string and leave the level array */ imax = 0; i=0; tempType = levels[0]; while (i < count) { if (levels[i] > tempType) { tempType = levels[i]; imax=i; } i++; } /* maximum level in tempType, its index in imax. 
*/ while (tempType > 0) { /* loop from highest level to the least odd, */ /* which i assume is 1 */ flipThisRun(line, levels, tempType, count); tempType--; } /* Rule (L3) NOT IMPLEMENTED * L3. Combining marks applied to a right-to-left base character will at * this point precede their base character. If the rendering engine * expects them to follow the base characters in the final display * process, then the ordering of the marks and the base character must * be reversed. */ sfree(types); sfree(levels); return R; } /* * Bad, Horrible function * takes a pointer to a character that is checked for * having a mirror glyph. */ void doMirror(wchar_t* ch) { if ((*ch & 0xFF00) == 0) { switch (*ch) { case 0x0028: *ch = 0x0029; break; case 0x0029: *ch = 0x0028; break; case 0x003C: *ch = 0x003E; break; case 0x003E: *ch = 0x003C; break; case 0x005B: *ch = 0x005D; break; case 0x005D: *ch = 0x005B; break; case 0x007B: *ch = 0x007D; break; case 0x007D: *ch = 0x007B; break; case 0x00AB: *ch = 0x00BB; break; case 0x00BB: *ch = 0x00AB; break; } } else if ((*ch & 0xFF00) == 0x2000) { switch (*ch) { case 0x2039: *ch = 0x203A; break; case 0x203A: *ch = 0x2039; break; case 0x2045: *ch = 0x2046; break; case 0x2046: *ch = 0x2045; break; case 0x207D: *ch = 0x207E; break; case 0x207E: *ch = 0x207D; break; case 0x208D: *ch = 0x208E; break; case 0x208E: *ch = 0x208D; break; } } else if ((*ch & 0xFF00) == 0x2200) { switch (*ch) { case 0x2208: *ch = 0x220B; break; case 0x2209: *ch = 0x220C; break; case 0x220A: *ch = 0x220D; break; case 0x220B: *ch = 0x2208; break; case 0x220C: *ch = 0x2209; break; case 0x220D: *ch = 0x220A; break; case 0x2215: *ch = 0x29F5; break; case 0x223C: *ch = 0x223D; break; case 0x223D: *ch = 0x223C; break; case 0x2243: *ch = 0x22CD; break; case 0x2252: *ch = 0x2253; break; case 0x2253: *ch = 0x2252; break; case 0x2254: *ch = 0x2255; break; case 0x2255: *ch = 0x2254; break; case 0x2264: *ch = 0x2265; break; case 0x2265: *ch = 0x2264; break; case 0x2266: *ch = 0x2267; 
break; case 0x2267: *ch = 0x2266; break; case 0x2268: *ch = 0x2269; break; case 0x2269: *ch = 0x2268; break; case 0x226A: *ch = 0x226B; break; case 0x226B: *ch = 0x226A; break; case 0x226E: *ch = 0x226F; break; case 0x226F: *ch = 0x226E; break; case 0x2270: *ch = 0x2271; break; case 0x2271: *ch = 0x2270; break; case 0x2272: *ch = 0x2273; break; case 0x2273: *ch = 0x2272; break; case 0x2274: *ch = 0x2275; break; case 0x2275: *ch = 0x2274; break; case 0x2276: *ch = 0x2277; break; case 0x2277: *ch = 0x2276; break; case 0x2278: *ch = 0x2279; break; case 0x2279: *ch = 0x2278; break; case 0x227A: *ch = 0x227B; break; case 0x227B: *ch = 0x227A; break; case 0x227C: *ch = 0x227D; break; case 0x227D: *ch = 0x227C; break; case 0x227E: *ch = 0x227F; break; case 0x227F: *ch = 0x227E; break; case 0x2280: *ch = 0x2281; break; case 0x2281: *ch = 0x2280; break; case 0x2282: *ch = 0x2283; break; case 0x2283: *ch = 0x2282; break; case 0x2284: *ch = 0x2285; break; case 0x2285: *ch = 0x2284; break; case 0x2286: *ch = 0x2287; break; case 0x2287: *ch = 0x2286; break; case 0x2288: *ch = 0x2289; break; case 0x2289: *ch = 0x2288; break; case 0x228A: *ch = 0x228B; break; case 0x228B: *ch = 0x228A; break; case 0x228F: *ch = 0x2290; break; case 0x2290: *ch = 0x228F; break; case 0x2291: *ch = 0x2292; break; case 0x2292: *ch = 0x2291; break; case 0x2298: *ch = 0x29B8; break; case 0x22A2: *ch = 0x22A3; break; case 0x22A3: *ch = 0x22A2; break; case 0x22A6: *ch = 0x2ADE; break; case 0x22A8: *ch = 0x2AE4; break; case 0x22A9: *ch = 0x2AE3; break; case 0x22AB: *ch = 0x2AE5; break; case 0x22B0: *ch = 0x22B1; break; case 0x22B1: *ch = 0x22B0; break; case 0x22B2: *ch = 0x22B3; break; case 0x22B3: *ch = 0x22B2; break; case 0x22B4: *ch = 0x22B5; break; case 0x22B5: *ch = 0x22B4; break; case 0x22B6: *ch = 0x22B7; break; case 0x22B7: *ch = 0x22B6; break; case 0x22C9: *ch = 0x22CA; break; case 0x22CA: *ch = 0x22C9; break; case 0x22CB: *ch = 0x22CC; break; case 0x22CC: *ch = 0x22CB; break; case 0x22CD: *ch = 
0x2243; break; case 0x22D0: *ch = 0x22D1; break; case 0x22D1: *ch = 0x22D0; break; case 0x22D6: *ch = 0x22D7; break; case 0x22D7: *ch = 0x22D6; break; case 0x22D8: *ch = 0x22D9; break; case 0x22D9: *ch = 0x22D8; break; case 0x22DA: *ch = 0x22DB; break; case 0x22DB: *ch = 0x22DA; break; case 0x22DC: *ch = 0x22DD; break; case 0x22DD: *ch = 0x22DC; break; case 0x22DE: *ch = 0x22DF; break; case 0x22DF: *ch = 0x22DE; break; case 0x22E0: *ch = 0x22E1; break; case 0x22E1: *ch = 0x22E0; break; case 0x22E2: *ch = 0x22E3; break; case 0x22E3: *ch = 0x22E2; break; case 0x22E4: *ch = 0x22E5; break; case 0x22E5: *ch = 0x22E4; break; case 0x22E6: *ch = 0x22E7; break; case 0x22E7: *ch = 0x22E6; break; case 0x22E8: *ch = 0x22E9; break; case 0x22E9: *ch = 0x22E8; break; case 0x22EA: *ch = 0x22EB; break; case 0x22EB: *ch = 0x22EA; break; case 0x22EC: *ch = 0x22ED; break; case 0x22ED: *ch = 0x22EC; break; case 0x22F0: *ch = 0x22F1; break; case 0x22F1: *ch = 0x22F0; break; case 0x22F2: *ch = 0x22FA; break; case 0x22F3: *ch = 0x22FB; break; case 0x22F4: *ch = 0x22FC; break; case 0x22F6: *ch = 0x22FD; break; case 0x22F7: *ch = 0x22FE; break; case 0x22FA: *ch = 0x22F2; break; case 0x22FB: *ch = 0x22F3; break; case 0x22FC: *ch = 0x22F4; break; case 0x22FD: *ch = 0x22F6; break; case 0x22FE: *ch = 0x22F7; break; } } else if ((*ch & 0xFF00) == 0x2300) { switch (*ch) { case 0x2308: *ch = 0x2309; break; case 0x2309: *ch = 0x2308; break; case 0x230A: *ch = 0x230B; break; case 0x230B: *ch = 0x230A; break; case 0x2329: *ch = 0x232A; break; case 0x232A: *ch = 0x2329; break; } } else if ((*ch & 0xFF00) == 0x2700) { switch (*ch) { case 0x2768: *ch = 0x2769; break; case 0x2769: *ch = 0x2768; break; case 0x276A: *ch = 0x276B; break; case 0x276B: *ch = 0x276A; break; case 0x276C: *ch = 0x276D; break; case 0x276D: *ch = 0x276C; break; case 0x276E: *ch = 0x276F; break; case 0x276F: *ch = 0x276E; break; case 0x2770: *ch = 0x2771; break; case 0x2771: *ch = 0x2770; break; case 0x2772: *ch = 0x2773; break; 
case 0x2773: *ch = 0x2772; break; case 0x2774: *ch = 0x2775; break; case 0x2775: *ch = 0x2774; break; case 0x27D5: *ch = 0x27D6; break; case 0x27D6: *ch = 0x27D5; break; case 0x27DD: *ch = 0x27DE; break; case 0x27DE: *ch = 0x27DD; break; case 0x27E2: *ch = 0x27E3; break; case 0x27E3: *ch = 0x27E2; break; case 0x27E4: *ch = 0x27E5; break; case 0x27E5: *ch = 0x27E4; break; case 0x27E6: *ch = 0x27E7; break; case 0x27E7: *ch = 0x27E6; break; case 0x27E8: *ch = 0x27E9; break; case 0x27E9: *ch = 0x27E8; break; case 0x27EA: *ch = 0x27EB; break; case 0x27EB: *ch = 0x27EA; break; } } else if ((*ch & 0xFF00) == 0x2900) { switch (*ch) { case 0x2983: *ch = 0x2984; break; case 0x2984: *ch = 0x2983; break; case 0x2985: *ch = 0x2986; break; case 0x2986: *ch = 0x2985; break; case 0x2987: *ch = 0x2988; break; case 0x2988: *ch = 0x2987; break; case 0x2989: *ch = 0x298A; break; case 0x298A: *ch = 0x2989; break; case 0x298B: *ch = 0x298C; break; case 0x298C: *ch = 0x298B; break; case 0x298D: *ch = 0x2990; break; case 0x298E: *ch = 0x298F; break; case 0x298F: *ch = 0x298E; break; case 0x2990: *ch = 0x298D; break; case 0x2991: *ch = 0x2992; break; case 0x2992: *ch = 0x2991; break; case 0x2993: *ch = 0x2994; break; case 0x2994: *ch = 0x2993; break; case 0x2995: *ch = 0x2996; break; case 0x2996: *ch = 0x2995; break; case 0x2997: *ch = 0x2998; break; case 0x2998: *ch = 0x2997; break; case 0x29B8: *ch = 0x2298; break; case 0x29C0: *ch = 0x29C1; break; case 0x29C1: *ch = 0x29C0; break; case 0x29C4: *ch = 0x29C5; break; case 0x29C5: *ch = 0x29C4; break; case 0x29CF: *ch = 0x29D0; break; case 0x29D0: *ch = 0x29CF; break; case 0x29D1: *ch = 0x29D2; break; case 0x29D2: *ch = 0x29D1; break; case 0x29D4: *ch = 0x29D5; break; case 0x29D5: *ch = 0x29D4; break; case 0x29D8: *ch = 0x29D9; break; case 0x29D9: *ch = 0x29D8; break; case 0x29DA: *ch = 0x29DB; break; case 0x29DB: *ch = 0x29DA; break; case 0x29F5: *ch = 0x2215; break; case 0x29F8: *ch = 0x29F9; break; case 0x29F9: *ch = 0x29F8; break; case 
0x29FC: *ch = 0x29FD; break; case 0x29FD: *ch = 0x29FC; break; } } else if ((*ch & 0xFF00) == 0x2A00) { switch (*ch) { case 0x2A2B: *ch = 0x2A2C; break; case 0x2A2C: *ch = 0x2A2B; break; case 0x2A2D: *ch = 0x2A2C; break; case 0x2A2E: *ch = 0x2A2D; break; case 0x2A34: *ch = 0x2A35; break; case 0x2A35: *ch = 0x2A34; break; case 0x2A3C: *ch = 0x2A3D; break; case 0x2A3D: *ch = 0x2A3C; break; case 0x2A64: *ch = 0x2A65; break; case 0x2A65: *ch = 0x2A64; break; case 0x2A79: *ch = 0x2A7A; break; case 0x2A7A: *ch = 0x2A79; break; case 0x2A7D: *ch = 0x2A7E; break; case 0x2A7E: *ch = 0x2A7D; break; case 0x2A7F: *ch = 0x2A80; break; case 0x2A80: *ch = 0x2A7F; break; case 0x2A81: *ch = 0x2A82; break; case 0x2A82: *ch = 0x2A81; break; case 0x2A83: *ch = 0x2A84; break; case 0x2A84: *ch = 0x2A83; break; case 0x2A8B: *ch = 0x2A8C; break; case 0x2A8C: *ch = 0x2A8B; break; case 0x2A91: *ch = 0x2A92; break; case 0x2A92: *ch = 0x2A91; break; case 0x2A93: *ch = 0x2A94; break; case 0x2A94: *ch = 0x2A93; break; case 0x2A95: *ch = 0x2A96; break; case 0x2A96: *ch = 0x2A95; break; case 0x2A97: *ch = 0x2A98; break; case 0x2A98: *ch = 0x2A97; break; case 0x2A99: *ch = 0x2A9A; break; case 0x2A9A: *ch = 0x2A99; break; case 0x2A9B: *ch = 0x2A9C; break; case 0x2A9C: *ch = 0x2A9B; break; case 0x2AA1: *ch = 0x2AA2; break; case 0x2AA2: *ch = 0x2AA1; break; case 0x2AA6: *ch = 0x2AA7; break; case 0x2AA7: *ch = 0x2AA6; break; case 0x2AA8: *ch = 0x2AA9; break; case 0x2AA9: *ch = 0x2AA8; break; case 0x2AAA: *ch = 0x2AAB; break; case 0x2AAB: *ch = 0x2AAA; break; case 0x2AAC: *ch = 0x2AAD; break; case 0x2AAD: *ch = 0x2AAC; break; case 0x2AAF: *ch = 0x2AB0; break; case 0x2AB0: *ch = 0x2AAF; break; case 0x2AB3: *ch = 0x2AB4; break; case 0x2AB4: *ch = 0x2AB3; break; case 0x2ABB: *ch = 0x2ABC; break; case 0x2ABC: *ch = 0x2ABB; break; case 0x2ABD: *ch = 0x2ABE; break; case 0x2ABE: *ch = 0x2ABD; break; case 0x2ABF: *ch = 0x2AC0; break; case 0x2AC0: *ch = 0x2ABF; break; case 0x2AC1: *ch = 0x2AC2; break; case 
0x2AC2: *ch = 0x2AC1; break; case 0x2AC3: *ch = 0x2AC4; break; case 0x2AC4: *ch = 0x2AC3; break; case 0x2AC5: *ch = 0x2AC6; break; case 0x2AC6: *ch = 0x2AC5; break; case 0x2ACD: *ch = 0x2ACE; break; case 0x2ACE: *ch = 0x2ACD; break; case 0x2ACF: *ch = 0x2AD0; break; case 0x2AD0: *ch = 0x2ACF; break; case 0x2AD1: *ch = 0x2AD2; break; case 0x2AD2: *ch = 0x2AD1; break; case 0x2AD3: *ch = 0x2AD4; break; case 0x2AD4: *ch = 0x2AD3; break; case 0x2AD5: *ch = 0x2AD6; break; case 0x2AD6: *ch = 0x2AD5; break; case 0x2ADE: *ch = 0x22A6; break; case 0x2AE3: *ch = 0x22A9; break; case 0x2AE4: *ch = 0x22A8; break; case 0x2AE5: *ch = 0x22AB; break; case 0x2AEC: *ch = 0x2AED; break; case 0x2AED: *ch = 0x2AEC; break; case 0x2AF7: *ch = 0x2AF8; break; case 0x2AF8: *ch = 0x2AF7; break; case 0x2AF9: *ch = 0x2AFA; break; case 0x2AFA: *ch = 0x2AF9; break; } } else if ((*ch & 0xFF00) == 0x3000) { switch (*ch) { case 0x3008: *ch = 0x3009; break; case 0x3009: *ch = 0x3008; break; case 0x300A: *ch = 0x300B; break; case 0x300B: *ch = 0x300A; break; case 0x300C: *ch = 0x300D; break; case 0x300D: *ch = 0x300C; break; case 0x300E: *ch = 0x300F; break; case 0x300F: *ch = 0x300E; break; case 0x3010: *ch = 0x3011; break; case 0x3011: *ch = 0x3010; break; case 0x3014: *ch = 0x3015; break; case 0x3015: *ch = 0x3014; break; case 0x3016: *ch = 0x3017; break; case 0x3017: *ch = 0x3016; break; case 0x3018: *ch = 0x3019; break; case 0x3019: *ch = 0x3018; break; case 0x301A: *ch = 0x301B; break; case 0x301B: *ch = 0x301A; break; } } else if ((*ch & 0xFF00) == 0xFF00) { switch (*ch) { case 0xFF08: *ch = 0xFF09; break; case 0xFF09: *ch = 0xFF08; break; case 0xFF1C: *ch = 0xFF1E; break; case 0xFF1E: *ch = 0xFF1C; break; case 0xFF3B: *ch = 0xFF3D; break; case 0xFF3D: *ch = 0xFF3B; break; case 0xFF5B: *ch = 0xFF5D; break; case 0xFF5D: *ch = 0xFF5B; break; case 0xFF5F: *ch = 0xFF60; break; case 0xFF60: *ch = 0xFF5F; break; case 0xFF62: *ch = 0xFF63; break; case 0xFF63: *ch = 0xFF62; break; } } } #ifdef 
TEST_GETTYPE #include <stdio.h> #include <assert.h> int main(int argc, char **argv) { static const struct { int type; char *name; } typetoname[] = { #define TYPETONAME(X) { X , #X } TYPETONAME(L), TYPETONAME(LRE), TYPETONAME(LRO), TYPETONAME(R), TYPETONAME(AL), TYPETONAME(RLE), TYPETONAME(RLO), TYPETONAME(PDF), TYPETONAME(EN), TYPETONAME(ES), TYPETONAME(ET), TYPETONAME(AN), TYPETONAME(CS), TYPETONAME(NSM), TYPETONAME(BN), TYPETONAME(B), TYPETONAME(S), TYPETONAME(WS), TYPETONAME(ON), #undef TYPETONAME }; int i; for (i = 1; i < argc; i++) { unsigned long chr = strtoul(argv[i], NULL, 0); int type = getType(chr); assert(typetoname[type].type == type); printf("U+%04x: %s\n", chr, typetoname[type].name); } return 0; } #endif
Java
/* This file is part of Jedi Academy. Jedi Academy is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 2 of the License, or (at your option) any later version. Jedi Academy is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Jedi Academy. If not, see <http://www.gnu.org/licenses/>. */ // Copyright 2001-2013 Raven Software #pragma once #if !defined(RM_MISSION_H_INC) #define RM_MISSION_H_INC #ifdef DEBUG_LINKING #pragma message("...including RM_Mission.h") #endif // maximum random choices #define MAX_RANDOM_CHOICES 100 typedef vector<int> rmIntVector_t; class CRMMission { private: rmObjectiveList_t mObjectives; rmInstanceList_t mInstances; CRMInstanceFile mInstanceFile; CRMObjective* mCurrentObjective; bool mValidNodes; bool mValidPaths; bool mValidRivers; bool mValidWeapons; bool mValidAmmo; bool mValidObjectives; bool mValidInstances; int mTimeLimit; int mMaxInstancePosition; // npc multipliers float mAccuracyMultiplier; float mHealthMultiplier; // % chance that RMG pickup is actually spawned float mPickupHealth; float mPickupArmor; float mPickupAmmo; float mPickupWeapon; float mPickupEquipment; string mDescription; string mExitScreen; string mTimeExpiredScreen; // symmetric landscape style symmetry_t mSymmetric; // if set to 1 in the mission file, adds an extra connecting path in symmetric maps // to ensure both sides actually do connect int mBackUpPath; int mDefaultPadding; CRMAreaManager* mAreaManager; CRMPathManager* mPathManager; CRandomTerrain* mLandScape; public: CRMMission ( CRandomTerrain* ); ~CRMMission ( ); bool Load ( const char* name, const char* instances, const char* difficulty ); bool Spawn ( 
CRandomTerrain* terrain, qboolean IsServer ); void Preview ( const vec3_t from ); CRMObjective* FindObjective ( const char* name ); CRMObjective* GetCurrentObjective ( ) { return mCurrentObjective; } void CompleteMission (void); void FailedMission (bool TimeExpired); void CompleteObjective ( CRMObjective* ojective ); int GetTimeLimit (void) { return mTimeLimit; } int GetMaxInstancePosition (void) { return mMaxInstancePosition; } const char* GetDescription (void) { return mDescription.c_str(); } const char* GetExitScreen (void) { return mExitScreen.c_str(); } int GetSymmetric (void) { return mSymmetric; } int GetBackUpPath (void) { return mBackUpPath; } int GetDefaultPadding (void) { return mDefaultPadding; } // void CreateMap ( void ); bool DenyPickupHealth () {return mLandScape->flrand(0.0f,1.0f) > mPickupHealth;} bool DenyPickupArmor () {return mLandScape->flrand(0.0f,1.0f) > mPickupArmor;} bool DenyPickupAmmo () {return mLandScape->flrand(0.0f,1.0f) > mPickupAmmo;} bool DenyPickupWeapon () {return mLandScape->flrand(0.0f,1.0f) > mPickupWeapon;} bool DenyPickupEquipment () {return mLandScape->flrand(0.0f,1.0f) > mPickupEquipment;} private: // void PurgeUnlinkedTriggers ( ); // void PurgeTrigger ( CEntity* trigger ); void MirrorPos (vec3_t pos); CGPGroup* ParseRandom ( CGPGroup* random ); bool ParseOrigin ( CGPGroup* originGroup, vec3_t origin, vec3_t lookat, int* flattenHeight ); bool ParseNodes ( CGPGroup* group ); bool ParsePaths ( CGPGroup *paths); bool ParseRivers ( CGPGroup *rivers); void PlaceBridges (); void PlaceWallInstance(CRMInstance* instance, float xpos, float ypos, float zpos, int x, int y, float angle); bool ParseDifficulty ( CGPGroup* difficulty, CGPGroup *parent ); bool ParseWeapons ( CGPGroup* weapons ); bool ParseAmmo ( CGPGroup* ammo ); bool ParseOutfit ( CGPGroup* outfit ); bool ParseObjectives ( CGPGroup* objectives ); bool ParseInstance ( CGPGroup* instance ); bool ParseInstances ( CGPGroup* instances ); bool ParseInstancesOnPath ( 
CGPGroup* group ); bool ParseWallRect ( CGPGroup* group, int side); // void SpawnNPCTriggers ( CCMLandScape* landscape ); // void AttachNPCTriggers ( CCMLandScape* landscape ); }; #endif
Java
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/gfp.h>
#include <asm/unaligned.h>
#include "xhci.h"

/* "Wake on" enable bits in PORTSC (over-current, disconnect, connect). */
#define	PORT_WAKE_BITS	(PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)
/* Write-1-to-clear change bits plus PORT_PE; masked off before rewrites. */
#define	PORT_RWC_BITS	(PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
			 PORT_RC | PORT_PLC | PORT_PE)

/* Canned BOS descriptor returned for the USB 3.0 roothub; the U1/U2 exit
 * latency bytes are patched in from HCSPARAMS3 at request time (see
 * xhci_hub_control, DeviceRequest | USB_REQ_GET_DESCRIPTOR). */
static u8 usb_bos_descriptor [] = {
	USB_DT_BOS_SIZE,		/*  __u8 bLength, 5 bytes */
	USB_DT_BOS,			/*  __u8 bDescriptorType */
	0x0F, 0x00,			/*  __le16 wTotalLength, 15 bytes */
	0x1,				/*  __u8 bNumDeviceCaps */
	/* First device capability */
	USB_DT_USB_SS_CAP_SIZE,		/* __u8 bLength, 10 bytes */
	USB_DT_DEVICE_CAPABILITY,	/* Device Capability */
	USB_SS_CAP_TYPE,		/* bDevCapabilityType, SUPERSPEED_USB */
	0x00,				/* bmAttributes, LTM off by default */
	USB_5GBPS_OPERATION, 0x00,	/* wSpeedsSupported, 5Gbps only */
	0x03,				/* bFunctionalitySupport,
					   USB 3.0 speed only */
	0x00,				/* bU1DevExitLat, set later. */
	0x00, 0x00			/* __le16 bU2DevExitLat, set later.
					 */
};

/* Fill the fields shared by the USB 2.0 and USB 3.0 roothub descriptors:
 * power-on delay, port count and hub characteristics. */
static void xhci_common_hub_descriptor(struct xhci_hcd *xhci,
		struct usb_hub_descriptor *desc, int ports)
{
	u16 temp;

	desc->bPwrOn2PwrGood = 10;	/* xhci section 5.4.9 says 20ms max */
	desc->bHubContrCurrent = 0;

	desc->bNbrPorts = ports;
	/* Ugh, these should be #defines, FIXME */
	/* Using table 11-13 in USB 2.0 spec. */
	temp = 0;
	/* Bits 1:0 - support port power switching, or power always on */
	if (HCC_PPC(xhci->hcc_params))
		temp |= 0x0001;
	else
		temp |= 0x0002;
	/* Bit 2 - root hubs are not part of a compound device */
	/* Bits 4:3 - individual port over current protection */
	temp |= 0x0008;
	/* Bits 6:5 - no TTs in root ports */
	/* Bit 7 - no port indicators */
	desc->wHubCharacteristics = cpu_to_le16(temp);
}

/* Fill in the USB 2.0 roothub descriptor */
static void xhci_usb2_hub_descriptor(struct usb_hcd *hcd,
		struct xhci_hcd *xhci,
		struct usb_hub_descriptor *desc)
{
	int ports;
	u16 temp;
	__u8 port_removable[(USB_MAXCHILDREN + 1 + 7) / 8];
	u32 portsc;
	unsigned int i;

	ports = xhci->num_usb2_ports;

	xhci_common_hub_descriptor(xhci, desc, ports);
	desc->bDescriptorType = 0x29;	/* USB 2.0 hub descriptor type */
	temp = 1 + (ports / 8);
	desc->bDescLength = 7 + 2 * temp;

	/* The Device Removable bits are reported on a byte granularity.
	 * If the port doesn't exist within that byte, the bit is set to 0.
	 */
	memset(port_removable, 0, sizeof(port_removable));
	for (i = 0; i < ports; i++) {
		portsc = xhci_readl(xhci, xhci->usb2_ports[i]);
		/* If a device is removable, PORTSC reports a 0, same as in the
		 * hub descriptor DeviceRemovable bits.
		 */
		if (portsc & PORT_DEV_REMOVE)
			/* This math is hairy because bit 0 of DeviceRemovable
			 * is reserved, and bit 1 is for port 1, etc.
			 */
			port_removable[(i + 1) / 8] |= 1 << ((i + 1) % 8);
	}

	/* ch11.h defines a hub descriptor that has room for USB_MAXCHILDREN
	 * ports on it. The USB 2.0 specification says that there are two
	 * variable length fields at the end of the hub descriptor:
	 * DeviceRemovable and PortPwrCtrlMask.
But since we can have less than * USB_MAXCHILDREN ports, we may need to use the DeviceRemovable array * to set PortPwrCtrlMask bits. PortPwrCtrlMask must always be set to * 0xFF, so we initialize the both arrays (DeviceRemovable and * PortPwrCtrlMask) to 0xFF. Then we set the DeviceRemovable for each * set of ports that actually exist. */ memset(desc->u.hs.DeviceRemovable, 0xff, sizeof(desc->u.hs.DeviceRemovable)); memset(desc->u.hs.PortPwrCtrlMask, 0xff, sizeof(desc->u.hs.PortPwrCtrlMask)); for (i = 0; i < (ports + 1 + 7) / 8; i++) memset(&desc->u.hs.DeviceRemovable[i], port_removable[i], sizeof(__u8)); } /* Fill in the USB 3.0 roothub descriptor */ static void xhci_usb3_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci, struct usb_hub_descriptor *desc) { int ports; u16 port_removable; u32 portsc; unsigned int i; ports = xhci->num_usb3_ports; xhci_common_hub_descriptor(xhci, desc, ports); desc->bDescriptorType = 0x2a; desc->bDescLength = 12; /* header decode latency should be zero for roothubs, * see section 4.23.5.2. */ desc->u.ss.bHubHdrDecLat = 0; desc->u.ss.wHubDelay = 0; port_removable = 0; /* bit 0 is reserved, bit 1 is for port 1, etc. 
*/ for (i = 0; i < ports; i++) { portsc = xhci_readl(xhci, xhci->usb3_ports[i]); if (portsc & PORT_DEV_REMOVE) port_removable |= 1 << (i + 1); } memset(&desc->u.ss.DeviceRemovable, (__force __u16) cpu_to_le16(port_removable), sizeof(__u16)); } static void xhci_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci, struct usb_hub_descriptor *desc) { if (hcd->speed == HCD_USB3) xhci_usb3_hub_descriptor(hcd, xhci, desc); else xhci_usb2_hub_descriptor(hcd, xhci, desc); } static unsigned int xhci_port_speed(unsigned int port_status) { if (DEV_LOWSPEED(port_status)) return USB_PORT_STAT_LOW_SPEED; if (DEV_HIGHSPEED(port_status)) return USB_PORT_STAT_HIGH_SPEED; /* * FIXME: Yes, we should check for full speed, but the core uses that as * a default in portspeed() in usb/core/hub.c (which is the only place * USB_PORT_STAT_*_SPEED is used). */ return 0; } /* * These bits are Read Only (RO) and should be saved and written to the * registers: 0, 3, 10:13, 30 * connect status, over-current status, port speed, and device removable. * connect status and port speed are also sticky - meaning they're in * the AUX well and they aren't changed by a hot, warm, or cold reset. 
*/ #define XHCI_PORT_RO ((1<<0) | (1<<3) | (0xf<<10) | (1<<30)) /* * These bits are RW; writing a 0 clears the bit, writing a 1 sets the bit: * bits 5:8, 9, 14:15, 25:27 * link state, port power, port indicator state, "wake on" enable state */ #define XHCI_PORT_RWS ((0xf<<5) | (1<<9) | (0x3<<14) | (0x7<<25)) /* * These bits are RW; writing a 1 sets the bit, writing a 0 has no effect: * bit 4 (port reset) */ #define XHCI_PORT_RW1S ((1<<4)) /* * These bits are RW; writing a 1 clears the bit, writing a 0 has no effect: * bits 1, 17, 18, 19, 20, 21, 22, 23 * port enable/disable, and * change bits: connect, PED, warm port reset changed (reserved zero for USB 2.0 ports), * over-current, reset, link state, and L1 change */ #define XHCI_PORT_RW1CS ((1<<1) | (0x7f<<17)) /* * Bit 16 is RW, and writing a '1' to it causes the link state control to be * latched in */ #define XHCI_PORT_RW ((1<<16)) /* * These bits are Reserved Zero (RsvdZ) and zero should be written to them: * bits 2, 24, 28:31 */ #define XHCI_PORT_RZ ((1<<2) | (1<<24) | (0xf<<28)) /* * Given a port state, this function returns a value that would result in the * port being in the same state, if the value was written to the port status * control register. * Save Read Only (RO) bits and save read/write bits where * writing a 0 clears the bit and writing a 1 sets the bit (RWS). * For all other types (RW1S, RW1CS, RW, and RZ), writing a '0' has no effect. */ u32 xhci_port_state_to_neutral(u32 state) { /* Save read-only status and port state */ return (state & XHCI_PORT_RO) | (state & XHCI_PORT_RWS); } /* * find slot id based on port number. * @port: The one-based port number from one of the two split roothubs. 
 */
int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
		u16 port)
{
	int slot_id;
	int i;
	enum usb_device_speed speed;

	slot_id = 0;
	for (i = 0; i < MAX_HC_SLOTS; i++) {
		if (!xhci->devs[i])
			continue;
		speed = xhci->devs[i]->udev->speed;
		/* The device must belong to the same roothub as this hcd:
		 * SuperSpeed devices match the USB3 hcd, everything else
		 * matches the USB2 hcd. */
		if (((speed == USB_SPEED_SUPER) == (hcd->speed == HCD_USB3))
				&& xhci->devs[i]->fake_port == port) {
			slot_id = i;
			break;
		}
	}

	return slot_id;
}

/*
 * Stop device
 * It issues stop endpoint command for EP 0 to 30. And wait the last command
 * to complete.
 * suspend will set to 1, if suspend bit need to set in command.
 */
static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_command *cmd;
	unsigned long flags;
	int timeleft;
	int ret;
	int i;

	ret = 0;
	virt_dev = xhci->devs[slot_id];
	/* allocate_completion = true: we block on cmd->completion below */
	cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
	if (!cmd) {
		xhci_dbg(xhci, "Couldn't allocate command structure.\n");
		return -ENOMEM;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	/* Queue stop commands for every configured endpoint except EP0... */
	for (i = LAST_EP_INDEX; i > 0; i--) {
		if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue)
			xhci_queue_stop_endpoint(xhci, slot_id, i, suspend);
	}
	/* ...and track only the final (EP0) command for completion. */
	cmd->command_trb = xhci->cmd_ring->enqueue;
	list_add_tail(&cmd->cmd_list, &virt_dev->cmd_list);
	xhci_queue_stop_endpoint(xhci, slot_id, 0, suspend);
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for last stop endpoint command to finish */
	timeleft = wait_for_completion_interruptible_timeout(
			cmd->completion,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for stop endpoint command\n",
				timeleft == 0 ? "Timeout" : "Signal");
		spin_lock_irqsave(&xhci->lock, flags);
		/* The timeout might have raced with the event ring handler, so
		 * only delete from the list if the item isn't poisoned.
		 */
		if (cmd->cmd_list.next != LIST_POISON1)
			list_del(&cmd->cmd_list);
		spin_unlock_irqrestore(&xhci->lock, flags);
		ret = -ETIME;
		goto command_cleanup;
	}

command_cleanup:
	xhci_free_command(xhci, cmd);
	return ret;
}

/*
 * Ring device, it rings the all doorbells unconditionally.
 */
void xhci_ring_device(struct xhci_hcd *xhci, int slot_id)
{
	int i;

	for (i = 0; i < LAST_EP_INDEX + 1; i++)
		if (xhci->devs[slot_id]->eps[i].ring &&
		    xhci->devs[slot_id]->eps[i].ring->dequeue)
			xhci_ring_ep_doorbell(xhci, slot_id, i, 0);

	return;
}

/* Disable a (USB 2.0 only) root port; PORT_PE is write-1-to-clear.
 * Caller holds xhci->lock. */
static void xhci_disable_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
		u16 wIndex, __le32 __iomem *addr, u32 port_status)
{
	/* Don't allow the USB core to disable SuperSpeed ports. */
	if (hcd->speed == HCD_USB3) {
		xhci_dbg(xhci, "Ignoring request to disable "
				"SuperSpeed port.\n");
		return;
	}

	/* Write 1 to disable the port */
	xhci_writel(xhci, port_status | PORT_PE, addr);
	port_status = xhci_readl(xhci, addr);
	xhci_dbg(xhci, "disable port, actual port %d status = 0x%x\n",
			wIndex, port_status);
}

/* Clear the change bit matching a ClearPortFeature C_* selector.
 * Change bits are write-1-to-clear; caller holds xhci->lock. */
static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
		u16 wIndex, __le32 __iomem *addr, u32 port_status)
{
	char *port_change_bit;
	u32 status;

	switch (wValue) {
	case USB_PORT_FEAT_C_RESET:
		status = PORT_RC;
		port_change_bit = "reset";
		break;
	case USB_PORT_FEAT_C_BH_PORT_RESET:
		status = PORT_WRC;
		port_change_bit = "warm(BH) reset";
		break;
	case USB_PORT_FEAT_C_CONNECTION:
		status = PORT_CSC;
		port_change_bit = "connect";
		break;
	case USB_PORT_FEAT_C_OVER_CURRENT:
		status = PORT_OCC;
		port_change_bit = "over-current";
		break;
	case USB_PORT_FEAT_C_ENABLE:
		status = PORT_PEC;
		port_change_bit = "enable/disable";
		break;
	case USB_PORT_FEAT_C_SUSPEND:
		status = PORT_PLC;
		port_change_bit = "suspend/resume";
		break;
	case USB_PORT_FEAT_C_PORT_LINK_STATE:
		status = PORT_PLC;
		port_change_bit = "link state";
		break;
	default:
		/* Should never happen */
		return;
	}
	/* Change bits are all write 1 to clear */
	xhci_writel(xhci, port_status | status, addr);
	port_status = xhci_readl(xhci, addr);
	xhci_dbg(xhci, "clear port %s change, actual port %d status = 0x%x\n",
			port_change_bit, wIndex, port_status);
}

/* Select the port-register array (USB2 or USB3) for this hcd; returns the
 * number of ports in that array. */
static int xhci_get_ports(struct usb_hcd *hcd, __le32 __iomem ***port_array)
{
	int max_ports;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (hcd->speed == HCD_USB3) {
		max_ports = xhci->num_usb3_ports;
		*port_array = xhci->usb3_ports;
	} else {
		max_ports = xhci->num_usb2_ports;
		*port_array = xhci->usb2_ports;
	}

	return max_ports;
}

/* Write a new PLS value (plus the latch bit) into PORTSC.
 * Caller holds xhci->lock. */
void xhci_set_link_state(struct xhci_hcd *xhci, __le32 __iomem **port_array,
				int port_id, u32 link_state)
{
	u32 temp;

	temp = xhci_readl(xhci, port_array[port_id]);
	temp = xhci_port_state_to_neutral(temp);
	temp &= ~PORT_PLS_MASK;
	temp |= PORT_LINK_STROBE | link_state;
	xhci_writel(xhci, temp, port_array[port_id]);
}

/* Test and clear port RWC bit */
void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array,
				int port_id, u32 port_bit)
{
	u32 temp;

	temp = xhci_readl(xhci, port_array[port_id]);
	if (temp & port_bit) {
		temp = xhci_port_state_to_neutral(temp);
		temp |= port_bit;
		xhci_writel(xhci, temp, port_array[port_id]);
	}
}

/* Updates Link Status for super Speed port */
static void xhci_hub_report_link_state(struct xhci_hcd *xhci,
		u32 *status, u32 status_reg)
{
	u32 pls = status_reg & PORT_PLS_MASK;

	/* resume state is a xHCI internal state.
	 * Do not report it to usb core.
	 */
	if (pls == XDEV_RESUME)
		return;

	/* When the CAS bit is set then warm reset
	 * should be performed on port
	 */
	if (status_reg & PORT_CAS) {
		/* The CAS bit can be set while the port is
		 * in any link state.
		 * Only roothubs have CAS bit, so we
		 * pretend to be in compliance mode
		 * unless we're already in compliance
		 * or the inactive state.
		 */
		if (pls != USB_SS_PORT_LS_COMP_MOD &&
		    pls != USB_SS_PORT_LS_SS_INACTIVE) {
			pls = USB_SS_PORT_LS_COMP_MOD;
		}
		/* Return also connection bit -
		 * hub state machine resets port
		 * when this bit is set.
		 */
		pls |= USB_PORT_STAT_CONNECTION;
	} else {
		/*
		 * If CAS bit isn't set but the Port is already at
		 * Compliance Mode, fake a connection so the USB core
		 * notices the Compliance state and resets the port.
		 * This resolves an issue generated by the SN65LVPE502CP
		 * in which sometimes the port enters compliance mode
		 * caused by a delay on the host-device negotiation.
		 */
		if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
				(pls == USB_SS_PORT_LS_COMP_MOD))
			pls |= USB_PORT_STAT_CONNECTION;
	}

	/* update status field */
	*status |= pls;
}

/*
 * Function for Compliance Mode Quirk.
 *
 * This Function verifies if all xhc USB3 ports have entered U0, if so,
 * the compliance mode timer is deleted. A port won't enter
 * compliance mode if it has previously entered U0.
 */
void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status, u16 wIndex)
{
	/* Bitmask with one bit per USB3 port; full mask == all ports seen. */
	u32 all_ports_seen_u0 = ((1 << xhci->num_usb3_ports)-1);
	bool port_in_u0 = ((status & PORT_PLS_MASK) == XDEV_U0);

	if (!(xhci->quirks & XHCI_COMP_MODE_QUIRK))
		return;

	if ((xhci->port_status_u0 != all_ports_seen_u0) && port_in_u0) {
		xhci->port_status_u0 |= 1 << wIndex;
		if (xhci->port_status_u0 == all_ports_seen_u0) {
			del_timer_sync(&xhci->comp_mode_recovery_timer);
			xhci_dbg(xhci, "All USB3 ports have entered U0 already!\n");
			xhci_dbg(xhci, "Compliance Mode Recovery Timer Deleted.\n");
		}
	}
}

/* Handle hub class control requests for the xHCI roothubs.  Called with
 * interrupts enabled; takes xhci->lock (and drops it around msleep()s
 * and xhci_stop_device() calls). */
int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
		u16 wIndex, char *buf, u16 wLength)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int max_ports;
	unsigned long flags;
	u32 temp, status;
	int retval = 0;
	__le32 __iomem **port_array;
	int slot_id;
	struct xhci_bus_state *bus_state;
	u16 link_state = 0;

	max_ports = xhci_get_ports(hcd, &port_array);
	bus_state = &xhci->bus_state[hcd_index(hcd)];

	spin_lock_irqsave(&xhci->lock, flags);
	switch (typeReq) {
	case GetHubStatus:
		/* No power source, over-current reported per port */
		memset(buf, 0, 4);
		break;
	case GetHubDescriptor:
		/* Check to make sure userspace is asking for the USB 3.0 hub
		 * descriptor for the USB 3.0 roothub. If not, we stall the
		 * endpoint, like external hubs do.
		 */
		if (hcd->speed == HCD_USB3 &&
				(wLength < USB_DT_SS_HUB_SIZE ||
				 wValue != (USB_DT_SS_HUB << 8))) {
			xhci_dbg(xhci, "Wrong hub descriptor type for "
					"USB 3.0 roothub.\n");
			goto error;
		}
		xhci_hub_descriptor(hcd, xhci,
				(struct usb_hub_descriptor *) buf);
		break;
	case DeviceRequest | USB_REQ_GET_DESCRIPTOR:
		if ((wValue & 0xff00) != (USB_DT_BOS << 8))
			goto error;

		if (hcd->speed != HCD_USB3)
			goto error;

		/* Copy the canned BOS descriptor and patch in the U1/U2
		 * exit latencies from HCSPARAMS3. */
		memcpy(buf, &usb_bos_descriptor,
				USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE);
		temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
		buf[12] = HCS_U1_LATENCY(temp);
		put_unaligned_le16(HCS_U2_LATENCY(temp), &buf[13]);

		spin_unlock_irqrestore(&xhci->lock, flags);
		return USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE;
	case GetPortStatus:
		if (!wIndex || wIndex > max_ports)
			goto error;
		wIndex--;
		status = 0;
		temp = xhci_readl(xhci, port_array[wIndex]);
		/* All-ones read means the controller has died/been removed. */
		if (temp == 0xffffffff) {
			retval = -ENODEV;
			break;
		}
		xhci_dbg(xhci, "get port status, actual port %d status = 0x%x\n", wIndex, temp);

		/* wPortChange bits */
		if (temp & PORT_CSC)
			status |= USB_PORT_STAT_C_CONNECTION << 16;
		if (temp & PORT_PEC)
			status |= USB_PORT_STAT_C_ENABLE << 16;
		if ((temp & PORT_OCC))
			status |= USB_PORT_STAT_C_OVERCURRENT << 16;
		if ((temp & PORT_RC))
			status |= USB_PORT_STAT_C_RESET << 16;
		/* USB3.0 only */
		if (hcd->speed == HCD_USB3) {
			if ((temp & PORT_PLC))
				status |= USB_PORT_STAT_C_LINK_STATE << 16;
			if ((temp & PORT_WRC))
				status |= USB_PORT_STAT_C_BH_RESET << 16;
		}

		if (hcd->speed != HCD_USB3) {
			if ((temp & PORT_PLS_MASK) == XDEV_U3
					&& (temp & PORT_POWER))
				status |= USB_PORT_STAT_SUSPEND;
		}
		if ((temp & PORT_PLS_MASK) == XDEV_RESUME &&
				!DEV_SUPERSPEED(temp)) {
			if ((temp & PORT_RESET) || !(temp & PORT_PE))
				goto error;
			if (time_after_eq(jiffies,
					bus_state->resume_done[wIndex])) {
				xhci_dbg(xhci, "Resume USB2 port %d\n",
					wIndex + 1);
				bus_state->resume_done[wIndex] = 0;
				xhci_set_link_state(xhci, port_array, wIndex,
							XDEV_U0);
				xhci_dbg(xhci, "set port %d resume\n",
					wIndex + 1);
				slot_id = xhci_find_slot_id_by_port(hcd, xhci,
								 wIndex + 1);
				if (!slot_id) {
					xhci_dbg(xhci, "slot_id is zero\n");
					goto error;
				}
				xhci_ring_device(xhci, slot_id);
				bus_state->port_c_suspend |= 1 << wIndex;
				bus_state->suspended_ports &= ~(1 << wIndex);
			} else {
				/*
				 * The resume has been signaling for less than
				 * 20ms. Report the port status as SUSPEND,
				 * let the usbcore check port status again
				 * and clear resume signaling later.
				 */
				status |= USB_PORT_STAT_SUSPEND;
			}
		}
		if ((temp & PORT_PLS_MASK) == XDEV_U0
			&& (temp & PORT_POWER)
			&& (bus_state->suspended_ports & (1 << wIndex))) {
			bus_state->suspended_ports &= ~(1 << wIndex);
			if (hcd->speed != HCD_USB3)
				bus_state->port_c_suspend |= 1 << wIndex;
		}
		if (temp & PORT_CONNECT) {
			status |= USB_PORT_STAT_CONNECTION;
			status |= xhci_port_speed(temp);
		}
		if (temp & PORT_PE)
			status |= USB_PORT_STAT_ENABLE;
		if (temp & PORT_OC)
			status |= USB_PORT_STAT_OVERCURRENT;
		if (temp & PORT_RESET)
			status |= USB_PORT_STAT_RESET;
		if (temp & PORT_POWER) {
			if (hcd->speed == HCD_USB3)
				status |= USB_SS_PORT_STAT_POWER;
			else
				status |= USB_PORT_STAT_POWER;
		}
		/* Update Port Link State for super speed ports*/
		if (hcd->speed == HCD_USB3) {
			xhci_hub_report_link_state(xhci, &status, temp);
			/*
			 * Verify if all USB3 Ports Have entered U0 already.
			 * Delete Compliance Mode Timer if so.
			 */
			xhci_del_comp_mod_timer(xhci, temp, wIndex);
		}
		if (bus_state->port_c_suspend & (1 << wIndex))
			status |= 1 << USB_PORT_FEAT_C_SUSPEND;
		xhci_dbg(xhci, "Get port status returned 0x%x\n", status);
		put_unaligned(cpu_to_le32(status), (__le32 *) buf);
		break;
	case SetPortFeature:
		/* Link state selector rides in the high byte of wIndex;
		 * >> 3 places it at the PORT_PLS_MASK bit position. */
		if (wValue == USB_PORT_FEAT_LINK_STATE)
			link_state = (wIndex & 0xff00) >> 3;
		wIndex &= 0xff;
		if (!wIndex || wIndex > max_ports)
			goto error;
		wIndex--;
		temp = xhci_readl(xhci, port_array[wIndex]);
		if (temp == 0xffffffff) {
			retval = -ENODEV;
			break;
		}
		temp = xhci_port_state_to_neutral(temp);
		/* FIXME: What new port features do we need to support? */
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			temp = xhci_readl(xhci, port_array[wIndex]);
			if ((temp & PORT_PLS_MASK) != XDEV_U0) {
				/* Resume the port to U0 first */
				xhci_set_link_state(xhci, port_array, wIndex,
							XDEV_U0);
				spin_unlock_irqrestore(&xhci->lock, flags);
				msleep(10);
				spin_lock_irqsave(&xhci->lock, flags);
			}
			/* In spec software should not attempt to suspend
			 * a port unless the port reports that it is in the
			 * enabled (PED = ‘1’,PLS < ‘3’) state.
			 */
			temp = xhci_readl(xhci, port_array[wIndex]);
			if ((temp & PORT_PE) == 0 || (temp & PORT_RESET)
				|| (temp & PORT_PLS_MASK) >= XDEV_U3) {
				xhci_warn(xhci, "USB core suspending device "
					  "not in U0/U1/U2.\n");
				goto error;
			}

			slot_id = xhci_find_slot_id_by_port(hcd, xhci,
					wIndex + 1);
			if (!slot_id) {
				xhci_warn(xhci, "slot_id is zero\n");
				goto error;
			}
			/* unlock to execute stop endpoint commands */
			spin_unlock_irqrestore(&xhci->lock, flags);
			xhci_stop_device(xhci, slot_id, 1);
			spin_lock_irqsave(&xhci->lock, flags);

			xhci_set_link_state(xhci, port_array, wIndex, XDEV_U3);

			spin_unlock_irqrestore(&xhci->lock, flags);
			msleep(10); /* wait device to enter */
			spin_lock_irqsave(&xhci->lock, flags);

			temp = xhci_readl(xhci, port_array[wIndex]);
			bus_state->suspended_ports |= 1 << wIndex;
			break;
		case USB_PORT_FEAT_LINK_STATE:
			temp = xhci_readl(xhci, port_array[wIndex]);

			/* Disable port */
			if (link_state == USB_SS_PORT_LS_SS_DISABLED) {
				xhci_dbg(xhci, "Disable port %d\n", wIndex);
				temp = xhci_port_state_to_neutral(temp);
				/*
				 * Clear all change bits, so that we get a new
				 * connection event.
				 */
				temp |= PORT_CSC | PORT_PEC | PORT_WRC |
					PORT_OCC | PORT_RC | PORT_PLC |
					PORT_CEC;
				xhci_writel(xhci, temp | PORT_PE,
					port_array[wIndex]);
				temp = xhci_readl(xhci, port_array[wIndex]);
				break;
			}

			/* Put link in RxDetect (enable port) */
			if (link_state == USB_SS_PORT_LS_RX_DETECT) {
				xhci_dbg(xhci, "Enable port %d\n", wIndex);
				xhci_set_link_state(xhci, port_array, wIndex,
						link_state);
				temp = xhci_readl(xhci, port_array[wIndex]);
				break;
			}

			/* Software should not attempt to set
			 * port link state above '3' (U3) and the port
			 * must be enabled.
			 */
			if ((temp & PORT_PE) == 0 ||
				(link_state > USB_SS_PORT_LS_U3)) {
				xhci_warn(xhci, "Cannot set link state.\n");
				goto error;
			}

			if (link_state == USB_SS_PORT_LS_U3) {
				slot_id = xhci_find_slot_id_by_port(hcd, xhci,
						wIndex + 1);
				if (slot_id) {
					/* unlock to execute stop endpoint
					 * commands */
					spin_unlock_irqrestore(&xhci->lock,
								flags);
					xhci_stop_device(xhci, slot_id, 1);
					spin_lock_irqsave(&xhci->lock, flags);
				}
			}

			xhci_set_link_state(xhci, port_array, wIndex,
						link_state);

			spin_unlock_irqrestore(&xhci->lock, flags);
			msleep(20); /* wait device to enter */
			spin_lock_irqsave(&xhci->lock, flags);

			temp = xhci_readl(xhci, port_array[wIndex]);
			if (link_state == USB_SS_PORT_LS_U3)
				bus_state->suspended_ports |= 1 << wIndex;
			break;
		case USB_PORT_FEAT_POWER:
			/*
			 * Turn on ports, even if there isn't per-port switching.
			 * HC will report connect events even before this is set.
			 * However, khubd will ignore the roothub events until
			 * the roothub is registered.
			 */
			xhci_writel(xhci, temp | PORT_POWER,
					port_array[wIndex]);

			temp = xhci_readl(xhci, port_array[wIndex]);
			xhci_dbg(xhci, "set port power, actual port %d status = 0x%x\n", wIndex, temp);
			break;
		case USB_PORT_FEAT_RESET:
			temp = (temp | PORT_RESET);
			xhci_writel(xhci, temp, port_array[wIndex]);

			temp = xhci_readl(xhci, port_array[wIndex]);
			xhci_dbg(xhci, "set port reset, actual port %d status = 0x%x\n", wIndex, temp);
			break;
		case USB_PORT_FEAT_BH_PORT_RESET:
			temp |= PORT_WR;
			xhci_writel(xhci, temp, port_array[wIndex]);

			temp = xhci_readl(xhci, port_array[wIndex]);
			break;
		default:
			goto error;
		}
		/* unblock any posted writes */
		temp = xhci_readl(xhci, port_array[wIndex]);
		break;
	case ClearPortFeature:
		if (!wIndex || wIndex > max_ports)
			goto error;
		wIndex--;
		temp = xhci_readl(xhci, port_array[wIndex]);
		if (temp == 0xffffffff) {
			retval = -ENODEV;
			break;
		}
		/* FIXME: What new port features do we need to support? */
		temp = xhci_port_state_to_neutral(temp);
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			temp = xhci_readl(xhci, port_array[wIndex]);
			xhci_dbg(xhci, "clear USB_PORT_FEAT_SUSPEND\n");
			xhci_dbg(xhci, "PORTSC %04x\n", temp);
			if (temp & PORT_RESET)
				goto error;
			if ((temp & PORT_PLS_MASK) == XDEV_U3) {
				if ((temp & PORT_PE) == 0)
					goto error;

				xhci_set_link_state(xhci, port_array, wIndex,
							XDEV_RESUME);
				spin_unlock_irqrestore(&xhci->lock, flags);
				msleep(20);
				spin_lock_irqsave(&xhci->lock, flags);
				xhci_set_link_state(xhci, port_array, wIndex,
							XDEV_U0);
			}
			bus_state->port_c_suspend |= 1 << wIndex;

			slot_id = xhci_find_slot_id_by_port(hcd, xhci,
					wIndex + 1);
			if (!slot_id) {
				xhci_dbg(xhci, "slot_id is zero\n");
				goto error;
			}
			xhci_ring_device(xhci, slot_id);
			break;
		case USB_PORT_FEAT_C_SUSPEND:
			bus_state->port_c_suspend &= ~(1 << wIndex);
			/* fall through: the PLC change bit still has to be
			 * cleared in the register, like the other C_* cases */
		case USB_PORT_FEAT_C_RESET:
		case USB_PORT_FEAT_C_BH_PORT_RESET:
		case USB_PORT_FEAT_C_CONNECTION:
		case USB_PORT_FEAT_C_OVER_CURRENT:
		case USB_PORT_FEAT_C_ENABLE:
		case USB_PORT_FEAT_C_PORT_LINK_STATE:
			xhci_clear_port_change_bit(xhci, wValue, wIndex,
					port_array[wIndex], temp);
			break;
		case USB_PORT_FEAT_ENABLE:
			xhci_disable_port(hcd, xhci, wIndex,
					port_array[wIndex], temp);
			break;
		default:
			goto error;
		}
		break;
	default:
error:
		/* "stall" on error */
		retval = -EPIPE;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
	return retval;
}

/*
 * Returns 0 if the status hasn't changed, or the number of bytes in buf.
 * Ports are 0-indexed from the HCD point of view,
 * and 1-indexed from the USB core pointer of view.
 *
 * Note that the status change bits will be cleared as soon as a port status
 * change event is generated, so we use the saved status from that event.
 */
int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	unsigned long flags;
	u32 temp, status;
	u32 mask;
	int i, retval;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int max_ports;
	__le32 __iomem **port_array;
	struct xhci_bus_state *bus_state;
	bool reset_change = false;

	max_ports = xhci_get_ports(hcd, &port_array);
	bus_state = &xhci->bus_state[hcd_index(hcd)];

	/* Initial status is no changes */
	retval = (max_ports + 8) / 8;
	memset(buf, 0, retval);
	status = 0;

	mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC;

	spin_lock_irqsave(&xhci->lock, flags);
	/* For each port, did anything change? If so, set that bit in buf. */
	for (i = 0; i < max_ports; i++) {
		temp = xhci_readl(xhci, port_array[i]);
		if (temp == 0xffffffff) {
			retval = -ENODEV;
			break;
		}
		if ((temp & mask) != 0 ||
			(bus_state->port_c_suspend & 1 << i) ||
			(bus_state->resume_done[i] && time_after_eq(
			    jiffies, bus_state->resume_done[i]))) {
			/* Bit 0 of buf is reserved; port 1 is bit 1, etc. */
			buf[(i + 1) / 8] |= 1 << (i + 1) % 8;
			status = 1;
		}
		if ((temp & PORT_RC))
			reset_change = true;
	}
	if (!status && !reset_change) {
		xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
		clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
	return status ? retval : 0;
}

#ifdef CONFIG_PM

/* Suspend the whole roothub bus: refuse while any port is resuming, then
 * put every enabled port into U3 and configure wake bits. */
int xhci_bus_suspend(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int max_ports, port_index;
	__le32 __iomem **port_array;
	struct xhci_bus_state *bus_state;
	unsigned long flags;

	max_ports = xhci_get_ports(hcd, &port_array);
	bus_state = &xhci->bus_state[hcd_index(hcd)];

	spin_lock_irqsave(&xhci->lock, flags);

	if (hcd->self.root_hub->do_remote_wakeup) {
		port_index = max_ports;
		while (port_index--) {
			if (bus_state->resume_done[port_index] != 0) {
				spin_unlock_irqrestore(&xhci->lock, flags);
				xhci_dbg(xhci, "suspend failed because "
						"port %d is resuming\n",
						port_index + 1);
				return -EBUSY;
			}
		}
	}

	port_index = max_ports;
	bus_state->bus_suspended = 0;
	while (port_index--) {
		/* suspend the port if the port is not suspended */
		u32 t1, t2;
		int slot_id;

		t1 = xhci_readl(xhci, port_array[port_index]);
		t2 = xhci_port_state_to_neutral(t1);

		if ((t1 & PORT_PE) && !(t1 & PORT_PLS_MASK)) {
			xhci_dbg(xhci, "port %d not suspended\n", port_index);
			slot_id = xhci_find_slot_id_by_port(hcd, xhci,
					port_index + 1);
			if (slot_id) {
				spin_unlock_irqrestore(&xhci->lock, flags);
				xhci_stop_device(xhci, slot_id, 1);
				spin_lock_irqsave(&xhci->lock, flags);
			}
			t2 &= ~PORT_PLS_MASK;
			t2 |= PORT_LINK_STROBE | XDEV_U3;
			set_bit(port_index, &bus_state->bus_suspended);
		}
		if (hcd->self.root_hub->do_remote_wakeup) {
			if (t1 & PORT_CONNECT) {
				t2 |= PORT_WKOC_E | PORT_WKDISC_E;
				t2 &= ~PORT_WKCONN_E;
			} else {
				t2 |= PORT_WKOC_E | PORT_WKCONN_E;
				t2 &= ~PORT_WKDISC_E;
			}
		} else
			t2 &= ~PORT_WAKE_BITS;

		t1 = xhci_port_state_to_neutral(t1);
		if (t1 != t2)
			xhci_writel(xhci, t2, port_array[port_index]);
	}
	hcd->state = HC_STATE_SUSPENDED;
	bus_state->next_statechange = jiffies + msecs_to_jiffies(10);
	spin_unlock_irqrestore(&xhci->lock, flags);
	return 0;
}

/* Resume the roothub bus: bring suspended ports back to U0 (resume
 * signaling for USB2), clear wake bits, and re-enable interrupts. */
int xhci_bus_resume(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int max_ports, port_index;
	__le32 __iomem **port_array;
	struct xhci_bus_state *bus_state;
	u32 temp;
	unsigned long flags;

	max_ports = xhci_get_ports(hcd, &port_array);
	bus_state = &xhci->bus_state[hcd_index(hcd)];

	if (time_before(jiffies, bus_state->next_statechange))
		msleep(5);

	spin_lock_irqsave(&xhci->lock, flags);
	if (!HCD_HW_ACCESSIBLE(hcd)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ESHUTDOWN;
	}

	/* delay the irqs */
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp &= ~CMD_EIE;
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	port_index = max_ports;
	while (port_index--) {
		/* Check whether need resume ports. If needed
		   resume port and disable remote wakeup */
		u32 temp;
		int slot_id;

		temp = xhci_readl(xhci, port_array[port_index]);
		if (DEV_SUPERSPEED(temp))
			temp &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
		else
			temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
		if (test_bit(port_index, &bus_state->bus_suspended) &&
		    (temp & PORT_PLS_MASK)) {
			if (DEV_SUPERSPEED(temp)) {
				xhci_set_link_state(xhci, port_array,
							port_index, XDEV_U0);
			} else {
				/* USB2 needs 20ms of resume signaling
				 * before the transition back to U0. */
				xhci_set_link_state(xhci, port_array,
						port_index, XDEV_RESUME);

				spin_unlock_irqrestore(&xhci->lock, flags);
				msleep(20);
				spin_lock_irqsave(&xhci->lock, flags);

				xhci_set_link_state(xhci, port_array,
							port_index, XDEV_U0);
			}
			/* wait for the port to enter U0 and report port link
			 * state change.
			 */
			spin_unlock_irqrestore(&xhci->lock, flags);
			msleep(20);
			spin_lock_irqsave(&xhci->lock, flags);

			/* Clear PLC */
			xhci_test_and_clear_bit(xhci, port_array, port_index,
						PORT_PLC);

			slot_id = xhci_find_slot_id_by_port(hcd,
					xhci, port_index + 1);
			if (slot_id)
				xhci_ring_device(xhci, slot_id);
		} else
			xhci_writel(xhci, temp, port_array[port_index]);
	}

	/* read to flush posted writes before re-enabling interrupts */
	(void) xhci_readl(xhci, &xhci->op_regs->command);

	bus_state->next_statechange = jiffies + msecs_to_jiffies(5);
	/* re-enable irqs */
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= CMD_EIE;
	xhci_writel(xhci, temp, &xhci->op_regs->command);
	temp = xhci_readl(xhci, &xhci->op_regs->command);

	spin_unlock_irqrestore(&xhci->lock, flags);
	return 0;
}

#endif	/* CONFIG_PM */
Java
/* * Copyright (C) 2008-2010 TrinityCore <http://www.trinitycore.org/> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Comment: The event with the Living Mojos is not implemented, just is done that when one of the mojos around the boss take damage will make the boss enter in combat! */ #include "ScriptPCH.h" #include "gundrak.h" enum Spells { SPELL_EMERGE = 54850, SPELL_MIGHTY_BLOW = 54719, SPELL_MERGE = 54878, SPELL_SURGE = 54801, SPELL_FREEZE_ANIM = 16245, SPELL_MOJO_PUDDLE = 55627, H_SPELL_MOJO_PUDDLE = 58994, SPELL_MOJO_WAVE = 55626, H_SPELL_MOJO_WAVE = 58993 }; struct boss_drakkari_colossusAI : public ScriptedAI { boss_drakkari_colossusAI(Creature* pCreature) : ScriptedAI(pCreature) { pInstance = pCreature->GetInstanceData(); } ScriptedInstance* pInstance; bool bHealth; bool bHealth1; uint32 MightyBlowTimer; void Reset() { if (pInstance) pInstance->SetData(DATA_DRAKKARI_COLOSSUS_EVENT, NOT_STARTED); if (!me->HasFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_OOC_NOT_ATTACKABLE)) me->SetFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_OOC_NOT_ATTACKABLE); me->RemoveFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NON_ATTACKABLE); me->clearUnitState(UNIT_STAT_STUNNED | UNIT_STAT_ROOT); me->SetReactState(REACT_PASSIVE); MightyBlowTimer = 10*IN_MILLISECONDS; bHealth = false; bHealth1 = false; } void EnterCombat(Unit* /*who*/) { if (pInstance) pInstance->SetData(DATA_DRAKKARI_COLOSSUS_EVENT, IN_PROGRESS); } void 
CreatureState(Creature* pWho, bool bRestore = false) { if (!pWho) return; if (bRestore) { pWho->clearUnitState(UNIT_STAT_STUNNED | UNIT_STAT_ROOT); pWho->RemoveFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NON_ATTACKABLE); if (pWho == me) me->RemoveAura(SPELL_FREEZE_ANIM); }else { pWho->SetFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NON_ATTACKABLE); pWho->addUnitState(UNIT_STAT_STUNNED | UNIT_STAT_ROOT); if (pWho == me) DoCast(me,SPELL_FREEZE_ANIM); } } void UpdateAI(const uint32 diff) { //Return since we have no target if (!UpdateVictim()) return; if (!bHealth && HealthBelowPct(50) && !HealthBelowPct(5)) { CreatureState(me, false); DoCast(me,SPELL_FREEZE_ANIM); DoCast(me,SPELL_EMERGE); bHealth = true; } if (!bHealth1 && HealthBelowPct(5)) { DoCast(me,SPELL_EMERGE); CreatureState(me, false); bHealth1 = true; me->RemoveAllAuras(); } if (MightyBlowTimer <= diff) { DoCast(me->getVictim(), SPELL_MIGHTY_BLOW, true); MightyBlowTimer = 10*IN_MILLISECONDS; } else MightyBlowTimer -= diff; if (!me->hasUnitState(UNIT_STAT_STUNNED)) DoMeleeAttackIfReady(); } void JustDied(Unit* /*killer*/) { if (pInstance) pInstance->SetData(DATA_DRAKKARI_COLOSSUS_EVENT, DONE); } void JustSummoned(Creature* pSummon) { if (HealthBelowPct(5)) pSummon->DealDamage(pSummon, pSummon->GetHealth() * 0.5 , NULL, DIRECT_DAMAGE, SPELL_SCHOOL_MASK_NORMAL, NULL, false); pSummon->AI()->AttackStart(me->getVictim()); } }; struct boss_drakkari_elementalAI : public ScriptedAI { boss_drakkari_elementalAI(Creature* pCreature) : ScriptedAI(pCreature) { pInstance = pCreature->GetInstanceData(); } ScriptedInstance* pInstance; uint32 uiSurgeTimer; bool bGoToColossus; void Reset() { if (Creature *pColossus = Unit::GetCreature(*me, pInstance ? 
pInstance->GetData64(DATA_DRAKKARI_COLOSSUS) : 0)) CAST_AI(boss_drakkari_colossusAI, pColossus->AI())->CreatureState(me, true); uiSurgeTimer = 7*IN_MILLISECONDS; bGoToColossus = false; } void EnterEvadeMode() { me->RemoveFromWorld(); } void MovementInform(uint32 uiType, uint32 /*uiId*/) { if (uiType != POINT_MOTION_TYPE) return; if (Creature *pColossus = Unit::GetCreature(*me, pInstance ? pInstance->GetData64(DATA_DRAKKARI_COLOSSUS) : 0)) { CAST_AI(boss_drakkari_colossusAI, pColossus->AI())->CreatureState(pColossus, true); CAST_AI(boss_drakkari_colossusAI, pColossus->AI())->bHealth1 = false; } me->RemoveFromWorld(); } void UpdateAI(const uint32 diff) { //Return since we have no target if (!UpdateVictim()) return; if (!bGoToColossus && HealthBelowPct(50)) { if (Creature *pColossus = Unit::GetCreature(*me, pInstance ? pInstance->GetData64(DATA_DRAKKARI_COLOSSUS) : 0)) { if (!CAST_AI(boss_drakkari_colossusAI,pColossus->AI())->HealthBelowPct(6)) { me->InterruptNonMeleeSpells(true); DoCast(pColossus, SPELL_MERGE); bGoToColossus = true; } } } if (uiSurgeTimer <= diff) { DoCast(me->getVictim(), SPELL_SURGE); uiSurgeTimer = 7*IN_MILLISECONDS; } else uiSurgeTimer -= diff; DoMeleeAttackIfReady(); } void JustDied(Unit* /*killer*/) { if (Creature *pColossus = Unit::GetCreature(*me, pInstance ? pInstance->GetData64(DATA_DRAKKARI_COLOSSUS) : 0)) pColossus->Kill(pColossus); } }; struct npc_living_mojoAI : public ScriptedAI { npc_living_mojoAI(Creature* pCreature) : ScriptedAI(pCreature) { pInstance = pCreature->GetInstanceData(); } ScriptedInstance* pInstance; uint32 uiMojoWaveTimer; uint32 uiMojoPuddleTimer; void Reset() { uiMojoWaveTimer = 2*IN_MILLISECONDS; uiMojoPuddleTimer = 7*IN_MILLISECONDS; } void EnterCombat(Unit* /*who*/) { //Check if the npc is near of Drakkari Colossus. if (Creature *pColossus = Unit::GetCreature(*me, pInstance ? 
pInstance->GetData64(DATA_DRAKKARI_COLOSSUS) : 0)) { if (pColossus->isAlive() && me->IsInRange3d(pColossus->GetHomePosition().GetPositionX(),pColossus->GetHomePosition().GetPositionY(),pColossus->GetHomePosition().GetPositionZ(),0.0f,17.0f)) me->SetReactState(REACT_PASSIVE); else me->SetReactState(REACT_AGGRESSIVE); } } void DamageTaken(Unit* pDone_by, uint32& /*uiDamage*/) { if (me->HasReactState(REACT_PASSIVE)) { if (Creature *pColossus = Unit::GetCreature(*me, pInstance ? pInstance->GetData64(DATA_DRAKKARI_COLOSSUS) : 0)) { if (pColossus->isAlive() && !pColossus->isInCombat()) { pColossus->RemoveAura(SPELL_FREEZE_ANIM); pColossus->RemoveFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_OOC_NOT_ATTACKABLE); pColossus->SetReactState(REACT_AGGRESSIVE); if (pDone_by && pDone_by->isAlive()) pColossus->AI()->AttackStart(pDone_by); EnterEvadeMode(); } } } } void UpdateAI(const uint32 diff) { //Return since we have no target if (!UpdateVictim()) return; if (uiMojoWaveTimer <= diff) { DoCast(me->getVictim(), SPELL_MOJO_WAVE); uiMojoWaveTimer = 15*IN_MILLISECONDS; } else uiMojoWaveTimer -= diff; if (uiMojoPuddleTimer <= diff) { DoCast(me->getVictim(), SPELL_MOJO_PUDDLE); uiMojoPuddleTimer = 18*IN_MILLISECONDS; } else uiMojoPuddleTimer -= diff; DoMeleeAttackIfReady(); } }; CreatureAI* GetAI_boss_drakkari_colossus(Creature* pCreature) { return new boss_drakkari_colossusAI (pCreature); } CreatureAI* GetAI_boss_drakkari_elemental(Creature* pCreature) { return new boss_drakkari_elementalAI (pCreature); } CreatureAI* GetAI_npc_living_mojo(Creature* pCreature) { return new npc_living_mojoAI (pCreature); } void AddSC_boss_drakkari_colossus() { Script* newscript; newscript = new Script; newscript->Name = "boss_drakkari_colossus"; newscript->GetAI = &GetAI_boss_drakkari_colossus; newscript->RegisterSelf(); newscript = new Script; newscript->Name = "boss_drakkari_elemental"; newscript->GetAI = &GetAI_boss_drakkari_elemental; newscript->RegisterSelf(); newscript = new Script; newscript->Name = 
"npc_living_mojo"; newscript->GetAI = &GetAI_npc_living_mojo; newscript->RegisterSelf(); }
Java
/****************************************************************************** ** $Id$ ** ** Copyright (C) 2006-2007 ascolab GmbH. All Rights Reserved. ** Web: http://www.ascolab.com ** ** This program is free software; you can redistribute it and/or ** modify it under the terms of the GNU General Public License ** as published by the Free Software Foundation; either version 2 ** of the License, or (at your option) any later version. ** ** This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE ** WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. ** ** Project: OpcUa Wireshark Plugin ** ** Description: OpcUa Application Layer Decoder. ** ** Author: Gerhard Gappmeier <[email protected]> ** Last change by: $Author: gergap $ ** ******************************************************************************/ #include "config.h" #include <glib.h> #include <epan/packet.h> #include "opcua_simpletypes.h" #include "opcua_application_layer.h" /** NodeId encoding mask table */ static const value_string g_nodeidmasks[] = { { 0, "Two byte encoded Numeric" }, { 1, "Four byte encoded Numeric" }, { 2, "Numeric of arbitrary length" }, { 3, "String" }, { 4, "URI" }, { 5, "GUID" }, { 6, "ByteString" }, { 0x80, "UriMask" }, { 0, NULL } }; /** Service type table */ extern const value_string g_requesttypes[]; static int hf_opcua_nodeid_encodingmask = -1; static int hf_opcua_app_nsid = -1; static int hf_opcua_app_numeric = -1; /** Register application layer types. 
*/ void registerApplicationLayerTypes(int proto) { /** header field definitions */ static hf_register_info hf[] = { { &hf_opcua_nodeid_encodingmask, { "NodeId EncodingMask", "application.nodeid.encodingmask", FT_UINT8, BASE_HEX, VALS(g_nodeidmasks), 0x0, NULL, HFILL } }, { &hf_opcua_app_nsid, { "NodeId EncodingMask", "application.nodeid.nsid", FT_UINT8, BASE_DEC, NULL, 0x0, NULL, HFILL } }, { &hf_opcua_app_numeric, { "NodeId Identifier Numeric", "application.nodeid.numeric", FT_UINT32, BASE_DEC, VALS(g_requesttypes), 0x0, NULL, HFILL } } }; proto_register_field_array(proto, hf, array_length(hf)); } /** Parses an OpcUa Service NodeId and returns the service type. * In this cases the NodeId is always from type numeric and NSId = 0. */ int parseServiceNodeId(proto_tree *tree, tvbuff_t *tvb, gint *pOffset) { gint iOffset = *pOffset; guint8 EncodingMask; guint32 Numeric = 0; EncodingMask = tvb_get_guint8(tvb, iOffset); proto_tree_add_item(tree, hf_opcua_nodeid_encodingmask, tvb, iOffset, 1, ENC_LITTLE_ENDIAN); iOffset++; switch(EncodingMask) { case 0x00: /* two byte node id */ Numeric = tvb_get_guint8(tvb, iOffset); proto_tree_add_item(tree, hf_opcua_app_numeric, tvb, iOffset, 1, ENC_LITTLE_ENDIAN); iOffset+=1; break; case 0x01: /* four byte node id */ proto_tree_add_item(tree, hf_opcua_app_nsid, tvb, iOffset, 1, ENC_LITTLE_ENDIAN); iOffset+=1; Numeric = tvb_get_letohs(tvb, iOffset); proto_tree_add_item(tree, hf_opcua_app_numeric, tvb, iOffset, 2, ENC_LITTLE_ENDIAN); iOffset+=2; break; case 0x02: /* numeric, that does not fit into four bytes */ proto_tree_add_item(tree, hf_opcua_app_nsid, tvb, iOffset, 4, ENC_LITTLE_ENDIAN); iOffset+=4; Numeric = tvb_get_letohl(tvb, iOffset); proto_tree_add_item(tree, hf_opcua_app_numeric, tvb, iOffset, 4, ENC_LITTLE_ENDIAN); iOffset+=4; break; case 0x03: /* string */ case 0x04: /* uri */ case 0x05: /* guid */ case 0x06: /* byte string */ /* NOT USED */ break; }; *pOffset = iOffset; return Numeric; }
Java
/* Target-dependent code for SPARC. Copyright (C) 2003-2016 Free Software Foundation, Inc. This file is part of GDB. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ #ifndef SPARC_TDEP_H #define SPARC_TDEP_H 1 struct frame_info; struct gdbarch; struct regcache; struct regset; struct trad_frame_saved_reg; /* Register offsets for the general-purpose register set. */ struct sparc_gregmap { int r_psr_offset; int r_pc_offset; int r_npc_offset; int r_y_offset; int r_wim_offset; int r_tbr_offset; int r_g1_offset; int r_l0_offset; int r_y_size; }; struct sparc_fpregmap { int r_f0_offset; int r_fsr_offset; }; /* SPARC architecture-specific information. */ struct gdbarch_tdep { /* Register numbers for the PN and nPC registers. The definitions for (64-bit) UltraSPARC differ from the (32-bit) SPARC definitions. */ int pc_regnum; int npc_regnum; /* Register sets. */ const struct regset *gregset; size_t sizeof_gregset; const struct regset *fpregset; size_t sizeof_fpregset; /* Offset of saved PC in jmp_buf. */ int jb_pc_offset; /* Size of an Procedure Linkage Table (PLT) entry, 0 if we shouldn't treat the PLT special when doing prologue analysis. */ size_t plt_entry_size; /* Alternative location for trap return. Used for single-stepping. */ CORE_ADDR (*step_trap) (struct frame_info *frame, unsigned long insn); /* ISA-specific data types. 
*/ struct type *sparc_psr_type; struct type *sparc_fsr_type; struct type *sparc64_pstate_type; struct type *sparc64_fsr_type; struct type *sparc64_fprs_type; }; /* Register numbers of various important registers. */ enum sparc_regnum { SPARC_G0_REGNUM, /* %g0 */ SPARC_G1_REGNUM, SPARC_G2_REGNUM, SPARC_G3_REGNUM, SPARC_G4_REGNUM, SPARC_G5_REGNUM, SPARC_G6_REGNUM, SPARC_G7_REGNUM, /* %g7 */ SPARC_O0_REGNUM, /* %o0 */ SPARC_O1_REGNUM, SPARC_O2_REGNUM, SPARC_O3_REGNUM, SPARC_O4_REGNUM, SPARC_O5_REGNUM, SPARC_SP_REGNUM, /* %sp (%o6) */ SPARC_O7_REGNUM, /* %o7 */ SPARC_L0_REGNUM, /* %l0 */ SPARC_L1_REGNUM, SPARC_L2_REGNUM, SPARC_L3_REGNUM, SPARC_L4_REGNUM, SPARC_L5_REGNUM, SPARC_L6_REGNUM, SPARC_L7_REGNUM, /* %l7 */ SPARC_I0_REGNUM, /* %i0 */ SPARC_I1_REGNUM, SPARC_I2_REGNUM, SPARC_I3_REGNUM, SPARC_I4_REGNUM, SPARC_I5_REGNUM, SPARC_FP_REGNUM, /* %fp (%i6) */ SPARC_I7_REGNUM, /* %i7 */ SPARC_F0_REGNUM, /* %f0 */ SPARC_F1_REGNUM, SPARC_F2_REGNUM, SPARC_F3_REGNUM, SPARC_F4_REGNUM, SPARC_F5_REGNUM, SPARC_F6_REGNUM, SPARC_F7_REGNUM, SPARC_F31_REGNUM /* %f31 */ = SPARC_F0_REGNUM + 31 }; enum sparc32_regnum { SPARC32_Y_REGNUM /* %y */ = SPARC_F31_REGNUM + 1, SPARC32_PSR_REGNUM, /* %psr */ SPARC32_WIM_REGNUM, /* %wim */ SPARC32_TBR_REGNUM, /* %tbr */ SPARC32_PC_REGNUM, /* %pc */ SPARC32_NPC_REGNUM, /* %npc */ SPARC32_FSR_REGNUM, /* %fsr */ SPARC32_CSR_REGNUM, /* %csr */ /* Pseudo registers. */ SPARC32_D0_REGNUM, /* %d0 */ SPARC32_D30_REGNUM /* %d30 */ = SPARC32_D0_REGNUM + 15 }; struct sparc_frame_cache { /* Base address. */ CORE_ADDR base; CORE_ADDR pc; /* Do we have a frame? */ int frameless_p; /* The offset from the base register to the CFA. */ int frame_offset; /* Mask of `local' and `in' registers saved in the register save area. */ unsigned short int saved_regs_mask; /* Mask of `out' registers copied or renamed to their `in' sibling. */ unsigned char copied_regs_mask; /* Do we have a Structure, Union or Quad-Precision return value? 
*/ int struct_return_p; /* Table of saved registers. */ struct trad_frame_saved_reg *saved_regs; }; /* Fetch the instruction at PC. */ extern unsigned long sparc_fetch_instruction (CORE_ADDR pc); /* Fetch StackGhost Per-Process XOR cookie. */ extern ULONGEST sparc_fetch_wcookie (struct gdbarch *gdbarch); /* Record the effect of a SAVE instruction on CACHE. */ extern void sparc_record_save_insn (struct sparc_frame_cache *cache); /* Do a full analysis of the prologue at PC and update CACHE accordingly. */ extern CORE_ADDR sparc_analyze_prologue (struct gdbarch *gdbarch, CORE_ADDR pc, CORE_ADDR current_pc, struct sparc_frame_cache *cache); extern struct sparc_frame_cache * sparc_frame_cache (struct frame_info *this_frame, void **this_cache); extern struct sparc_frame_cache * sparc32_frame_cache (struct frame_info *this_frame, void **this_cache); extern int sparc_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc); extern void sparc_supply_rwindow (struct regcache *regcache, CORE_ADDR sp, int regnum); extern void sparc_collect_rwindow (const struct regcache *regcache, CORE_ADDR sp, int regnum); /* Register offsets for SunOS 4. */ extern const struct sparc_gregmap sparc32_sunos4_gregmap; extern const struct sparc_fpregmap sparc32_sunos4_fpregmap; extern const struct sparc_fpregmap sparc32_bsd_fpregmap; extern void sparc32_supply_gregset (const struct sparc_gregmap *gregmap, struct regcache *regcache, int regnum, const void *gregs); extern void sparc32_collect_gregset (const struct sparc_gregmap *gregmap, const struct regcache *regcache, int regnum, void *gregs); extern void sparc32_supply_fpregset (const struct sparc_fpregmap *fpregmap, struct regcache *regcache, int regnum, const void *fpregs); extern void sparc32_collect_fpregset (const struct sparc_fpregmap *fpregmap, const struct regcache *regcache, int regnum, void *fpregs); extern int sparc_is_annulled_branch_insn (CORE_ADDR pc); /* Functions and variables exported from sparc-sol2-tdep.c. 
*/ /* Register offsets for Solaris 2. */ extern const struct sparc_gregmap sparc32_sol2_gregmap; extern const struct sparc_fpregmap sparc32_sol2_fpregmap; extern int sparc_sol2_pc_in_sigtramp (CORE_ADDR pc, const char *name); extern const char *sparc_sol2_static_transform_name (const char *name); extern void sparc32_sol2_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch); /* Functions and variables exported from sparcnbsd-tdep.c. */ /* Register offsets for NetBSD. */ extern const struct sparc_gregmap sparc32nbsd_gregmap; /* Return the address of a system call's alternative return address. */ extern CORE_ADDR sparcnbsd_step_trap (struct frame_info *frame, unsigned long insn); extern void sparc32nbsd_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch); extern struct trad_frame_saved_reg * sparc32nbsd_sigcontext_saved_regs (struct frame_info *next_frame); #endif /* sparc-tdep.h */
Java
/* * drivers/video/radeonfb.c * framebuffer driver for ATI Radeon chipset video boards * * Copyright 2000 Ani Joshi <[email protected]> * * * ChangeLog: * 2000-08-03 initial version 0.0.1 * 2000-09-10 more bug fixes, public release 0.0.5 * 2001-02-19 mode bug fixes, 0.0.7 * 2001-07-05 fixed scrolling issues, engine initialization, * and minor mode tweaking, 0.0.9 * 2001-09-07 Radeon VE support, Nick Kurshev * blanking, pan_display, and cmap fixes, 0.1.0 * 2001-10-10 Radeon 7500 and 8500 support, and experimental * flat panel support, 0.1.1 * 2001-11-17 Radeon M6 (ppc) support, Daniel Berlin, 0.1.2 * 2001-11-18 DFP fixes, Kevin Hendricks, 0.1.3 * 2001-11-29 more cmap, backlight fixes, Benjamin Herrenschmidt * 2002-01-18 DFP panel detection via BIOS, Michael Clark, 0.1.4 * 2002-06-02 console switching, mode set fixes, accel fixes * 2002-06-03 MTRR support, Peter Horton, 0.1.5 * 2002-09-21 rv250, r300, m9 initial support, * added mirror option, 0.1.6 * * Special thanks to ATI DevRel team for their hardware donations. 
* */ #define RADEON_VERSION "0.1.6" #include <linux/config.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/tty.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/fb.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/vmalloc.h> #include <asm/io.h> #include <asm/uaccess.h> #if defined(__powerpc__) #include <asm/prom.h> #include <asm/pci-bridge.h> #include "macmodes.h" #ifdef CONFIG_NVRAM #include <linux/nvram.h> #endif #ifdef CONFIG_PMAC_BACKLIGHT #include <asm/backlight.h> #endif #ifdef CONFIG_BOOTX_TEXT #include <asm/btext.h> #endif #ifdef CONFIG_ADB_PMU #include <linux/adb.h> #include <linux/pmu.h> #endif #endif /* __powerpc__ */ #ifdef CONFIG_MTRR #include <asm/mtrr.h> #endif #include <video/radeon.h> #include <linux/radeonfb.h> #define DEBUG 1 #if DEBUG #define RTRACE printk #else #define RTRACE if(0) printk #endif // XXX #undef CONFIG_PMAC_PBOOK enum radeon_chips { RADEON_QD, RADEON_QE, RADEON_QF, RADEON_QG, RADEON_QY, RADEON_QZ, RADEON_LW, RADEON_LX, RADEON_LY, RADEON_LZ, RADEON_QL, RADEON_QN, RADEON_QO, RADEON_Ql, RADEON_BB, RADEON_QW, RADEON_QX, RADEON_Id, RADEON_Ie, RADEON_If, RADEON_Ig, RADEON_Ya, RADEON_Yd, RADEON_Ld, RADEON_Le, RADEON_Lf, RADEON_Lg, RADEON_ND, RADEON_NE, RADEON_NF, RADEON_NG, RADEON_QM }; enum radeon_arch { RADEON_R100, RADEON_RV100, RADEON_R200, RADEON_RV200, RADEON_RV250, RADEON_R300, RADEON_M6, RADEON_M7, RADEON_M9 }; static struct radeon_chip_info { const char *name; unsigned char arch; } radeon_chip_info[] __devinitdata = { { "QD", RADEON_R100 }, { "QE", RADEON_R100 }, { "QF", RADEON_R100 }, { "QG", RADEON_R100 }, { "VE QY", RADEON_RV100 }, { "VE QZ", RADEON_RV100 }, { "M7 LW", RADEON_M7 }, { "M7 LX", RADEON_M7 }, { "M6 LY", RADEON_M6 }, { "M6 LZ", RADEON_M6 }, { "8500 QL", RADEON_R200 }, { "8500 QN", RADEON_R200 }, { "8500 QO", RADEON_R200 }, { "8500 Ql", RADEON_R200 }, { "8500 
BB", RADEON_R200 }, { "7500 QW", RADEON_RV200 }, { "7500 QX", RADEON_RV200 }, { "9000 Id", RADEON_RV250 }, { "9000 Ie", RADEON_RV250 }, { "9000 If", RADEON_RV250 }, { "9000 Ig", RADEON_RV250 }, { "M9 Ld", RADEON_M9 }, { "M9 Le", RADEON_M9 }, { "M9 Lf", RADEON_M9 }, { "M9 Lg", RADEON_M9 }, { "9700 ND", RADEON_R300 }, { "9700 NE", RADEON_R300 }, { "9700 NF", RADEON_R300 }, { "9700 NG", RADEON_R300 }, { "9100 QM", RADEON_R200 } }; enum radeon_montype { MT_NONE, MT_CRT, /* CRT */ MT_LCD, /* LCD */ MT_DFP, /* DVI */ MT_CTV, /* composite TV */ MT_STV /* S-Video out */ }; static struct pci_device_id radeonfb_pci_table[] = { { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_QD}, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_QE}, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_QF}, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QG, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_QG}, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QY, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_QY}, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QZ, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_QZ}, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_LW, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_LW}, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_LX, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_LX}, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_LY, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_LY}, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_LZ, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_LZ}, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QL, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_QL}, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QN, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_QN}, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QO, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_QO}, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_Ql, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_Ql}, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_BB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
RADEON_BB}, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QW, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_QW}, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QX, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_QX}, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_Id, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_Id}, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_Ie, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_Ie}, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_If, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_If}, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_Ig, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_Ig}, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_Ya, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_Ya}, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_Yd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_Yd}, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_Ld, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_Ld}, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_Le, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_Le}, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_Lf, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_Lf}, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_Lg, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_Lg}, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_ND, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_ND}, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_NE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_NE}, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_NF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_NF}, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_NG, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_NG}, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QM, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_QM}, { 0, } }; MODULE_DEVICE_TABLE(pci, radeonfb_pci_table); typedef struct { u16 reg; u32 val; } reg_val; /* these common regs are cleared before mode setting so they do not * interfere with anything */ static reg_val common_regs[] = { { OVR_CLR, 0 }, { OVR_WID_LEFT_RIGHT, 0 }, { OVR_WID_TOP_BOTTOM, 0 }, { OV0_SCALE_CNTL, 0 }, { SUBPIC_CNTL, 0 }, { VIPH_CONTROL, 0 }, { I2C_CNTL_1, 0 }, { GEN_INT_CNTL, 0 }, { CAP0_TRIG_CNTL, 
0 }, }; static reg_val common_regs_m6[] = { { OVR_CLR, 0 }, { OVR_WID_LEFT_RIGHT, 0 }, { OVR_WID_TOP_BOTTOM, 0 }, { OV0_SCALE_CNTL, 0 }, { SUBPIC_CNTL, 0 }, { GEN_INT_CNTL, 0 }, { CAP0_TRIG_CNTL, 0 } }; typedef struct { u8 clock_chip_type; u8 struct_size; u8 accelerator_entry; u8 VGA_entry; u16 VGA_table_offset; u16 POST_table_offset; u16 XCLK; u16 MCLK; u8 num_PLL_blocks; u8 size_PLL_blocks; u16 PCLK_ref_freq; u16 PCLK_ref_divider; u32 PCLK_min_freq; u32 PCLK_max_freq; u16 MCLK_ref_freq; u16 MCLK_ref_divider; u32 MCLK_min_freq; u32 MCLK_max_freq; u16 XCLK_ref_freq; u16 XCLK_ref_divider; u32 XCLK_min_freq; u32 XCLK_max_freq; } __attribute__ ((packed)) PLL_BLOCK; struct pll_info { int ppll_max; int ppll_min; int xclk; int ref_div; int ref_clk; }; struct ram_info { int ml; int mb; int trcd; int trp; int twr; int cl; int tr2w; int loop_latency; int rloop; }; struct radeon_regs { /* CRTC regs */ u32 crtc_h_total_disp; u32 crtc_h_sync_strt_wid; u32 crtc_v_total_disp; u32 crtc_v_sync_strt_wid; u32 crtc_pitch; u32 crtc_gen_cntl; u32 crtc_ext_cntl; u32 dac_cntl; u32 flags; u32 pix_clock; int xres, yres; /* DDA regs */ u32 dda_config; u32 dda_on_off; /* PLL regs */ u32 ppll_div_3; u32 ppll_ref_div; u32 vclk_ecp_cntl; /* Flat panel regs */ u32 fp_crtc_h_total_disp; u32 fp_crtc_v_total_disp; u32 fp_gen_cntl; u32 fp_h_sync_strt_wid; u32 fp_horz_stretch; u32 fp_panel_cntl; u32 fp_v_sync_strt_wid; u32 fp_vert_stretch; u32 lvds_gen_cntl; u32 lvds_pll_cntl; u32 tmds_crc; u32 tmds_transmitter_cntl; #if defined(__BIG_ENDIAN) u32 surface_cntl; #endif }; struct radeonfb_info { struct fb_info info; struct radeon_regs state; struct radeon_regs init_state; char name[32]; char ram_type[12]; unsigned long mmio_base_phys; unsigned long fb_base_phys; void __iomem *mmio_base; void __iomem *fb_base; struct pci_dev *pdev; unsigned char *EDID; unsigned char __iomem *bios_seg; u32 pseudo_palette[17]; struct { u8 red, green, blue, pad; } palette[256]; int chipset; unsigned char arch; int 
video_ram; u8 rev; int pitch, bpp, depth; int xres, yres, pixclock; int xres_virtual, yres_virtual; u32 accel_flags; int use_default_var; int got_dfpinfo; int hasCRTC2; int crtDisp_type; int dviDisp_type; int panel_xres, panel_yres; int clock; int hOver_plus, hSync_width, hblank; int vOver_plus, vSync_width, vblank; int hAct_high, vAct_high, interlaced; int synct, misc; u32 dp_gui_master_cntl; struct pll_info pll; int pll_output_freq, post_div, fb_div; struct ram_info ram; int mtrr_hdl; #ifdef CONFIG_PMAC_PBOOK int pm_reg; u32 save_regs[64]; u32 mdll, mdll2; #endif /* CONFIG_PMAC_PBOOK */ int asleep; struct radeonfb_info *next; }; static struct fb_var_screeninfo radeonfb_default_var = { 640, 480, 640, 480, 0, 0, 8, 0, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0, 0, -1, -1, 0, 39721, 40, 24, 32, 11, 96, 2, 0, FB_VMODE_NONINTERLACED }; /* * IO macros */ #define INREG8(addr) readb((rinfo->mmio_base)+addr) #define OUTREG8(addr,val) writeb(val, (rinfo->mmio_base)+addr) #define INREG(addr) readl((rinfo->mmio_base)+addr) #define OUTREG(addr,val) writel(val, (rinfo->mmio_base)+addr) #define OUTPLL(addr,val) \ do { \ OUTREG8(CLOCK_CNTL_INDEX, (addr & 0x0000003f) | 0x00000080); \ OUTREG(CLOCK_CNTL_DATA, val); \ } while(0) #define OUTPLLP(addr,val,mask) \ do { \ unsigned int _tmp = INPLL(addr); \ _tmp &= (mask); \ _tmp |= (val); \ OUTPLL(addr, _tmp); \ } while (0) #define OUTREGP(addr,val,mask) \ do { \ unsigned int _tmp = INREG(addr); \ _tmp &= (mask); \ _tmp |= (val); \ OUTREG(addr, _tmp); \ } while (0) static __inline__ u32 _INPLL(struct radeonfb_info *rinfo, u32 addr) { OUTREG8(CLOCK_CNTL_INDEX, addr & 0x0000003f); return (INREG(CLOCK_CNTL_DATA)); } #define INPLL(addr) _INPLL(rinfo, addr) #define PRIMARY_MONITOR(rinfo) ((rinfo->dviDisp_type != MT_NONE) && \ (rinfo->dviDisp_type != MT_STV) && \ (rinfo->dviDisp_type != MT_CTV) ? 
\ rinfo->dviDisp_type : rinfo->crtDisp_type) static char *GET_MON_NAME(int type) { char *pret = NULL; switch (type) { case MT_NONE: pret = "no"; break; case MT_CRT: pret = "CRT"; break; case MT_DFP: pret = "DFP"; break; case MT_LCD: pret = "LCD"; break; case MT_CTV: pret = "CTV"; break; case MT_STV: pret = "STV"; break; } return pret; } /* * 2D engine routines */ static __inline__ void radeon_engine_flush (struct radeonfb_info *rinfo) { int i; /* initiate flush */ OUTREGP(RB2D_DSTCACHE_CTLSTAT, RB2D_DC_FLUSH_ALL, ~RB2D_DC_FLUSH_ALL); for (i=0; i < 2000000; i++) { if (!(INREG(RB2D_DSTCACHE_CTLSTAT) & RB2D_DC_BUSY)) break; } } static __inline__ void _radeon_fifo_wait (struct radeonfb_info *rinfo, int entries) { int i; for (i=0; i<2000000; i++) if ((INREG(RBBM_STATUS) & 0x7f) >= entries) return; } static __inline__ void _radeon_engine_idle (struct radeonfb_info *rinfo) { int i; /* ensure FIFO is empty before waiting for idle */ _radeon_fifo_wait (rinfo, 64); for (i=0; i<2000000; i++) { if (((INREG(RBBM_STATUS) & GUI_ACTIVE)) == 0) { radeon_engine_flush (rinfo); return; } } } #define radeon_engine_idle() _radeon_engine_idle(rinfo) #define radeon_fifo_wait(entries) _radeon_fifo_wait(rinfo,entries) /* * helper routines */ static __inline__ u32 radeon_get_dstbpp(u16 depth) { switch (depth) { case 8: return DST_8BPP; case 15: return DST_15BPP; case 16: return DST_16BPP; case 32: return DST_32BPP; default: return 0; } } static inline int var_to_depth(const struct fb_var_screeninfo *var) { if (var->bits_per_pixel != 16) return var->bits_per_pixel; return (var->green.length == 6) ? 
16 : 15; } static void _radeon_engine_reset(struct radeonfb_info *rinfo) { u32 clock_cntl_index, mclk_cntl, rbbm_soft_reset; radeon_engine_flush (rinfo); clock_cntl_index = INREG(CLOCK_CNTL_INDEX); mclk_cntl = INPLL(MCLK_CNTL); OUTPLL(MCLK_CNTL, (mclk_cntl | FORCEON_MCLKA | FORCEON_MCLKB | FORCEON_YCLKA | FORCEON_YCLKB | FORCEON_MC | FORCEON_AIC)); rbbm_soft_reset = INREG(RBBM_SOFT_RESET); OUTREG(RBBM_SOFT_RESET, rbbm_soft_reset | SOFT_RESET_CP | SOFT_RESET_HI | SOFT_RESET_SE | SOFT_RESET_RE | SOFT_RESET_PP | SOFT_RESET_E2 | SOFT_RESET_RB); INREG(RBBM_SOFT_RESET); OUTREG(RBBM_SOFT_RESET, rbbm_soft_reset & (u32) ~(SOFT_RESET_CP | SOFT_RESET_HI | SOFT_RESET_SE | SOFT_RESET_RE | SOFT_RESET_PP | SOFT_RESET_E2 | SOFT_RESET_RB)); INREG(RBBM_SOFT_RESET); OUTPLL(MCLK_CNTL, mclk_cntl); OUTREG(CLOCK_CNTL_INDEX, clock_cntl_index); OUTREG(RBBM_SOFT_RESET, rbbm_soft_reset); return; } #define radeon_engine_reset() _radeon_engine_reset(rinfo) static __inline__ u8 radeon_get_post_div_bitval(int post_div) { switch (post_div) { case 1: return 0x00; case 2: return 0x01; case 3: return 0x04; case 4: return 0x02; case 6: return 0x06; case 8: return 0x03; case 12: return 0x07; default: return 0x02; } } static __inline__ int round_div(int num, int den) { return (num + (den / 2)) / den; } static __inline__ int min_bits_req(int val) { int bits_req = 0; if (val == 0) bits_req = 1; while (val) { val >>= 1; bits_req++; } return (bits_req); } static __inline__ int _max(int val1, int val2) { if (val1 >= val2) return val1; else return val2; } /* * globals */ static char *mode_option __initdata; static char noaccel = 0; static char mirror = 0; static int panel_yres __initdata = 0; static char force_dfp __initdata = 0; static struct radeonfb_info *board_list = NULL; static char nomtrr __initdata = 0; /* * prototypes */ static void radeon_save_state (struct radeonfb_info *rinfo, struct radeon_regs *save); static void radeon_engine_init (struct radeonfb_info *rinfo); static void radeon_write_mode 
(struct radeonfb_info *rinfo, struct radeon_regs *mode); static int __devinit radeon_set_fbinfo (struct radeonfb_info *rinfo); static int __devinit radeon_init_disp (struct radeonfb_info *rinfo); static int radeon_init_disp_var (struct radeonfb_info *rinfo, struct fb_var_screeninfo *var); static void __iomem *radeon_find_rom(struct radeonfb_info *rinfo); static void radeon_get_pllinfo(struct radeonfb_info *rinfo, void __iomem *bios_seg); static void radeon_get_moninfo (struct radeonfb_info *rinfo); static int radeon_get_dfpinfo (struct radeonfb_info *rinfo); static int radeon_get_dfpinfo_BIOS(struct radeonfb_info *rinfo); static void radeon_get_EDID(struct radeonfb_info *rinfo); static int radeon_dfp_parse_EDID(struct radeonfb_info *rinfo); static void radeon_update_default_var(struct radeonfb_info *rinfo); #ifdef CONFIG_PPC_OF static int radeon_read_OF (struct radeonfb_info *rinfo); static int radeon_get_EDID_OF(struct radeonfb_info *rinfo); extern struct device_node *pci_device_to_OF_node(struct pci_dev *dev); #ifdef CONFIG_PMAC_PBOOK int radeon_sleep_notify(struct pmu_sleep_notifier *self, int when); static struct pmu_sleep_notifier radeon_sleep_notifier = { radeon_sleep_notify, SLEEP_LEVEL_VIDEO, }; #endif /* CONFIG_PMAC_PBOOK */ #ifdef CONFIG_PMAC_BACKLIGHT static int radeon_set_backlight_enable(int on, int level, void *data); static int radeon_set_backlight_level(int level, void *data); static struct backlight_controller radeon_backlight_controller = { radeon_set_backlight_enable, radeon_set_backlight_level }; #endif /* CONFIG_PMAC_BACKLIGHT */ #endif /* CONFIG_PPC_OF */ static void __iomem *radeon_find_rom(struct radeonfb_info *rinfo) { #if defined(__i386__) u32 segstart; char __iomem *rom_base; char __iomem *rom; int stage; int i,j; char aty_rom_sig[] = "761295520"; char *radeon_sig[] = { "RG6", "RADEON" }; for(segstart=0x000c0000; segstart<0x000f0000; segstart+=0x00001000) { stage = 1; rom_base = ioremap(segstart, 0x1000); if ((*rom_base == 0x55) && 
(((*(rom_base + 1)) & 0xff) == 0xaa)) stage = 2; if (stage != 2) { iounmap(rom_base); continue; } rom = rom_base; for (i = 0; (i < 128 - strlen(aty_rom_sig)) && (stage != 3); i++) { if (aty_rom_sig[0] == *rom) if (strncmp(aty_rom_sig, rom, strlen(aty_rom_sig)) == 0) stage = 3; rom++; } if (stage != 3) { iounmap(rom_base); continue; } rom = rom_base; for (i = 0; (i < 512) && (stage != 4); i++) { for(j = 0;j < sizeof(radeon_sig)/sizeof(char *);j++) { if (radeon_sig[j][0] == *rom) if (strncmp(radeon_sig[j], rom, strlen(radeon_sig[j])) == 0) { stage = 4; break; } } rom++; } if (stage != 4) { iounmap(rom_base); continue; } return rom_base; } #endif return NULL; } static void radeon_get_pllinfo(struct radeonfb_info *rinfo, void __iomem *bios_seg) { void __iomem *bios_header; void __iomem *header_ptr; u16 bios_header_offset, pll_info_offset; PLL_BLOCK pll; if (bios_seg) { bios_header = bios_seg + 0x48L; header_ptr = bios_header; bios_header_offset = readw(header_ptr); bios_header = bios_seg + bios_header_offset; bios_header += 0x30; header_ptr = bios_header; pll_info_offset = readw(header_ptr); header_ptr = bios_seg + pll_info_offset; memcpy_fromio(&pll, header_ptr, 50); rinfo->pll.xclk = (u32)pll.XCLK; rinfo->pll.ref_clk = (u32)pll.PCLK_ref_freq; rinfo->pll.ref_div = (u32)pll.PCLK_ref_divider; rinfo->pll.ppll_min = pll.PCLK_min_freq; rinfo->pll.ppll_max = pll.PCLK_max_freq; printk("radeonfb: ref_clk=%d, ref_div=%d, xclk=%d from BIOS\n", rinfo->pll.ref_clk, rinfo->pll.ref_div, rinfo->pll.xclk); } else { #ifdef CONFIG_PPC_OF if (radeon_read_OF(rinfo)) { unsigned int tmp, Nx, M, ref_div, xclk; tmp = INPLL(X_MPLL_REF_FB_DIV); ref_div = INPLL(PPLL_REF_DIV) & 0x3ff; Nx = (tmp & 0xff00) >> 8; M = (tmp & 0xff); xclk = ((((2 * Nx * rinfo->pll.ref_clk) + (M)) / (2 * M))); rinfo->pll.xclk = xclk; rinfo->pll.ref_div = ref_div; rinfo->pll.ppll_min = 12000; rinfo->pll.ppll_max = 35000; printk("radeonfb: ref_clk=%d, ref_div=%d, xclk=%d from OF\n", rinfo->pll.ref_clk, 
rinfo->pll.ref_div, rinfo->pll.xclk); return; } #endif /* no BIOS or BIOS not found, use defaults */ switch (rinfo->chipset) { case PCI_DEVICE_ID_ATI_RADEON_QW: case PCI_DEVICE_ID_ATI_RADEON_QX: rinfo->pll.ppll_max = 35000; rinfo->pll.ppll_min = 12000; rinfo->pll.xclk = 23000; rinfo->pll.ref_div = 12; rinfo->pll.ref_clk = 2700; break; case PCI_DEVICE_ID_ATI_RADEON_QL: case PCI_DEVICE_ID_ATI_RADEON_QN: case PCI_DEVICE_ID_ATI_RADEON_QO: case PCI_DEVICE_ID_ATI_RADEON_Ql: case PCI_DEVICE_ID_ATI_RADEON_BB: rinfo->pll.ppll_max = 35000; rinfo->pll.ppll_min = 12000; rinfo->pll.xclk = 27500; rinfo->pll.ref_div = 12; rinfo->pll.ref_clk = 2700; break; case PCI_DEVICE_ID_ATI_RADEON_Id: case PCI_DEVICE_ID_ATI_RADEON_Ie: case PCI_DEVICE_ID_ATI_RADEON_If: case PCI_DEVICE_ID_ATI_RADEON_Ig: rinfo->pll.ppll_max = 35000; rinfo->pll.ppll_min = 12000; rinfo->pll.xclk = 25000; rinfo->pll.ref_div = 12; rinfo->pll.ref_clk = 2700; break; case PCI_DEVICE_ID_ATI_RADEON_ND: case PCI_DEVICE_ID_ATI_RADEON_NE: case PCI_DEVICE_ID_ATI_RADEON_NF: case PCI_DEVICE_ID_ATI_RADEON_NG: rinfo->pll.ppll_max = 40000; rinfo->pll.ppll_min = 20000; rinfo->pll.xclk = 27000; rinfo->pll.ref_div = 12; rinfo->pll.ref_clk = 2700; break; case PCI_DEVICE_ID_ATI_RADEON_QD: case PCI_DEVICE_ID_ATI_RADEON_QE: case PCI_DEVICE_ID_ATI_RADEON_QF: case PCI_DEVICE_ID_ATI_RADEON_QG: default: rinfo->pll.ppll_max = 35000; rinfo->pll.ppll_min = 12000; rinfo->pll.xclk = 16600; rinfo->pll.ref_div = 67; rinfo->pll.ref_clk = 2700; break; } printk("radeonfb: ref_clk=%d, ref_div=%d, xclk=%d defaults\n", rinfo->pll.ref_clk, rinfo->pll.ref_div, rinfo->pll.xclk); } } static void radeon_get_moninfo (struct radeonfb_info *rinfo) { unsigned int tmp; if (force_dfp) { rinfo->dviDisp_type = MT_DFP; return; } tmp = INREG(BIOS_4_SCRATCH); printk(KERN_DEBUG "radeon_get_moninfo: bios 4 scratch = %x\n", tmp); if (rinfo->hasCRTC2) { /* primary DVI port */ if (tmp & 0x08) rinfo->dviDisp_type = MT_DFP; else if (tmp & 0x4) rinfo->dviDisp_type = MT_LCD; 
else if (tmp & 0x200) rinfo->dviDisp_type = MT_CRT; else if (tmp & 0x10) rinfo->dviDisp_type = MT_CTV; else if (tmp & 0x20) rinfo->dviDisp_type = MT_STV; /* secondary CRT port */ if (tmp & 0x2) rinfo->crtDisp_type = MT_CRT; else if (tmp & 0x800) rinfo->crtDisp_type = MT_DFP; else if (tmp & 0x400) rinfo->crtDisp_type = MT_LCD; else if (tmp & 0x1000) rinfo->crtDisp_type = MT_CTV; else if (tmp & 0x2000) rinfo->crtDisp_type = MT_STV; } else { rinfo->dviDisp_type = MT_NONE; tmp = INREG(FP_GEN_CNTL); if (tmp & FP_EN_TMDS) rinfo->crtDisp_type = MT_DFP; else rinfo->crtDisp_type = MT_CRT; } } static void radeon_get_EDID(struct radeonfb_info *rinfo) { #ifdef CONFIG_PPC_OF if (!radeon_get_EDID_OF(rinfo)) RTRACE("radeonfb: could not retrieve EDID from OF\n"); #else /* XXX use other methods later */ #endif } #ifdef CONFIG_PPC_OF static int radeon_get_EDID_OF(struct radeonfb_info *rinfo) { struct device_node *dp; unsigned char *pedid = NULL; static char *propnames[] = { "DFP,EDID", "LCD,EDID", "EDID", "EDID1", NULL }; int i; dp = pci_device_to_OF_node(rinfo->pdev); while (dp != NULL) { for (i = 0; propnames[i] != NULL; ++i) { pedid = (unsigned char *) get_property(dp, propnames[i], NULL); if (pedid != NULL) { rinfo->EDID = pedid; return 1; } } dp = dp->child; } return 0; } #endif /* CONFIG_PPC_OF */ static int radeon_dfp_parse_EDID(struct radeonfb_info *rinfo) { unsigned char *block = rinfo->EDID; if (!block) return 0; /* jump to the detailed timing block section */ block += 54; rinfo->clock = (block[0] + (block[1] << 8)); rinfo->panel_xres = (block[2] + ((block[4] & 0xf0) << 4)); rinfo->hblank = (block[3] + ((block[4] & 0x0f) << 8)); rinfo->panel_yres = (block[5] + ((block[7] & 0xf0) << 4)); rinfo->vblank = (block[6] + ((block[7] & 0x0f) << 8)); rinfo->hOver_plus = (block[8] + ((block[11] & 0xc0) << 2)); rinfo->hSync_width = (block[9] + ((block[11] & 0x30) << 4)); rinfo->vOver_plus = ((block[10] >> 4) + ((block[11] & 0x0c) << 2)); rinfo->vSync_width = ((block[10] & 0x0f) + 
((block[11] & 0x03) << 4)); rinfo->interlaced = ((block[17] & 0x80) >> 7); rinfo->synct = ((block[17] & 0x18) >> 3); rinfo->misc = ((block[17] & 0x06) >> 1); rinfo->hAct_high = rinfo->vAct_high = 0; if (rinfo->synct == 3) { if (rinfo->misc & 2) rinfo->hAct_high = 1; if (rinfo->misc & 1) rinfo->vAct_high = 1; } printk("radeonfb: detected DFP panel size from EDID: %dx%d\n", rinfo->panel_xres, rinfo->panel_yres); rinfo->got_dfpinfo = 1; return 1; } static void radeon_update_default_var(struct radeonfb_info *rinfo) { struct fb_var_screeninfo *var = &radeonfb_default_var; var->xres = rinfo->panel_xres; var->yres = rinfo->panel_yres; var->xres_virtual = rinfo->panel_xres; var->yres_virtual = rinfo->panel_yres; var->xoffset = var->yoffset = 0; var->bits_per_pixel = 8; var->pixclock = 100000000 / rinfo->clock; var->left_margin = (rinfo->hblank - rinfo->hOver_plus - rinfo->hSync_width); var->right_margin = rinfo->hOver_plus; var->upper_margin = (rinfo->vblank - rinfo->vOver_plus - rinfo->vSync_width); var->lower_margin = rinfo->vOver_plus; var->hsync_len = rinfo->hSync_width; var->vsync_len = rinfo->vSync_width; var->sync = 0; if (rinfo->synct == 3) { if (rinfo->hAct_high) var->sync |= FB_SYNC_HOR_HIGH_ACT; if (rinfo->vAct_high) var->sync |= FB_SYNC_VERT_HIGH_ACT; } var->vmode = 0; if (rinfo->interlaced) var->vmode |= FB_VMODE_INTERLACED; rinfo->use_default_var = 1; } static int radeon_get_dfpinfo_BIOS(struct radeonfb_info *rinfo) { char __iomem *fpbiosstart, *tmp, *tmp0; char stmp[30]; int i; if (!rinfo->bios_seg) return 0; if (!(fpbiosstart = rinfo->bios_seg + readw(rinfo->bios_seg + 0x48))) { printk("radeonfb: Failed to detect DFP panel info using BIOS\n"); return 0; } if (!(tmp = rinfo->bios_seg + readw(fpbiosstart + 0x40))) { printk("radeonfb: Failed to detect DFP panel info using BIOS\n"); return 0; } for(i=0; i<24; i++) stmp[i] = readb(tmp+i+1); stmp[24] = 0; printk("radeonfb: panel ID string: %s\n", stmp); rinfo->panel_xres = readw(tmp + 25); rinfo->panel_yres = 
readw(tmp + 27); printk("radeonfb: detected DFP panel size from BIOS: %dx%d\n", rinfo->panel_xres, rinfo->panel_yres); for(i=0; i<32; i++) { tmp0 = rinfo->bios_seg + readw(tmp+64+i*2); if (tmp0 == 0) break; if ((readw(tmp0) == rinfo->panel_xres) && (readw(tmp0+2) == rinfo->panel_yres)) { rinfo->hblank = (readw(tmp0+17) - readw(tmp0+19)) * 8; rinfo->hOver_plus = ((readw(tmp0+21) - readw(tmp0+19) -1) * 8) & 0x7fff; rinfo->hSync_width = readb(tmp0+23) * 8; rinfo->vblank = readw(tmp0+24) - readw(tmp0+26); rinfo->vOver_plus = (readw(tmp0+28) & 0x7ff) - readw(tmp0+26); rinfo->vSync_width = (readw(tmp0+28) & 0xf800) >> 11; rinfo->clock = readw(tmp0+9); rinfo->got_dfpinfo = 1; return 1; } } return 0; } static int radeon_get_dfpinfo (struct radeonfb_info *rinfo) { unsigned int tmp; unsigned short a, b; if (radeon_get_dfpinfo_BIOS(rinfo)) radeon_update_default_var(rinfo); if (radeon_dfp_parse_EDID(rinfo)) radeon_update_default_var(rinfo); if (!rinfo->got_dfpinfo) { /* * it seems all else has failed now and we * resort to probing registers for our DFP info */ if (panel_yres) { rinfo->panel_yres = panel_yres; } else { tmp = INREG(FP_VERT_STRETCH); tmp &= 0x00fff000; rinfo->panel_yres = (unsigned short)(tmp >> 0x0c) + 1; } switch (rinfo->panel_yres) { case 480: rinfo->panel_xres = 640; break; case 600: rinfo->panel_xres = 800; break; case 768: #if defined(__powerpc__) if (rinfo->dviDisp_type == MT_LCD) rinfo->panel_xres = 1152; else #endif rinfo->panel_xres = 1024; break; case 1024: rinfo->panel_xres = 1280; break; case 1050: rinfo->panel_xres = 1400; break; case 1200: rinfo->panel_xres = 1600; break; default: printk("radeonfb: Failed to detect DFP panel size\n"); return 0; } printk("radeonfb: detected DFP panel size from registers: %dx%d\n", rinfo->panel_xres, rinfo->panel_yres); tmp = INREG(FP_CRTC_H_TOTAL_DISP); a = (tmp & FP_CRTC_H_TOTAL_MASK) + 4; b = (tmp & 0x01ff0000) >> FP_CRTC_H_DISP_SHIFT; rinfo->hblank = (a - b + 1) * 8; tmp = INREG(FP_H_SYNC_STRT_WID); 
rinfo->hOver_plus = (unsigned short) ((tmp & FP_H_SYNC_STRT_CHAR_MASK) >> FP_H_SYNC_STRT_CHAR_SHIFT) - b - 1; rinfo->hOver_plus *= 8; rinfo->hSync_width = (unsigned short) ((tmp & FP_H_SYNC_WID_MASK) >> FP_H_SYNC_WID_SHIFT); rinfo->hSync_width *= 8; tmp = INREG(FP_CRTC_V_TOTAL_DISP); a = (tmp & FP_CRTC_V_TOTAL_MASK) + 1; b = (tmp & FP_CRTC_V_DISP_MASK) >> FP_CRTC_V_DISP_SHIFT; rinfo->vblank = a - b /* + 24 */ ; tmp = INREG(FP_V_SYNC_STRT_WID); rinfo->vOver_plus = (unsigned short) (tmp & FP_V_SYNC_STRT_MASK) - b + 1; rinfo->vSync_width = (unsigned short) ((tmp & FP_V_SYNC_WID_MASK) >> FP_V_SYNC_WID_SHIFT); return 1; } return 1; } #ifdef CONFIG_PPC_OF static int radeon_read_OF (struct radeonfb_info *rinfo) { struct device_node *dp; unsigned int *xtal; dp = pci_device_to_OF_node(rinfo->pdev); xtal = (unsigned int *) get_property(dp, "ATY,RefCLK", NULL); rinfo->pll.ref_clk = *xtal / 10; if (*xtal) return 1; else return 0; } #endif static void radeon_engine_init (struct radeonfb_info *rinfo) { u32 temp; /* disable 3D engine */ OUTREG(RB3D_CNTL, 0); radeon_engine_reset (); radeon_fifo_wait (1); OUTREG(RB2D_DSTCACHE_MODE, 0); radeon_fifo_wait (1); temp = INREG(DEFAULT_PITCH_OFFSET); OUTREG(DEFAULT_PITCH_OFFSET, ((temp & 0xc0000000) | (rinfo->pitch << 0x16))); radeon_fifo_wait (1); OUTREGP(DP_DATATYPE, 0, ~HOST_BIG_ENDIAN_EN); radeon_fifo_wait (1); OUTREG(DEFAULT_SC_BOTTOM_RIGHT, (DEFAULT_SC_RIGHT_MAX | DEFAULT_SC_BOTTOM_MAX)); temp = radeon_get_dstbpp(rinfo->depth); rinfo->dp_gui_master_cntl = ((temp << 8) | GMC_CLR_CMP_CNTL_DIS); radeon_fifo_wait (1); OUTREG(DP_GUI_MASTER_CNTL, (rinfo->dp_gui_master_cntl | GMC_BRUSH_SOLID_COLOR | GMC_SRC_DATATYPE_COLOR)); radeon_fifo_wait (7); /* clear line drawing regs */ OUTREG(DST_LINE_START, 0); OUTREG(DST_LINE_END, 0); /* set brush color regs */ OUTREG(DP_BRUSH_FRGD_CLR, 0xffffffff); OUTREG(DP_BRUSH_BKGD_CLR, 0x00000000); /* set source color regs */ OUTREG(DP_SRC_FRGD_CLR, 0xffffffff); OUTREG(DP_SRC_BKGD_CLR, 0x00000000); /* default 
write mask */ OUTREG(DP_WRITE_MSK, 0xffffffff); radeon_engine_idle (); } static int __devinit radeon_init_disp (struct radeonfb_info *rinfo) { struct fb_info *info = &rinfo->info; struct fb_var_screeninfo var; var = radeonfb_default_var; if ((radeon_init_disp_var(rinfo, &var)) < 0) return -1; rinfo->depth = var_to_depth(&var); rinfo->bpp = var.bits_per_pixel; info->var = var; fb_alloc_cmap(&info->cmap, 256, 0); var.activate = FB_ACTIVATE_NOW; return 0; } static int radeon_init_disp_var (struct radeonfb_info *rinfo, struct fb_var_screeninfo *var) { #ifndef MODULE if (mode_option) fb_find_mode (var, &rinfo->info, mode_option, NULL, 0, NULL, 8); else #endif if (rinfo->use_default_var) /* We will use the modified default far */ *var = radeonfb_default_var; else fb_find_mode (var, &rinfo->info, "640x480-8@60", NULL, 0, NULL, 0); if (noaccel) var->accel_flags &= ~FB_ACCELF_TEXT; else var->accel_flags |= FB_ACCELF_TEXT; return 0; } static int radeon_do_maximize(struct radeonfb_info *rinfo, struct fb_var_screeninfo *var, struct fb_var_screeninfo *v, int nom, int den) { static struct { int xres, yres; } modes[] = { {1600, 1280}, {1280, 1024}, {1024, 768}, {800, 600}, {640, 480}, {-1, -1} }; int i; /* use highest possible virtual resolution */ if (v->xres_virtual == -1 && v->yres_virtual == -1) { printk("radeonfb: using max available virtual resolution\n"); for (i=0; modes[i].xres != -1; i++) { if (modes[i].xres * nom / den * modes[i].yres < rinfo->video_ram / 2) break; } if (modes[i].xres == -1) { printk("radeonfb: could not find virtual resolution that fits into video memory!\n"); return -EINVAL; } v->xres_virtual = modes[i].xres; v->yres_virtual = modes[i].yres; printk("radeonfb: virtual resolution set to max of %dx%d\n", v->xres_virtual, v->yres_virtual); } else if (v->xres_virtual == -1) { v->xres_virtual = (rinfo->video_ram * den / (nom * v->yres_virtual * 2)) & ~15; } else if (v->yres_virtual == -1) { v->xres_virtual = (v->xres_virtual + 15) & ~15; v->yres_virtual = 
rinfo->video_ram * den / (nom * v->xres_virtual *2); } else { if (v->xres_virtual * nom / den * v->yres_virtual > rinfo->video_ram) { return -EINVAL; } } if (v->xres_virtual * nom / den >= 8192) { v->xres_virtual = 8192 * den / nom - 16; } if (v->xres_virtual < v->xres) return -EINVAL; if (v->yres_virtual < v->yres) return -EINVAL; return 0; } static int radeonfb_check_var (struct fb_var_screeninfo *var, struct fb_info *info) { struct radeonfb_info *rinfo = (struct radeonfb_info *) info->par; struct fb_var_screeninfo v; int nom, den; memcpy (&v, var, sizeof (v)); switch (v.bits_per_pixel) { case 0 ... 8: v.bits_per_pixel = 8; break; case 9 ... 16: v.bits_per_pixel = 16; break; case 17 ... 24: #if 0 /* Doesn't seem to work */ v.bits_per_pixel = 24; break; #endif return -EINVAL; case 25 ... 32: v.bits_per_pixel = 32; break; default: return -EINVAL; } switch (var_to_depth(&v)) { case 8: nom = den = 1; v.red.offset = v.green.offset = v.blue.offset = 0; v.red.length = v.green.length = v.blue.length = 8; v.transp.offset = v.transp.length = 0; break; case 15: nom = 2; den = 1; v.red.offset = 10; v.green.offset = 5; v.blue.offset = 0; v.red.length = v.green.length = v.blue.length = 5; v.transp.offset = v.transp.length = 0; break; case 16: nom = 2; den = 1; v.red.offset = 11; v.green.offset = 5; v.blue.offset = 0; v.red.length = 5; v.green.length = 6; v.blue.length = 5; v.transp.offset = v.transp.length = 0; break; case 24: nom = 4; den = 1; v.red.offset = 16; v.green.offset = 8; v.blue.offset = 0; v.red.length = v.blue.length = v.green.length = 8; v.transp.offset = v.transp.length = 0; break; case 32: nom = 4; den = 1; v.red.offset = 16; v.green.offset = 8; v.blue.offset = 0; v.red.length = v.blue.length = v.green.length = 8; v.transp.offset = 24; v.transp.length = 8; break; default: printk ("radeonfb: mode %dx%dx%d rejected, color depth invalid\n", var->xres, var->yres, var->bits_per_pixel); return -EINVAL; } if (radeon_do_maximize(rinfo, var, &v, nom, den) < 0) return 
-EINVAL; if (v.xoffset < 0) v.xoffset = 0; if (v.yoffset < 0) v.yoffset = 0; if (v.xoffset > v.xres_virtual - v.xres) v.xoffset = v.xres_virtual - v.xres - 1; if (v.yoffset > v.yres_virtual - v.yres) v.yoffset = v.yres_virtual - v.yres - 1; v.red.msb_right = v.green.msb_right = v.blue.msb_right = v.transp.offset = v.transp.length = v.transp.msb_right = 0; if (noaccel) v.accel_flags = 0; memcpy(var, &v, sizeof(v)); return 0; } static int radeonfb_pan_display (struct fb_var_screeninfo *var, struct fb_info *info) { struct radeonfb_info *rinfo = (struct radeonfb_info *) info; if ((var->xoffset + var->xres > var->xres_virtual) || (var->yoffset + var->yres > var->yres_virtual)) return -EINVAL; if (rinfo->asleep) return 0; OUTREG(CRTC_OFFSET, ((var->yoffset * var->xres_virtual + var->xoffset) * var->bits_per_pixel / 8) & ~7); return 0; } static int radeonfb_ioctl (struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg, struct fb_info *info) { struct radeonfb_info *rinfo = (struct radeonfb_info *) info; unsigned int tmp; u32 value = 0; int rc; switch (cmd) { /* * TODO: set mirror accordingly for non-Mobility chipsets with 2 CRTC's */ case FBIO_RADEON_SET_MIRROR: switch (rinfo->arch) { case RADEON_R100: case RADEON_RV100: case RADEON_R200: case RADEON_RV200: case RADEON_RV250: case RADEON_R300: return -EINVAL; default: /* RADEON M6, RADEON_M7, RADEON_M9 */ break; } rc = get_user(value, (__u32 __user *)arg); if (rc) return rc; if (value & 0x01) { tmp = INREG(LVDS_GEN_CNTL); tmp |= (LVDS_ON | LVDS_BLON); } else { tmp = INREG(LVDS_GEN_CNTL); tmp &= ~(LVDS_ON | LVDS_BLON); } OUTREG(LVDS_GEN_CNTL, tmp); if (value & 0x02) { tmp = INREG(CRTC_EXT_CNTL); tmp |= CRTC_CRT_ON; mirror = 1; } else { tmp = INREG(CRTC_EXT_CNTL); tmp &= ~CRTC_CRT_ON; mirror = 0; } OUTREG(CRTC_EXT_CNTL, tmp); break; case FBIO_RADEON_GET_MIRROR: switch (rinfo->arch) { case RADEON_R100: case RADEON_RV100: case RADEON_R200: case RADEON_RV200: case RADEON_RV250: case RADEON_R300: return 
-EINVAL; default: /* RADEON M6, RADEON_M7, RADEON_M9 */ break; } tmp = INREG(LVDS_GEN_CNTL); if ((LVDS_ON | LVDS_BLON) & tmp) value |= 0x01; tmp = INREG(CRTC_EXT_CNTL); if (CRTC_CRT_ON & tmp) value |= 0x02; return put_user(value, (__u32 __user *)arg); default: return -EINVAL; } return -EINVAL; } static int radeonfb_blank (int blank, struct fb_info *info) { struct radeonfb_info *rinfo = (struct radeonfb_info *) info; u32 val = INREG(CRTC_EXT_CNTL); u32 val2 = INREG(LVDS_GEN_CNTL); if (rinfo->asleep) return 0; #ifdef CONFIG_PMAC_BACKLIGHT if (rinfo->dviDisp_type == MT_LCD && _machine == _MACH_Pmac) { set_backlight_enable(!blank); return 0; } #endif /* reset it */ val &= ~(CRTC_DISPLAY_DIS | CRTC_HSYNC_DIS | CRTC_VSYNC_DIS); val2 &= ~(LVDS_DISPLAY_DIS); switch (blank) { case FB_BLANK_UNBLANK: case FB_BLANK_NORMAL: break; case FB_BLANK_VSYNC_SUSPEND: val |= (CRTC_DISPLAY_DIS | CRTC_VSYNC_DIS); break; case FB_BLANK_HSYNC_SUSPEND: val |= (CRTC_DISPLAY_DIS | CRTC_HSYNC_DIS); break; case FB_BLANK_POWERDOWN: val |= (CRTC_DISPLAY_DIS | CRTC_VSYNC_DIS | CRTC_HSYNC_DIS); val2 |= (LVDS_DISPLAY_DIS); break; } switch (rinfo->dviDisp_type) { case MT_LCD: OUTREG(LVDS_GEN_CNTL, val2); break; case MT_CRT: default: OUTREG(CRTC_EXT_CNTL, val); break; } /* let fbcon do a soft blank for us */ return (blank == FB_BLANK_NORMAL) ? 
1 : 0; } static int radeonfb_setcolreg (unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *info) { struct radeonfb_info *rinfo = (struct radeonfb_info *) info; u32 pindex, vclk_cntl; unsigned int i; if (regno > 255) return 1; red >>= 8; green >>= 8; blue >>= 8; rinfo->palette[regno].red = red; rinfo->palette[regno].green = green; rinfo->palette[regno].blue = blue; /* default */ pindex = regno; if (!rinfo->asleep) { vclk_cntl = INPLL(VCLK_ECP_CNTL); OUTPLL(VCLK_ECP_CNTL, vclk_cntl & ~PIXCLK_DAC_ALWAYS_ONb); if (rinfo->bpp == 16) { pindex = regno * 8; if (rinfo->depth == 16 && regno > 63) return 1; if (rinfo->depth == 15 && regno > 31) return 1; /* For 565, the green component is mixed one order below */ if (rinfo->depth == 16) { OUTREG(PALETTE_INDEX, pindex>>1); OUTREG(PALETTE_DATA, (rinfo->palette[regno>>1].red << 16) | (green << 8) | (rinfo->palette[regno>>1].blue)); green = rinfo->palette[regno<<1].green; } } if (rinfo->depth != 16 || regno < 32) { OUTREG(PALETTE_INDEX, pindex); OUTREG(PALETTE_DATA, (red << 16) | (green << 8) | blue); } OUTPLL(VCLK_ECP_CNTL, vclk_cntl); } if (regno < 16) { switch (rinfo->depth) { case 15: ((u16 *) (info->pseudo_palette))[regno] = (regno << 10) | (regno << 5) | regno; break; case 16: ((u16 *) (info->pseudo_palette))[regno] = (regno << 11) | (regno << 6) | regno; break; case 24: ((u32 *) (info->pseudo_palette))[regno] = (regno << 16) | (regno << 8) | regno; break; case 32: i = (regno << 8) | regno; ((u32 *) (info->pseudo_palette))[regno] = (i << 16) | i; break; } } return 0; } static void radeon_save_state (struct radeonfb_info *rinfo, struct radeon_regs *save) { /* CRTC regs */ save->crtc_gen_cntl = INREG(CRTC_GEN_CNTL); save->crtc_ext_cntl = INREG(CRTC_EXT_CNTL); save->dac_cntl = INREG(DAC_CNTL); save->crtc_h_total_disp = INREG(CRTC_H_TOTAL_DISP); save->crtc_h_sync_strt_wid = INREG(CRTC_H_SYNC_STRT_WID); save->crtc_v_total_disp = INREG(CRTC_V_TOTAL_DISP); save->crtc_v_sync_strt_wid = 
INREG(CRTC_V_SYNC_STRT_WID); save->crtc_pitch = INREG(CRTC_PITCH); #if defined(__BIG_ENDIAN) save->surface_cntl = INREG(SURFACE_CNTL); #endif /* FP regs */ save->fp_crtc_h_total_disp = INREG(FP_CRTC_H_TOTAL_DISP); save->fp_crtc_v_total_disp = INREG(FP_CRTC_V_TOTAL_DISP); save->fp_gen_cntl = INREG(FP_GEN_CNTL); save->fp_h_sync_strt_wid = INREG(FP_H_SYNC_STRT_WID); save->fp_horz_stretch = INREG(FP_HORZ_STRETCH); save->fp_v_sync_strt_wid = INREG(FP_V_SYNC_STRT_WID); save->fp_vert_stretch = INREG(FP_VERT_STRETCH); save->lvds_gen_cntl = INREG(LVDS_GEN_CNTL); save->lvds_pll_cntl = INREG(LVDS_PLL_CNTL); save->tmds_crc = INREG(TMDS_CRC); save->tmds_transmitter_cntl = INREG(TMDS_TRANSMITTER_CNTL); save->vclk_ecp_cntl = INPLL(VCLK_ECP_CNTL); } static int radeonfb_set_par (struct fb_info *info) { struct radeonfb_info *rinfo = (struct radeonfb_info *)info->par; struct fb_var_screeninfo *mode = &info->var; struct radeon_regs newmode; int hTotal, vTotal, hSyncStart, hSyncEnd, hSyncPol, vSyncStart, vSyncEnd, vSyncPol, cSync; u8 hsync_adj_tab[] = {0, 0x12, 9, 9, 6, 5}; u8 hsync_fudge_fp[] = {2, 2, 0, 0, 5, 5}; u32 dotClock = 1000000000 / mode->pixclock, sync, h_sync_pol, v_sync_pol; int freq = dotClock / 10; /* x 100 */ int xclk_freq, vclk_freq, xclk_per_trans, xclk_per_trans_precise; int useable_precision, roff, ron; int min_bits, format = 0; int hsync_start, hsync_fudge, bytpp, hsync_wid, vsync_wid; int primary_mon = PRIMARY_MONITOR(rinfo); int depth = var_to_depth(mode); int accel = (mode->accel_flags & FB_ACCELF_TEXT) != 0; rinfo->xres = mode->xres; rinfo->yres = mode->yres; rinfo->xres_virtual = mode->xres_virtual; rinfo->yres_virtual = mode->yres_virtual; rinfo->pixclock = mode->pixclock; hSyncStart = mode->xres + mode->right_margin; hSyncEnd = hSyncStart + mode->hsync_len; hTotal = hSyncEnd + mode->left_margin; vSyncStart = mode->yres + mode->lower_margin; vSyncEnd = vSyncStart + mode->vsync_len; vTotal = vSyncEnd + mode->upper_margin; if ((primary_mon == MT_DFP) || 
(primary_mon == MT_LCD)) { if (rinfo->panel_xres < mode->xres) rinfo->xres = mode->xres = rinfo->panel_xres; if (rinfo->panel_yres < mode->yres) rinfo->yres = mode->yres = rinfo->panel_yres; hTotal = mode->xres + rinfo->hblank; hSyncStart = mode->xres + rinfo->hOver_plus; hSyncEnd = hSyncStart + rinfo->hSync_width; vTotal = mode->yres + rinfo->vblank; vSyncStart = mode->yres + rinfo->vOver_plus; vSyncEnd = vSyncStart + rinfo->vSync_width; } sync = mode->sync; h_sync_pol = sync & FB_SYNC_HOR_HIGH_ACT ? 0 : 1; v_sync_pol = sync & FB_SYNC_VERT_HIGH_ACT ? 0 : 1; RTRACE("hStart = %d, hEnd = %d, hTotal = %d\n", hSyncStart, hSyncEnd, hTotal); RTRACE("vStart = %d, vEnd = %d, vTotal = %d\n", vSyncStart, vSyncEnd, vTotal); hsync_wid = (hSyncEnd - hSyncStart) / 8; vsync_wid = vSyncEnd - vSyncStart; if (hsync_wid == 0) hsync_wid = 1; else if (hsync_wid > 0x3f) /* max */ hsync_wid = 0x3f; if (vsync_wid == 0) vsync_wid = 1; else if (vsync_wid > 0x1f) /* max */ vsync_wid = 0x1f; hSyncPol = mode->sync & FB_SYNC_HOR_HIGH_ACT ? 0 : 1; vSyncPol = mode->sync & FB_SYNC_VERT_HIGH_ACT ? 0 : 1; cSync = mode->sync & FB_SYNC_COMP_HIGH_ACT ? 
(1 << 4) : 0; format = radeon_get_dstbpp(depth); bytpp = mode->bits_per_pixel >> 3; if ((primary_mon == MT_DFP) || (primary_mon == MT_LCD)) hsync_fudge = hsync_fudge_fp[format-1]; else hsync_fudge = hsync_adj_tab[format-1]; hsync_start = hSyncStart - 8 + hsync_fudge; newmode.crtc_gen_cntl = CRTC_EXT_DISP_EN | CRTC_EN | (format << 8); if ((primary_mon == MT_DFP) || (primary_mon == MT_LCD)) { newmode.crtc_ext_cntl = VGA_ATI_LINEAR | XCRT_CNT_EN; if (mirror) newmode.crtc_ext_cntl |= CRTC_CRT_ON; newmode.crtc_gen_cntl &= ~(CRTC_DBL_SCAN_EN | CRTC_INTERLACE_EN); } else { newmode.crtc_ext_cntl = VGA_ATI_LINEAR | XCRT_CNT_EN | CRTC_CRT_ON; } newmode.dac_cntl = /* INREG(DAC_CNTL) | */ DAC_MASK_ALL | DAC_VGA_ADR_EN | DAC_8BIT_EN; newmode.crtc_h_total_disp = ((((hTotal / 8) - 1) & 0x3ff) | (((mode->xres / 8) - 1) << 16)); newmode.crtc_h_sync_strt_wid = ((hsync_start & 0x1fff) | (hsync_wid << 16) | (h_sync_pol << 23)); newmode.crtc_v_total_disp = ((vTotal - 1) & 0xffff) | ((mode->yres - 1) << 16); newmode.crtc_v_sync_strt_wid = (((vSyncStart - 1) & 0xfff) | (vsync_wid << 16) | (v_sync_pol << 23)); if (accel) { /* We first calculate the engine pitch */ rinfo->pitch = ((mode->xres_virtual * ((mode->bits_per_pixel + 1) / 8) + 0x3f) & ~(0x3f)) >> 6; /* Then, re-multiply it to get the CRTC pitch */ newmode.crtc_pitch = (rinfo->pitch << 3) / ((mode->bits_per_pixel + 1) / 8); } else newmode.crtc_pitch = (mode->xres_virtual >> 3); newmode.crtc_pitch |= (newmode.crtc_pitch << 16); #if defined(__BIG_ENDIAN) /* * It looks like recent chips have a problem with SURFACE_CNTL, * setting SURF_TRANSLATION_DIS completely disables the * swapper as well, so we leave it unset now. 
*/ newmode.surface_cntl = 0; /* Setup swapping on both apertures, though we currently * only use aperture 0, enabling swapper on aperture 1 * won't harm */ switch (mode->bits_per_pixel) { case 16: newmode.surface_cntl |= NONSURF_AP0_SWP_16BPP; newmode.surface_cntl |= NONSURF_AP1_SWP_16BPP; break; case 24: case 32: newmode.surface_cntl |= NONSURF_AP0_SWP_32BPP; newmode.surface_cntl |= NONSURF_AP1_SWP_32BPP; break; } #endif rinfo->pitch = ((mode->xres_virtual * ((mode->bits_per_pixel + 1) / 8) + 0x3f) & ~(0x3f)) / 64; RTRACE("h_total_disp = 0x%x\t hsync_strt_wid = 0x%x\n", newmode.crtc_h_total_disp, newmode.crtc_h_sync_strt_wid); RTRACE("v_total_disp = 0x%x\t vsync_strt_wid = 0x%x\n", newmode.crtc_v_total_disp, newmode.crtc_v_sync_strt_wid); newmode.xres = mode->xres; newmode.yres = mode->yres; rinfo->bpp = mode->bits_per_pixel; rinfo->depth = depth; if (freq > rinfo->pll.ppll_max) freq = rinfo->pll.ppll_max; if (freq*12 < rinfo->pll.ppll_min) freq = rinfo->pll.ppll_min / 12; { struct { int divider; int bitvalue; } *post_div, post_divs[] = { { 1, 0 }, { 2, 1 }, { 4, 2 }, { 8, 3 }, { 3, 4 }, { 16, 5 }, { 6, 6 }, { 12, 7 }, { 0, 0 }, }; for (post_div = &post_divs[0]; post_div->divider; ++post_div) { rinfo->pll_output_freq = post_div->divider * freq; if (rinfo->pll_output_freq >= rinfo->pll.ppll_min && rinfo->pll_output_freq <= rinfo->pll.ppll_max) break; } rinfo->post_div = post_div->divider; rinfo->fb_div = round_div(rinfo->pll.ref_div*rinfo->pll_output_freq, rinfo->pll.ref_clk); newmode.ppll_ref_div = rinfo->pll.ref_div; newmode.ppll_div_3 = rinfo->fb_div | (post_div->bitvalue << 16); } newmode.vclk_ecp_cntl = rinfo->init_state.vclk_ecp_cntl; #ifdef CONFIG_PPC_OF /* Gross hack for iBook with M7 until I find out a proper fix */ if (machine_is_compatible("PowerBook4,3") && rinfo->arch == RADEON_M7) newmode.ppll_div_3 = 0x000600ad; #endif /* CONFIG_PPC_OF */ RTRACE("post div = 0x%x\n", rinfo->post_div); RTRACE("fb_div = 0x%x\n", rinfo->fb_div); RTRACE("ppll_div_3 = 
0x%x\n", newmode.ppll_div_3); /* DDA */ vclk_freq = round_div(rinfo->pll.ref_clk * rinfo->fb_div, rinfo->pll.ref_div * rinfo->post_div); xclk_freq = rinfo->pll.xclk; xclk_per_trans = round_div(xclk_freq * 128, vclk_freq * mode->bits_per_pixel); min_bits = min_bits_req(xclk_per_trans); useable_precision = min_bits + 1; xclk_per_trans_precise = round_div((xclk_freq * 128) << (11 - useable_precision), vclk_freq * mode->bits_per_pixel); ron = (4 * rinfo->ram.mb + 3 * _max(rinfo->ram.trcd - 2, 0) + 2 * rinfo->ram.trp + rinfo->ram.twr + rinfo->ram.cl + rinfo->ram.tr2w + xclk_per_trans) << (11 - useable_precision); roff = xclk_per_trans_precise * (32 - 4); RTRACE("ron = %d, roff = %d\n", ron, roff); RTRACE("vclk_freq = %d, per = %d\n", vclk_freq, xclk_per_trans_precise); if ((ron + rinfo->ram.rloop) >= roff) { printk("radeonfb: error ron out of range\n"); return -EINVAL; } newmode.dda_config = (xclk_per_trans_precise | (useable_precision << 16) | (rinfo->ram.rloop << 20)); newmode.dda_on_off = (ron << 16) | roff; if ((primary_mon == MT_DFP) || (primary_mon == MT_LCD)) { unsigned int hRatio, vRatio; /* We force the pixel clock to be always enabled. Allowing it * to be power managed during blanking would save power, but has * nasty interactions with the 2D engine & sleep code that haven't * been solved yet. 
--BenH */ newmode.vclk_ecp_cntl &= ~PIXCLK_DAC_ALWAYS_ONb; if (mode->xres > rinfo->panel_xres) mode->xres = rinfo->panel_xres; if (mode->yres > rinfo->panel_yres) mode->yres = rinfo->panel_yres; newmode.fp_horz_stretch = (((rinfo->panel_xres / 8) - 1) << HORZ_PANEL_SHIFT); newmode.fp_vert_stretch = ((rinfo->panel_yres - 1) << VERT_PANEL_SHIFT); if (mode->xres != rinfo->panel_xres) { hRatio = round_div(mode->xres * HORZ_STRETCH_RATIO_MAX, rinfo->panel_xres); newmode.fp_horz_stretch = (((((unsigned long)hRatio) & HORZ_STRETCH_RATIO_MASK)) | (newmode.fp_horz_stretch & (HORZ_PANEL_SIZE | HORZ_FP_LOOP_STRETCH | HORZ_AUTO_RATIO_INC))); newmode.fp_horz_stretch |= (HORZ_STRETCH_BLEND | HORZ_STRETCH_ENABLE); } newmode.fp_horz_stretch &= ~HORZ_AUTO_RATIO; if (mode->yres != rinfo->panel_yres) { vRatio = round_div(mode->yres * VERT_STRETCH_RATIO_MAX, rinfo->panel_yres); newmode.fp_vert_stretch = (((((unsigned long)vRatio) & VERT_STRETCH_RATIO_MASK)) | (newmode.fp_vert_stretch & (VERT_PANEL_SIZE | VERT_STRETCH_RESERVED))); newmode.fp_vert_stretch |= (VERT_STRETCH_BLEND | VERT_STRETCH_ENABLE); } newmode.fp_vert_stretch &= ~VERT_AUTO_RATIO_EN; newmode.fp_gen_cntl = (rinfo->init_state.fp_gen_cntl & (u32) ~(FP_SEL_CRTC2 | FP_RMX_HVSYNC_CONTROL_EN | FP_DFP_SYNC_SEL | FP_CRT_SYNC_SEL | FP_CRTC_LOCK_8DOT | FP_USE_SHADOW_EN | FP_CRTC_USE_SHADOW_VEND | FP_CRT_SYNC_ALT)); newmode.fp_gen_cntl |= (FP_CRTC_DONT_SHADOW_VPAR | FP_CRTC_DONT_SHADOW_HEND); newmode.lvds_gen_cntl = rinfo->init_state.lvds_gen_cntl; newmode.lvds_pll_cntl = rinfo->init_state.lvds_pll_cntl; newmode.tmds_crc = rinfo->init_state.tmds_crc; newmode.tmds_transmitter_cntl = rinfo->init_state.tmds_transmitter_cntl; if (primary_mon == MT_LCD) { newmode.lvds_gen_cntl |= (LVDS_ON | LVDS_BLON); newmode.fp_gen_cntl &= ~(FP_FPON | FP_TMDS_EN); } else { /* DFP */ newmode.fp_gen_cntl |= (FP_FPON | FP_TMDS_EN); newmode.tmds_transmitter_cntl = (TMDS_RAN_PAT_RST | TMDS_ICHCSEL | TMDS_PLL_EN) & ~(TMDS_PLLRST); newmode.crtc_ext_cntl &= 
/* NOTE(review): this continues a statement begun on the previous source
 * line (newmode.crtc_ext_cntl &= ...); the head of the enclosing
 * mode-setting function lies outside this chunk. */
~CRTC_CRT_ON;
        }

        /* Flat-panel CRTC timings: blank/overshoot values come from the
         * probed panel info in rinfo, widths/polarities from the mode. */
        newmode.fp_crtc_h_total_disp = (((rinfo->hblank / 8) & 0x3ff) |
                (((mode->xres / 8) - 1) << 16));
        newmode.fp_crtc_v_total_disp = (rinfo->vblank & 0xffff) |
                ((mode->yres - 1) << 16);
        newmode.fp_h_sync_strt_wid = ((rinfo->hOver_plus & 0x1fff) |
                (hsync_wid << 16) | (h_sync_pol << 23));
        newmode.fp_v_sync_strt_wid = ((rinfo->vOver_plus & 0xfff) |
                (vsync_wid << 16) | (v_sync_pol << 23));
    }

    /* do it! */
    if (!rinfo->asleep) {
        radeon_write_mode (rinfo, &newmode);
        /* (re)initialize the engine */
        /* NOTE(review): the engine is (re)initialized only when noaccel
         * is set — this condition looks inverted; confirm against other
         * radeon_engine_init() call sites before changing. */
        if (noaccel)
            radeon_engine_init (rinfo);
    }

    /* Update fix: line length depends on whether the accel engine pitch
     * (64-byte units) or the plain virtual width is in use. */
    if (accel)
        info->fix.line_length = rinfo->pitch*64;
    else
        info->fix.line_length = mode->xres_virtual *
            ((mode->bits_per_pixel + 1) / 8);
    info->fix.visual = rinfo->depth == 8 ?
        FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
#ifdef CONFIG_BOOTX_TEXT
    /* Update debug text engine */
    btext_update_display(rinfo->fb_base_phys, mode->xres, mode->yres,
                         rinfo->depth, info->fix.line_length);
#endif

    return 0;
}

/*
 * radeon_write_mode - program a fully precomputed register set into the
 * chip: common regs, primary CRTC, PLL dividers (spinning until the
 * hardware latches each value), and — for DFP/LCD panels — the flat
 * panel block including careful LVDS power sequencing.  The display is
 * blanked for the duration of the reprogramming.
 */
static void radeon_write_mode (struct radeonfb_info *rinfo,
                               struct radeon_regs *mode)
{
    int i;
    int primary_mon = PRIMARY_MONITOR(rinfo);

    radeonfb_blank(VESA_POWERDOWN, (struct fb_info *)rinfo);

    /* Common register tables differ between M6 and the other chips. */
    if (rinfo->arch == RADEON_M6) {
        for (i=0; i<8; i++)
            OUTREG(common_regs_m6[i].reg, common_regs_m6[i].val);
    } else {
        for (i=0; i<9; i++)
            OUTREG(common_regs[i].reg, common_regs[i].val);
    }

    OUTREG(CRTC_GEN_CNTL, mode->crtc_gen_cntl);
    OUTREGP(CRTC_EXT_CNTL, mode->crtc_ext_cntl,
            CRTC_HSYNC_DIS | CRTC_VSYNC_DIS | CRTC_DISPLAY_DIS);
    OUTREGP(DAC_CNTL, mode->dac_cntl, DAC_RANGE_CNTL | DAC_BLANKING);
    OUTREG(CRTC_H_TOTAL_DISP, mode->crtc_h_total_disp);
    OUTREG(CRTC_H_SYNC_STRT_WID, mode->crtc_h_sync_strt_wid);
    OUTREG(CRTC_V_TOTAL_DISP, mode->crtc_v_total_disp);
    OUTREG(CRTC_V_SYNC_STRT_WID, mode->crtc_v_sync_strt_wid);
    OUTREG(CRTC_OFFSET, 0);
    OUTREG(CRTC_OFFSET_CNTL, 0);
    OUTREG(CRTC_PITCH, mode->crtc_pitch);

#if defined(__BIG_ENDIAN)
    OUTREG(SURFACE_CNTL, mode->surface_cntl);
#endif

    /* PLL programming: each write is re-read in a spin loop until the
     * hardware reports the new value has taken effect. */
    while ((INREG(CLOCK_CNTL_INDEX) & PPLL_DIV_SEL_MASK) !=
           PPLL_DIV_SEL_MASK) {
        OUTREGP(CLOCK_CNTL_INDEX, PPLL_DIV_SEL_MASK, 0xffff);
    }

    OUTPLLP(PPLL_CNTL, PPLL_RESET, 0xffff);

    while ((INPLL(PPLL_REF_DIV) & PPLL_REF_DIV_MASK) !=
           (mode->ppll_ref_div & PPLL_REF_DIV_MASK)) {
        OUTPLLP(PPLL_REF_DIV, mode->ppll_ref_div, ~PPLL_REF_DIV_MASK);
    }

    while ((INPLL(PPLL_DIV_3) & PPLL_FB3_DIV_MASK) !=
           (mode->ppll_div_3 & PPLL_FB3_DIV_MASK)) {
        OUTPLLP(PPLL_DIV_3, mode->ppll_div_3, ~PPLL_FB3_DIV_MASK);
    }

    while ((INPLL(PPLL_DIV_3) & PPLL_POST3_DIV_MASK) !=
           (mode->ppll_div_3 & PPLL_POST3_DIV_MASK)) {
        OUTPLLP(PPLL_DIV_3, mode->ppll_div_3, ~PPLL_POST3_DIV_MASK);
    }

    OUTPLL(HTOTAL_CNTL, 0);

    OUTPLLP(PPLL_CNTL, 0, ~PPLL_RESET);

//  OUTREG(DDA_CONFIG, mode->dda_config);
//  OUTREG(DDA_ON_OFF, mode->dda_on_off);

    if ((primary_mon == MT_DFP) || (primary_mon == MT_LCD)) {
        /* Flat panel block. */
        OUTREG(FP_CRTC_H_TOTAL_DISP, mode->fp_crtc_h_total_disp);
        OUTREG(FP_CRTC_V_TOTAL_DISP, mode->fp_crtc_v_total_disp);
        OUTREG(FP_H_SYNC_STRT_WID, mode->fp_h_sync_strt_wid);
        OUTREG(FP_V_SYNC_STRT_WID, mode->fp_v_sync_strt_wid);
        OUTREG(FP_HORZ_STRETCH, mode->fp_horz_stretch);
        OUTREG(FP_VERT_STRETCH, mode->fp_vert_stretch);
        OUTREG(FP_GEN_CNTL, mode->fp_gen_cntl);
        OUTREG(TMDS_CRC, mode->tmds_crc);
        OUTREG(TMDS_TRANSMITTER_CNTL, mode->tmds_transmitter_cntl);

        if (primary_mon == MT_LCD) {
            unsigned int tmp = INREG(LVDS_GEN_CNTL);

            /* Preserve the panel power/backlight state bits captured at
             * init time; only update them if the on/backlight state is
             * actually changing, with a settle delay around power-up. */
            mode->lvds_gen_cntl &= ~LVDS_STATE_MASK;
            mode->lvds_gen_cntl |= (rinfo->init_state.lvds_gen_cntl & LVDS_STATE_MASK);

            if ((tmp & (LVDS_ON | LVDS_BLON)) ==
                (mode->lvds_gen_cntl & (LVDS_ON | LVDS_BLON))) {
                OUTREG(LVDS_GEN_CNTL, mode->lvds_gen_cntl);
            } else {
                if (mode->lvds_gen_cntl & (LVDS_ON | LVDS_BLON)) {
                    udelay(1000);
                    OUTREG(LVDS_GEN_CNTL, mode->lvds_gen_cntl);
                } else {
                    OUTREG(LVDS_GEN_CNTL,
                           mode->lvds_gen_cntl | LVDS_BLON);
                    udelay(1000);
                    OUTREG(LVDS_GEN_CNTL, mode->lvds_gen_cntl);
                }
            }
        }
    }

    radeonfb_blank(VESA_NO_BLANKING, (struct fb_info *)rinfo);

    OUTPLL(VCLK_ECP_CNTL, mode->vclk_ecp_cntl);

    return;
}

/* fbdev entry points; hardware acceleration hooks are compiled out (#if 0)
 * in favor of the generic cfb_* software implementations. */
static struct fb_ops radeonfb_ops = {
    .owner = THIS_MODULE,
    .fb_check_var =
radeonfb_check_var,
    .fb_set_par = radeonfb_set_par,
    .fb_setcolreg = radeonfb_setcolreg,
    .fb_pan_display = radeonfb_pan_display,
    .fb_blank = radeonfb_blank,
    .fb_ioctl = radeonfb_ioctl,
#if 0
    .fb_fillrect = radeonfb_fillrect,
    .fb_copyarea = radeonfb_copyarea,
    .fb_imageblit = radeonfb_imageblit,
    .fb_rasterimg = radeonfb_rasterimg,
#else
    .fb_fillrect = cfb_fillrect,
    .fb_copyarea = cfb_copyarea,
    .fb_imageblit = cfb_imageblit,
#endif
    .fb_cursor = soft_cursor,
};

/*
 * radeon_set_fbinfo - populate the embedded fb_info structure (ops table,
 * fixed screeninfo, aperture addresses) and kick off initial display
 * setup via radeon_init_disp().  Returns 0 on success, -1 on failure.
 */
static int __devinit radeon_set_fbinfo (struct radeonfb_info *rinfo)
{
    struct fb_info *info;

    info = &rinfo->info;

    info->par = rinfo;
    info->pseudo_palette = rinfo->pseudo_palette;
    info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
    info->fbops = &radeonfb_ops;
    info->screen_base = rinfo->fb_base;

    /* Fill fix common fields */
    strlcpy(info->fix.id, rinfo->name, sizeof(info->fix.id));
    info->fix.smem_start = rinfo->fb_base_phys;
    info->fix.smem_len = rinfo->video_ram;
    info->fix.type = FB_TYPE_PACKED_PIXELS;
    /* visual is refined later (direct color for depth > 8) */
    info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
    info->fix.xpanstep = 8;
    info->fix.ypanstep = 1;
    info->fix.ywrapstep = 0;
    info->fix.type_aux = 0;
    info->fix.mmio_start = rinfo->mmio_base_phys;
    info->fix.mmio_len = RADEON_REGSIZE;
    if (noaccel)
        info->fix.accel = FB_ACCEL_NONE;
    else
        info->fix.accel = FB_ACCEL_ATI_RADEON;

    if (radeon_init_disp (rinfo) < 0)
        return -1;

    return 0;
}

#ifdef CONFIG_PMAC_BACKLIGHT

/* TODO: Dbl check these tables, we don't go up to full ON backlight
 * in these, possibly because we noticed MacOS doesn't, but I'd prefer
 * having some more official numbers from ATI
 */
/* 16-entry backlight level -> LVDS_BL_MOD_LEVEL conversion tables;
 * note M6 counts down (0xff = dimmest slot) while M7 counts up. */
static int backlight_conv_m6[] = {
    0xff, 0xc0, 0xb5, 0xaa, 0x9f, 0x94, 0x89, 0x7e,
    0x73, 0x68, 0x5d, 0x52, 0x47, 0x3c, 0x31, 0x24
};
static int backlight_conv_m7[] = {
    0x00, 0x3f, 0x4a, 0x55, 0x60, 0x6b, 0x76, 0x81,
    0x8c, 0x97, 0xa2, 0xad, 0xb8, 0xc3, 0xce, 0xd9
};

#define BACKLIGHT_LVDS_OFF
#undef BACKLIGHT_DAC_OFF

/* We turn off the LCD completely instead of just dimming the backlight.
* This provides some greater power saving and the display is useless
 * without backlight anyway.
 */

/*
 * radeon_set_backlight_enable - PMU backlight callback: turn the panel
 * backlight on at a given level (0..15, mapped through the per-chip
 * conversion table) or power the LCD down entirely.  The resulting LVDS
 * state bits are mirrored into init_state so later mode sets preserve
 * the current panel power state.  Always returns 0.
 */
static int radeon_set_backlight_enable(int on, int level, void *data)
{
    struct radeonfb_info *rinfo = (struct radeonfb_info *)data;
    unsigned int lvds_gen_cntl = INREG(LVDS_GEN_CNTL);
    int* conv_table;

    /* Pardon me for that hack... maybe some day we can figure
     * out in what direction backlight should work on a given
     * panel ?
     */
    if ((rinfo->arch == RADEON_M7 || rinfo->arch == RADEON_M9) &&
        !machine_is_compatible("PowerBook4,3"))
        conv_table = backlight_conv_m7;
    else
        conv_table = backlight_conv_m6;

    lvds_gen_cntl |= (LVDS_BL_MOD_EN | LVDS_BLON);
    if (on && (level > BACKLIGHT_OFF)) {
        lvds_gen_cntl |= LVDS_DIGON;
        if (!(lvds_gen_cntl & LVDS_ON)) {
            /* Panel was off: raise DIGON first, wait, then enable the
             * backlight — sequencing required by the panel. */
            lvds_gen_cntl &= ~LVDS_BLON;
            OUTREG(LVDS_GEN_CNTL, lvds_gen_cntl);
            (void)INREG(LVDS_GEN_CNTL);
            mdelay(10);
            lvds_gen_cntl |= LVDS_BLON;
            OUTREG(LVDS_GEN_CNTL, lvds_gen_cntl);
        }
        lvds_gen_cntl &= ~LVDS_BL_MOD_LEVEL_MASK;
        lvds_gen_cntl |= (conv_table[level] <<
                          LVDS_BL_MOD_LEVEL_SHIFT);
        lvds_gen_cntl |= (LVDS_ON | LVDS_EN);
        lvds_gen_cntl &= ~LVDS_DISPLAY_DIS;
    } else {
        /* Dim to the table's "off" slot, disable the display, then drop
         * all panel power bits after a short delay. */
        lvds_gen_cntl &= ~LVDS_BL_MOD_LEVEL_MASK;
        lvds_gen_cntl |= (conv_table[0] <<
                          LVDS_BL_MOD_LEVEL_SHIFT);
        lvds_gen_cntl |= LVDS_DISPLAY_DIS;
        OUTREG(LVDS_GEN_CNTL, lvds_gen_cntl);
        udelay(10);
        lvds_gen_cntl &= ~(LVDS_ON | LVDS_EN | LVDS_BLON | LVDS_DIGON);
    }

    OUTREG(LVDS_GEN_CNTL, lvds_gen_cntl);
    rinfo->init_state.lvds_gen_cntl &= ~LVDS_STATE_MASK;
    rinfo->init_state.lvds_gen_cntl |= (lvds_gen_cntl & LVDS_STATE_MASK);

    return 0;
}

/* PMU "set level" callback: just a backlight-enable at that level. */
static int radeon_set_backlight_level(int level, void *data)
{
    return radeon_set_backlight_enable(1, level, data);
}
#endif /* CONFIG_PMAC_BACKLIGHT */


#ifdef CONFIG_PMAC_PBOOK

/* CLK_PIN_CNTL value sampled on wakeup, printed for debugging. */
static u32 dbg_clk;

/*
 * Radeon M6 Power Management code.
This code currently only supports
 * the mobile chips, it's based from some informations provided by ATI
 * along with hours of tracing of MacOS drivers
 */

/* Snapshot the clock, display, pad/DDC and memory-controller registers
 * into rinfo->save_regs[0..33] before entering D2 suspend. */
static void radeon_pm_save_regs(struct radeonfb_info *rinfo)
{
    rinfo->save_regs[0] = INPLL(PLL_PWRMGT_CNTL);
    rinfo->save_regs[1] = INPLL(CLK_PWRMGT_CNTL);
    rinfo->save_regs[2] = INPLL(MCLK_CNTL);
    rinfo->save_regs[3] = INPLL(SCLK_CNTL);
    rinfo->save_regs[4] = INPLL(CLK_PIN_CNTL);
    rinfo->save_regs[5] = INPLL(VCLK_ECP_CNTL);
    rinfo->save_regs[6] = INPLL(PIXCLKS_CNTL);
    rinfo->save_regs[7] = INPLL(MCLK_MISC);
    rinfo->save_regs[8] = INPLL(P2PLL_CNTL);

    rinfo->save_regs[9] = INREG(DISP_MISC_CNTL);
    rinfo->save_regs[10] = INREG(DISP_PWR_MAN);
    rinfo->save_regs[11] = INREG(LVDS_GEN_CNTL);
    rinfo->save_regs[12] = INREG(LVDS_PLL_CNTL);
    rinfo->save_regs[13] = INREG(TV_DAC_CNTL);
    rinfo->save_regs[14] = INREG(BUS_CNTL1);
    rinfo->save_regs[15] = INREG(CRTC_OFFSET_CNTL);
    rinfo->save_regs[16] = INREG(AGP_CNTL);
    /* CRTC_GEN_CNTL values are saved with bit 25 cleared and bit 26 set
     * (modified, not raw) — restored verbatim on resume. */
    rinfo->save_regs[17] = (INREG(CRTC_GEN_CNTL) & 0xfdffffff) | 0x04000000;
    rinfo->save_regs[18] = (INREG(CRTC2_GEN_CNTL) & 0xfdffffff) | 0x04000000;
    rinfo->save_regs[19] = INREG(GPIOPAD_A);
    rinfo->save_regs[20] = INREG(GPIOPAD_EN);
    rinfo->save_regs[21] = INREG(GPIOPAD_MASK);
    rinfo->save_regs[22] = INREG(ZV_LCDPAD_A);
    rinfo->save_regs[23] = INREG(ZV_LCDPAD_EN);
    rinfo->save_regs[24] = INREG(ZV_LCDPAD_MASK);
    rinfo->save_regs[25] = INREG(GPIO_VGA_DDC);
    rinfo->save_regs[26] = INREG(GPIO_DVI_DDC);
    rinfo->save_regs[27] = INREG(GPIO_MONID);
    rinfo->save_regs[28] = INREG(GPIO_CRT2_DDC);
    rinfo->save_regs[29] = INREG(SURFACE_CNTL);
    rinfo->save_regs[30] = INREG(MC_FB_LOCATION);
    rinfo->save_regs[31] = INREG(DISPLAY_BASE_ADDR);
    rinfo->save_regs[32] = INREG(MC_AGP_LOCATION);
    rinfo->save_regs[33] = INREG(CRTC2_DISPLAY_BASE_ADDR);
}

/* Restore the registers saved by radeon_pm_save_regs, in an order the
 * hardware tolerates (P2PLL is touched first with its low bit masked,
 * then written back fully after the CRTC registers). */
static void radeon_pm_restore_regs(struct radeonfb_info *rinfo)
{
    OUTPLL(P2PLL_CNTL, rinfo->save_regs[8] & 0xFFFFFFFE); /* First */
    OUTPLL(PLL_PWRMGT_CNTL, rinfo->save_regs[0]);
    OUTPLL(CLK_PWRMGT_CNTL, rinfo->save_regs[1]);
OUTPLL(MCLK_CNTL, rinfo->save_regs[2]);
    OUTPLL(SCLK_CNTL, rinfo->save_regs[3]);
    OUTPLL(CLK_PIN_CNTL, rinfo->save_regs[4]);
    OUTPLL(VCLK_ECP_CNTL, rinfo->save_regs[5]);
    OUTPLL(PIXCLKS_CNTL, rinfo->save_regs[6]);
    OUTPLL(MCLK_MISC, rinfo->save_regs[7]);

    OUTREG(DISP_MISC_CNTL, rinfo->save_regs[9]);
    OUTREG(DISP_PWR_MAN, rinfo->save_regs[10]);
    OUTREG(LVDS_GEN_CNTL, rinfo->save_regs[11]);
    OUTREG(LVDS_PLL_CNTL,rinfo->save_regs[12]);
    OUTREG(TV_DAC_CNTL, rinfo->save_regs[13]);
    OUTREG(BUS_CNTL1, rinfo->save_regs[14]);
    OUTREG(CRTC_OFFSET_CNTL, rinfo->save_regs[15]);
    OUTREG(AGP_CNTL, rinfo->save_regs[16]);
    OUTREG(CRTC_GEN_CNTL, rinfo->save_regs[17]);
    OUTREG(CRTC2_GEN_CNTL, rinfo->save_regs[18]);

    // wait VBL before that one  ?
    OUTPLL(P2PLL_CNTL, rinfo->save_regs[8]);

    OUTREG(GPIOPAD_A, rinfo->save_regs[19]);
    OUTREG(GPIOPAD_EN, rinfo->save_regs[20]);
    OUTREG(GPIOPAD_MASK, rinfo->save_regs[21]);
    OUTREG(ZV_LCDPAD_A, rinfo->save_regs[22]);
    OUTREG(ZV_LCDPAD_EN, rinfo->save_regs[23]);
    OUTREG(ZV_LCDPAD_MASK, rinfo->save_regs[24]);
    OUTREG(GPIO_VGA_DDC, rinfo->save_regs[25]);
    OUTREG(GPIO_DVI_DDC, rinfo->save_regs[26]);
    OUTREG(GPIO_MONID, rinfo->save_regs[27]);
    OUTREG(GPIO_CRT2_DDC, rinfo->save_regs[28]);
}

/* Park the GPIO and DDC pads with fixed values for suspend; the saved
 * pad state is restored by radeon_pm_restore_regs on wakeup. */
static void radeon_pm_disable_iopad(struct radeonfb_info *rinfo)
{
    OUTREG(GPIOPAD_MASK, 0x0001ffff);
    OUTREG(GPIOPAD_EN, 0x00000400);
    OUTREG(GPIOPAD_A, 0x00000000);
    OUTREG(ZV_LCDPAD_MASK, 0x00000000);
    OUTREG(ZV_LCDPAD_EN, 0x00000000);
    OUTREG(ZV_LCDPAD_A, 0x00000000);
    OUTREG(GPIO_VGA_DDC, 0x00030000);
    OUTREG(GPIO_DVI_DDC, 0x00000000);
    OUTREG(GPIO_MONID, 0x00030000);
    OUTREG(GPIO_CRT2_DDC, 0x00000000);
}

/* Stub: V2CLK programming for suspend is not implemented — the intended
 * register writes are commented out pending reverse engineering. */
static void radeon_pm_program_v2clk(struct radeonfb_info *rinfo)
{
//
//	u32 reg;
//
//	OUTPLL(P2PLL_REF_DIV, 0x0c);
//
//	.../...
// figure out what macos does here
/* NOTE(review): the words above were the tail of the "// .../..." line
 * comment on the previous source line, split by a line-wrapping error;
 * restored as a comment so the brace below closes the stub cleanly. */
}

/* Drop analog/TMDS output current draw for suspend: select the mobile
 * platform profile on BUS_CNTL1, allow all PLLs to turn off, and power
 * down the TV DAC, TMDS transmitter and DAC comparators. */
static void radeon_pm_low_current(struct radeonfb_info *rinfo)
{
    u32 reg;

    reg  = INREG(BUS_CNTL1);
    reg &= ~BUS_CNTL1_MOBILE_PLATFORM_SEL_MASK;
    reg |= BUS_CNTL1_AGPCLK_VALID | (1<<BUS_CNTL1_MOBILE_PLATFORM_SEL_SHIFT);
    OUTREG(BUS_CNTL1, reg);

    reg  = INPLL(PLL_PWRMGT_CNTL);
    reg |= PLL_PWRMGT_CNTL_SPLL_TURNOFF | PLL_PWRMGT_CNTL_PPLL_TURNOFF |
        PLL_PWRMGT_CNTL_P2PLL_TURNOFF | PLL_PWRMGT_CNTL_TVPLL_TURNOFF;
    reg &= ~PLL_PWRMGT_CNTL_SU_MCLK_USE_BCLK;
    reg &= ~PLL_PWRMGT_CNTL_MOBILE_SU;
    OUTPLL(PLL_PWRMGT_CNTL, reg);

//    reg  = INPLL(TV_PLL_CNTL1);
//    reg |= TV_PLL_CNTL1__TVPLL_RESET | TV_PLL_CNTL1__TVPLL_SLEEP;
//    OUTPLL(TV_PLL_CNTL1, reg);

    reg  = INREG(TV_DAC_CNTL);
    reg &= ~(TV_DAC_CNTL_BGADJ_MASK |TV_DAC_CNTL_DACADJ_MASK);
    reg |=TV_DAC_CNTL_BGSLEEP | TV_DAC_CNTL_RDACPD | TV_DAC_CNTL_GDACPD |
        TV_DAC_CNTL_BDACPD |
        (8<<TV_DAC_CNTL_BGADJ__SHIFT) | (8<<TV_DAC_CNTL_DACADJ__SHIFT);
    OUTREG(TV_DAC_CNTL, reg);

    reg  = INREG(TMDS_TRANSMITTER_CNTL);
    reg &= ~(TMDS_PLL_EN |TMDS_PLLRST);
    OUTREG(TMDS_TRANSMITTER_CNTL, reg);

//    lvds_pll_cntl = regr32(g, LVDS_PLL_CNTL);
//    lvds_pll_cntl &= ~LVDS_PLL_CNTL__LVDS_PLL_EN;
//    lvds_pll_cntl |= LVDS_PLL_CNTL__LVDS_PLL_RESET;
//    regw32(g, LVDS_PLL_CNTL, lvds_pll_cntl);

    reg = INREG(DAC_CNTL);
    reg &= ~DAC_CMP_EN;
    OUTREG(DAC_CNTL, reg);

    reg = INREG(DAC_CNTL2);
    reg &= ~DAC2_CMP_EN;
    OUTREG(DAC_CNTL2, reg);

    reg = INREG(TV_DAC_CNTL);
    reg &= ~TV_DAC_CNTL_DETECT;
    OUTREG(TV_DAC_CNTL, reg);
}

/* Prepare the chip for suspend.  The large #if 0 section below is the
 * (disabled) MacOS-derived pm_init sequence; only the final CRTC
 * disable at the end of this function is live. */
static void radeon_pm_setup_for_suspend(struct radeonfb_info *rinfo)
{
    /* This code is disabled. It does what is in the pm_init
     * function of the MacOS driver code ATI sent me.
However,
     * it doesn't fix my sleep problem, and is causing other issues
     * on wakeup (bascially the machine dying when switching consoles
     * I haven't had time to investigate this yet
     */
#if 0
    u32 disp_misc_cntl;
    u32 disp_pwr_man;
    u32 temp;

    // set SPLL, MPLL, PPLL, P2PLL, TVPLL, SCLK, MCLK, PCLK, P2CLK,
    // TCLK and TEST_MODE to 0
    temp = INPLL(CLK_PWRMGT_CNTL);
    OUTPLL(CLK_PWRMGT_CNTL , temp & ~0xc00002ff);

    // Turn on Power Management
    temp = INPLL(CLK_PWRMGT_CNTL);
    OUTPLL(CLK_PWRMGT_CNTL , temp | 0x00000400);

    // Turn off display clock if using mobile chips
    /* NOTE(review): uses OUTREG on a PLL index — looks like it should be
     * OUTPLL; harmless while the whole section is under #if 0. */
    temp = INPLL(CLK_PWRMGT_CNTL);
    OUTREG(CLK_PWRMGT_CNTL , temp | 0x00100000);

    // Force PIXCLK_ALWAYS_ON and PIXCLK_DAC_ALWAYS_ON
    temp = INPLL(VCLK_ECP_CNTL);
    OUTPLL(VCLK_ECP_CNTL, temp & ~0x000000c0);

    // Force ECP_FORCE_ON to 1
    temp = INPLL(VCLK_ECP_CNTL);
    OUTPLL(VCLK_ECP_CNTL, temp | 0x00040000);

    // Force PIXCLK_BLEND_ALWAYS_ON and PIXCLK_GV_ALWAYS_ON
    temp = INPLL(PIXCLKS_CNTL);
    OUTPLL(PIXCLKS_CNTL, temp & ~0x00001800);

    // Forcing SCLK_CNTL to ON
    OUTPLL(SCLK_CNTL, (INPLL(SCLK_CNTL)& 0x00000007) | 0xffff8000 );

    // Set PM control over XTALIN pad
    temp = INPLL(CLK_PIN_CNTL);
    OUTPLL(CLK_PIN_CNTL, temp | 0x00080000);

    // Force MCLK and YCLK and MC as dynamic
    temp = INPLL(MCLK_CNTL);
    OUTPLL(MCLK_CNTL, temp & 0xffeaffff);

    // PLL_TURNOFF
    temp = INPLL(PLL_PWRMGT_CNTL);
    OUTPLL(PLL_PWRMGT_CNTL, temp | 0x0000001f);

    // set MOBILE_SU to 1 if M6 or DDR64 is detected
    temp = INPLL(PLL_PWRMGT_CNTL);
    OUTPLL(PLL_PWRMGT_CNTL, temp | 0x00010000);

    // select PM access mode (PM_MODE_SEL) (use ACPI mode)
//    temp = INPLL(PLL_PWRMGT_CNTL);
//    OUTPLL(PLL_PWRMGT_CNTL, temp | 0x00002000);
    temp = INPLL(PLL_PWRMGT_CNTL);
    OUTPLL(PLL_PWRMGT_CNTL, temp & ~0x00002000);

    // set DISP_MISC_CNTL register
    disp_misc_cntl = INREG(DISP_MISC_CNTL);
    disp_misc_cntl &= ~(    DISP_MISC_CNTL_SOFT_RESET_GRPH_PP |
                DISP_MISC_CNTL_SOFT_RESET_SUBPIC_PP |
                DISP_MISC_CNTL_SOFT_RESET_OV0_PP |
                DISP_MISC_CNTL_SOFT_RESET_GRPH_SCLK |
                DISP_MISC_CNTL_SOFT_RESET_SUBPIC_SCLK |
                DISP_MISC_CNTL_SOFT_RESET_OV0_SCLK |
                DISP_MISC_CNTL_SOFT_RESET_GRPH2_PP |
                DISP_MISC_CNTL_SOFT_RESET_GRPH2_SCLK |
                DISP_MISC_CNTL_SOFT_RESET_LVDS |
                DISP_MISC_CNTL_SOFT_RESET_TMDS |
                DISP_MISC_CNTL_SOFT_RESET_DIG_TMDS |
                DISP_MISC_CNTL_SOFT_RESET_TV);
    OUTREG(DISP_MISC_CNTL, disp_misc_cntl);

    // set DISP_PWR_MAN register
    disp_pwr_man = INREG(DISP_PWR_MAN);
    // clau - 9.29.2000 - changes made to bit23:18 to set to 1 as requested by George
    disp_pwr_man |= (DISP_PWR_MAN_DIG_TMDS_ENABLE_RST |
             DISP_PWR_MAN_TV_ENABLE_RST |
//           DISP_PWR_MAN_AUTO_PWRUP_EN |
             DISP_PWR_MAN_DISP_D3_GRPH_RST |
             DISP_PWR_MAN_DISP_D3_SUBPIC_RST |
             DISP_PWR_MAN_DISP_D3_OV0_RST |
             DISP_PWR_MAN_DISP_D1D2_GRPH_RST |
             DISP_PWR_MAN_DISP_D1D2_SUBPIC_RST |
             DISP_PWR_MAN_DISP_D1D2_OV0_RST);
    disp_pwr_man &= ~(DISP_PWR_MAN_DISP_PWR_MAN_D3_CRTC_EN |
              DISP_PWR_MAN_DISP2_PWR_MAN_D3_CRTC2_EN|
              DISP_PWR_MAN_DISP_D3_RST |
              DISP_PWR_MAN_DISP_D3_REG_RST);
    OUTREG(DISP_PWR_MAN, disp_pwr_man);

    // clau - 10.24.2000
    // - add in setting for BUS_CNTL1 b27:26 = 0x01 and b31 = 0x1
    // - add in setting for AGP_CNTL  b7:0 = 0x20
    // - add in setting for DVI_DDC_DATA_OUT_EN b17:16 = 0x0
    // the following settings (two lines) are applied at a later part of this function, only on mobile platform
    // requres -mobile flag
    OUTREG(BUS_CNTL1, (INREG(BUS_CNTL1) & 0xf3ffffff) | 0x04000000);
    OUTREG(BUS_CNTL1, INREG(BUS_CNTL1) | 0x80000000);
    OUTREG(AGP_CNTL, (INREG(AGP_CNTL) & 0xffffff00) | 0x20);
    OUTREG(GPIO_DVI_DDC, INREG(GPIO_DVI_DDC) & 0xfffcffff);

    // yulee - 12.12.2000
    // A12 only
    // EN_MCLK_TRISTATE_IN_SUSPEND@MCLK_MISC = 1
    // ACCESS_REGS_IN_SUSPEND@CLK_PIN_CNTL = 0
    // only on mobile platform
    OUTPLL(MCLK_MISC, INPLL(MCLK_MISC) | 0x00040000 );

    // yulee -12.12.2000
    // AGPCLK_VALID@BUS_CNTL1 = 1
    // MOBILE_PLATFORM_SEL@BUS_CNTL1 = 01
    // CRTC_STEREO_SYNC_OUT_EN@CRTC_OFFSET_CNTL = 0
    // CG_CLK_TO_OUTPIN@CLK_PIN_CNTL = 0
    // only on mobile platform
    OUTPLL(CLK_PIN_CNTL, INPLL(CLK_PIN_CNTL ) & 0xFFFFF7FF );
    OUTREG(BUS_CNTL1, (INREG(BUS_CNTL1 ) & 0xF3FFFFFF) | 0x84000000 );
    OUTREG(CRTC_OFFSET_CNTL, INREG(CRTC_OFFSET_CNTL ) & 0xFFEFFFFF );

    mdelay(100);
#endif

    /* Disable CRTCs */
    OUTREG(CRTC_GEN_CNTL, (INREG(CRTC_GEN_CNTL) & ~CRTC_EN) | CRTC_DISP_REQ_EN_B);
    OUTREG(CRTC2_GEN_CNTL, (INREG(CRTC2_GEN_CNTL) & ~CRTC2_EN) | CRTC2_DISP_REQ_EN_B);
    (void)INREG(CRTC2_GEN_CNTL);
    mdelay(17);
}

/*
 * radeon_set_suspend - move the chip into (suspend != 0) or out of PCI
 * power state D2, saving/restoring register state and, on M6, running
 * the extra mobile-chip suspend sequence and MDLL reset.
 */
static void radeon_set_suspend(struct radeonfb_info *rinfo, int suspend)
{
    u16 pwr_cmd;

    if (!rinfo->pm_reg)
        return;

    /* Set the chip into appropriate suspend mode (we use D2,
     * D3 would require a compete re-initialization of the chip,
     * including PCI config registers, clocks, AGP conf, ...)
     */
    if (suspend) {
        /* According to ATI, we should program V2CLK here, I have
         * to verify what's up exactly
         */
        /* Save some registers */
        radeon_pm_save_regs(rinfo);

        /* Check that on M7 too, might work might not. M7 may also
         * need explicit enabling of PM
         */
        if (rinfo->arch == RADEON_M6) {
            /* Program V2CLK */
            radeon_pm_program_v2clk(rinfo);

            /* Disable IO PADs */
            radeon_pm_disable_iopad(rinfo);

            /* Set low current */
            radeon_pm_low_current(rinfo);

            /* Prepare chip for power management */
            radeon_pm_setup_for_suspend(rinfo);

            /* Reset the MDLL */
            OUTPLL(MDLL_CKO, INPLL(MDLL_CKO) | MCKOA_RESET);
            (void)INPLL(MDLL_RDCKA);
            OUTPLL(MDLL_CKO, INPLL(MDLL_CKO) & ~MCKOA_RESET);
            (void)INPLL(MDLL_RDCKA);
        }

        /* Switch PCI power managment to D2. */
        /* Loop until the config space read confirms D2 took effect. */
        for (;;) {
            pci_read_config_word(
                rinfo->pdev, rinfo->pm_reg+PCI_PM_CTRL,
                &pwr_cmd);
            if (pwr_cmd & 2)
                break;
            pci_write_config_word(
                rinfo->pdev, rinfo->pm_reg+PCI_PM_CTRL,
                (pwr_cmd & ~PCI_PM_CTRL_STATE_MASK) | 2);
            mdelay(500);
        }
    } else {
        /* Switch back PCI powermanagment to D0 */
        mdelay(200);
        pci_write_config_word(rinfo->pdev, rinfo->pm_reg+PCI_PM_CTRL, 0);
        mdelay(500);

        dbg_clk = INPLL(1);

        /* Do we need that on M7 ?
*/
        if (rinfo->arch == RADEON_M6) {
            /* Restore the MDLL */
            OUTPLL(MDLL_CKO, INPLL(MDLL_CKO) & ~MCKOA_RESET);
            (void)INPLL(MDLL_CKO);
        }

        /* Restore some registers */
        radeon_pm_restore_regs(rinfo);
    }
}

/*
 * Save the contents of the framebuffer when we go to sleep,
 * and restore it when we wake up again.
 */
/* PMU sleep notifier: for every registered mobile Radeon, blank and
 * suspend on PBOOK_SLEEP_NOW, resume and restore the video mode on
 * PBOOK_WAKE.  Non-mobile chips refuse the sleep request. */
int radeon_sleep_notify(struct pmu_sleep_notifier *self, int when)
{
    struct radeonfb_info *rinfo;

    for (rinfo = board_list; rinfo != NULL; rinfo = rinfo->next) {
        struct fb_fix_screeninfo fix;
        int nb;
        struct display *disp;

        disp = (rinfo->currcon < 0) ? rinfo->info.disp : &fb_display[rinfo->currcon];

        switch (rinfo->arch) {
            case RADEON_M6:
            case RADEON_M7:
            case RADEON_M9:
                break;
            default:
                return PBOOK_SLEEP_REFUSE;
        }

        radeonfb_get_fix(&fix, fg_console, (struct fb_info *)rinfo);
        nb = fb_display[fg_console].var.yres * fix.line_length;

        switch (when) {
            case PBOOK_SLEEP_NOW:
                acquire_console_sem();
                disp->dispsw = &fbcon_dummy;

                if (!noaccel) {
                    /* Make sure engine is reset */
                    radeon_engine_reset();
                    radeon_engine_idle();
                }

                /* Blank display and LCD */
                radeonfb_blank(VESA_POWERDOWN+1,
                           (struct fb_info *)rinfo);

                /* Sleep */
                rinfo->asleep = 1;
                radeon_set_suspend(rinfo, 1);
                release_console_sem();

                break;

            case PBOOK_WAKE:
                acquire_console_sem();
                /* Wakeup */
                radeon_set_suspend(rinfo, 0);

                if (!noaccel)
                    radeon_engine_init(rinfo);
                rinfo->asleep = 0;
                radeon_set_dispsw(rinfo, disp);
                radeon_load_video_mode(rinfo, &disp->var);
                do_install_cmap(rinfo->currcon < 0 ?
0 : rinfo->currcon,
                    (struct fb_info *)rinfo);

                radeonfb_blank(0, (struct fb_info *)rinfo);
                release_console_sem();
                printk("CLK_PIN_CNTL on wakeup was: %08x\n", dbg_clk);
                break;
        }
    }

    return PBOOK_SLEEP_OK;
}
#endif /* CONFIG_PMAC_PBOOK */

/*
 * radeonfb_pci_register - PCI probe entry point.  Enables the device,
 * maps the framebuffer and MMIO apertures, probes RAM size/type, PLL
 * and monitor information, applies the PPC aperture relocation hack,
 * saves the boot-time register state and registers the framebuffer.
 * All error paths unwind the resources acquired so far.
 */
static int radeonfb_pci_register (struct pci_dev *pdev,
                  const struct pci_device_id *ent)
{
    struct radeonfb_info *rinfo;
    struct radeon_chip_info *rci = &radeon_chip_info[ent->driver_data];
    u32 tmp;

    RTRACE("radeonfb_pci_register BEGIN\n");

    /* Enable device in PCI config */
    if (pci_enable_device(pdev) != 0) {
        printk(KERN_ERR "radeonfb: Cannot enable PCI device\n");
        return -ENODEV;
    }

    rinfo = kmalloc (sizeof (struct radeonfb_info), GFP_KERNEL);
    if (!rinfo) {
        printk ("radeonfb: could not allocate memory\n");
        return -ENODEV;
    }

    memset (rinfo, 0, sizeof (struct radeonfb_info));
    //info = &rinfo->info;
    rinfo->pdev = pdev;
    strcpy(rinfo->name, rci->name);
    rinfo->arch = rci->arch;

    /* Set base addrs */
    rinfo->fb_base_phys = pci_resource_start (pdev, 0);
    rinfo->mmio_base_phys = pci_resource_start (pdev, 2);

    /* request the mem regions */
    if (!request_mem_region (rinfo->fb_base_phys,
                 pci_resource_len(pdev, 0), "radeonfb")) {
        printk ("radeonfb: cannot reserve FB region\n");
        kfree (rinfo);
        return -ENODEV;
    }

    if (!request_mem_region (rinfo->mmio_base_phys,
                 pci_resource_len(pdev, 2), "radeonfb")) {
        printk ("radeonfb: cannot reserve MMIO region\n");
        release_mem_region (rinfo->fb_base_phys,
                    pci_resource_len(pdev, 0));
        kfree (rinfo);
        return -ENODEV;
    }

    /* map the regions */
    rinfo->mmio_base = ioremap (rinfo->mmio_base_phys, RADEON_REGSIZE);
    if (!rinfo->mmio_base) {
        printk ("radeonfb: cannot map MMIO\n");
        release_mem_region (rinfo->mmio_base_phys,
                    pci_resource_len(pdev, 2));
        release_mem_region (rinfo->fb_base_phys,
                    pci_resource_len(pdev, 0));
        kfree (rinfo);
        return -ENODEV;
    }

    rinfo->chipset = pdev->device;

    switch (rinfo->arch) {
        case RADEON_R100:
            rinfo->hasCRTC2 = 0;
            break;
        default:
            /* all the rest have it */
            rinfo->hasCRTC2 = 1;
            break;
    }
#if 0
    if (rinfo->arch == RADEON_M7) {
        /*
         * Noticed some errors in accel with M7, will have to work these out...
         */
        noaccel = 1;
    }
#endif

    if (mirror)
        printk("radeonfb: mirroring display to CRT\n");

    /* framebuffer size */
    tmp = INREG(CONFIG_MEMSIZE);

    /* mem size is bits [28:0], mask off the rest */
    rinfo->video_ram = tmp & CONFIG_MEMSIZE_MASK;

    /* ram type */
    tmp = INREG(MEM_SDRAM_MODE_REG);
    switch ((MEM_CFG_TYPE & tmp) >> 30) {
        case 0:
            /* SDR SGRAM (2:1) */
            strcpy(rinfo->ram_type, "SDR SGRAM");
            rinfo->ram.ml = 4;
            rinfo->ram.mb = 4;
            rinfo->ram.trcd = 1;
            rinfo->ram.trp = 2;
            rinfo->ram.twr = 1;
            rinfo->ram.cl = 2;
            rinfo->ram.loop_latency = 16;
            rinfo->ram.rloop = 16;
            break;
        case 1:
            /* DDR SGRAM */
            strcpy(rinfo->ram_type, "DDR SGRAM");
            rinfo->ram.ml = 4;
            rinfo->ram.mb = 4;
            rinfo->ram.trcd = 3;
            rinfo->ram.trp = 3;
            rinfo->ram.twr = 2;
            rinfo->ram.cl = 3;
            rinfo->ram.tr2w = 1;
            rinfo->ram.loop_latency = 16;
            rinfo->ram.rloop = 16;
            break;
        default:
            /* 64-bit SDR SGRAM */
            strcpy(rinfo->ram_type, "SDR SGRAM 64");
            rinfo->ram.ml = 4;
            rinfo->ram.mb = 8;
            rinfo->ram.trcd = 3;
            rinfo->ram.trp = 3;
            rinfo->ram.twr = 1;
            rinfo->ram.cl = 3;
            rinfo->ram.tr2w = 1;
            rinfo->ram.loop_latency = 17;
            rinfo->ram.rloop = 17;
            break;
    }

    rinfo->bios_seg = radeon_find_rom(rinfo);
    radeon_get_pllinfo(rinfo, rinfo->bios_seg);

    /*
     * Hack to get around some busted production M6's
     * reporting no ram
     */
    if (rinfo->video_ram == 0) {
        switch (pdev->device) {
            case PCI_DEVICE_ID_ATI_RADEON_LY:
            case PCI_DEVICE_ID_ATI_RADEON_LZ:
                rinfo->video_ram = 8192 * 1024;
                break;
            default:
                break;
        }
    }

    RTRACE("radeonfb: probed %s %dk videoram\n", (rinfo->ram_type), (rinfo->video_ram/1024));

#if !defined(__powerpc__)
    radeon_get_moninfo(rinfo);
#else
    /* Apple laptop chips are always driving an internal LCD on DVI. */
    switch (pdev->device) {
        case PCI_DEVICE_ID_ATI_RADEON_LW:
        case PCI_DEVICE_ID_ATI_RADEON_LX:
        case PCI_DEVICE_ID_ATI_RADEON_LY:
        case PCI_DEVICE_ID_ATI_RADEON_LZ:
            rinfo->dviDisp_type = MT_LCD;
            break;
        default:
            radeon_get_moninfo(rinfo);
            break;
    }
#endif

    radeon_get_EDID(rinfo);

    if ((rinfo->dviDisp_type == MT_DFP) || (rinfo->dviDisp_type == MT_LCD) ||
        (rinfo->crtDisp_type == MT_DFP)) {
        if (!radeon_get_dfpinfo(rinfo)) {
            iounmap(rinfo->mmio_base);
            release_mem_region (rinfo->mmio_base_phys,
                        pci_resource_len(pdev, 2));
            release_mem_region (rinfo->fb_base_phys,
                        pci_resource_len(pdev, 0));
            kfree (rinfo);
            return -ENODEV;
        }
    }

    rinfo->fb_base = ioremap (rinfo->fb_base_phys, rinfo->video_ram);
    if (!rinfo->fb_base) {
        printk ("radeonfb: cannot map FB\n");
        iounmap(rinfo->mmio_base);
        release_mem_region (rinfo->mmio_base_phys,
                    pci_resource_len(pdev, 2));
        release_mem_region (rinfo->fb_base_phys,
                    pci_resource_len(pdev, 0));
        kfree (rinfo);
        return -ENODEV;
    }

    /* I SHOULD FIX THAT CRAP ! I should probably mimmic XFree DRI
     * driver setup here.
     *
     * On PPC, OF based cards setup the internal memory
     * mapping in strange ways. We change it so that the
     * framebuffer is mapped at 0 and given half of the card's
     * address space (2Gb). AGP is mapped high (0xe0000000) and
     * can use up to 512Mb. Once DRI is fully implemented, we
     * will have to setup the PCI remapper to remap the agp_special_page
     * memory page somewhere between those regions so that the card
     * use a normal PCI bus master cycle to access the ring read ptr.
     * --BenH.
     */
#ifdef CONFIG_ALL_PPC
    if (rinfo->hasCRTC2)
        OUTREG(CRTC2_GEN_CNTL,
            (INREG(CRTC2_GEN_CNTL) & ~CRTC2_EN) | CRTC2_DISP_REQ_EN_B);
    OUTREG(CRTC_EXT_CNTL, INREG(CRTC_EXT_CNTL) | CRTC_DISPLAY_DIS);
    OUTREG(MC_FB_LOCATION, 0x7fff0000);
    OUTREG(MC_AGP_LOCATION, 0xffffe000);
    OUTREG(DISPLAY_BASE_ADDR, 0x00000000);
    if (rinfo->hasCRTC2)
        OUTREG(CRTC2_DISPLAY_BASE_ADDR, 0x00000000);
    OUTREG(SRC_OFFSET, 0x00000000);
    OUTREG(DST_OFFSET, 0x00000000);
    mdelay(10);
    OUTREG(CRTC_EXT_CNTL, INREG(CRTC_EXT_CNTL) & ~CRTC_DISPLAY_DIS);
#endif /* CONFIG_ALL_PPC */

    /* save current mode regs before we switch into the new one
     * so we can restore this upon __exit
     */
    radeon_save_state (rinfo, &rinfo->init_state);

    /* set all the vital stuff */
    radeon_set_fbinfo (rinfo);

    pci_set_drvdata(pdev, rinfo);
    rinfo->next = board_list;
    board_list = rinfo;
    ((struct fb_info *) rinfo)->device = &pdev->dev;
    if (register_framebuffer ((struct fb_info *) rinfo) < 0) {
        printk ("radeonfb: could not register framebuffer\n");
        iounmap(rinfo->fb_base);
        iounmap(rinfo->mmio_base);
        release_mem_region (rinfo->mmio_base_phys,
                    pci_resource_len(pdev, 2));
        release_mem_region (rinfo->fb_base_phys,
                    pci_resource_len(pdev, 0));
        kfree (rinfo);
        return -ENODEV;
    }

#ifdef CONFIG_MTRR
    /* Write-combining on the framebuffer aperture, unless disabled. */
    rinfo->mtrr_hdl = nomtrr ? -1 : mtrr_add(rinfo->fb_base_phys,
                         rinfo->video_ram,
                         MTRR_TYPE_WRCOMB, 1);
#endif

#ifdef CONFIG_PMAC_BACKLIGHT
    if (rinfo->dviDisp_type == MT_LCD)
        register_backlight_controller(&radeon_backlight_controller,
                          rinfo, "ati");
#endif

#ifdef CONFIG_PMAC_PBOOK
    if (rinfo->dviDisp_type == MT_LCD) {
        rinfo->pm_reg = pci_find_capability(pdev, PCI_CAP_ID_PM);
        pmu_register_sleep_notifier(&radeon_sleep_notifier);
    }
#endif

    printk ("radeonfb: ATI Radeon %s %s %d MB\n", rinfo->name, rinfo->ram_type,
        (rinfo->video_ram/(1024*1024)));

    if (rinfo->hasCRTC2) {
        printk("radeonfb: DVI port %s monitor connected\n",
               GET_MON_NAME(rinfo->dviDisp_type));
        printk("radeonfb: CRT port %s monitor connected\n",
               GET_MON_NAME(rinfo->crtDisp_type));
    } else {
        printk("radeonfb: CRT port %s monitor connected\n",
               GET_MON_NAME(rinfo->crtDisp_type));
    }

    RTRACE("radeonfb_pci_register END\n");

    return 0;
}

/* PCI remove entry point: restore the saved boot-time mode, release the
 * MTRR, unregister the framebuffer and free all mapped resources. */
static void __devexit radeonfb_pci_unregister (struct pci_dev *pdev)
{
    struct radeonfb_info *rinfo = pci_get_drvdata(pdev);

    if (!rinfo)
        return;

    /* restore original state
     *
     * Doesn't quite work yet, possibly because of the PPC hacking
     * I do on startup, disable for now.
--BenH */
    radeon_write_mode (rinfo, &rinfo->init_state);

#ifdef CONFIG_MTRR
    if (rinfo->mtrr_hdl >= 0)
        mtrr_del(rinfo->mtrr_hdl, 0, 0);
#endif

    unregister_framebuffer ((struct fb_info *) rinfo);

    iounmap(rinfo->mmio_base);
    iounmap(rinfo->fb_base);

    release_mem_region (rinfo->mmio_base_phys,
                pci_resource_len(pdev, 2));
    release_mem_region (rinfo->fb_base_phys,
                pci_resource_len(pdev, 0));

    kfree (rinfo);
}

static struct pci_driver radeonfb_driver = {
    .name = "radeonfb",
    .id_table = radeonfb_pci_table,
    .probe = radeonfb_pci_register,
    .remove = __devexit_p(radeonfb_pci_unregister),
};

/* Forward declaration: option parsing is defined below but needed by init. */
int __init radeonfb_old_setup (char *options);

/* Module init: parse the "radeonfb_old" kernel command-line options when
 * built in, then register the PCI driver. */
int __init radeonfb_old_init (void)
{
#ifndef MODULE
    char *option = NULL;

    if (fb_get_options("radeonfb_old", &option))
        return -ENODEV;
    radeonfb_old_setup(option);
#endif
    return pci_module_init (&radeonfb_driver);
}

void __exit radeonfb_old_exit (void)
{
    pci_unregister_driver (&radeonfb_driver);
}

/* Parse the comma-separated option string; any token that is not a
 * recognized flag is treated as the video mode specification. */
int __init radeonfb_old_setup (char *options)
{
    char *this_opt;

    if (!options || !*options)
        return 0;

    while ((this_opt = strsep (&options, ",")) != NULL) {
        if (!*this_opt)
            continue;
        if (!strncmp(this_opt, "noaccel", 7)) {
            noaccel = 1;
        } else if (!strncmp(this_opt, "mirror", 6)) {
            mirror = 1;
        } else if (!strncmp(this_opt, "dfp", 3)) {
            force_dfp = 1;
        } else if (!strncmp(this_opt, "panel_yres:", 11)) {
            panel_yres = simple_strtoul((this_opt+11), NULL, 0);
        } else if (!strncmp(this_opt, "nomtrr", 6)) {
            nomtrr = 1;
        } else
            mode_option = this_opt;
    }

    return 0;
}

module_init(radeonfb_old_init);

#ifdef MODULE
module_exit(radeonfb_old_exit);
#endif

MODULE_AUTHOR("Ani Joshi");
MODULE_DESCRIPTION("framebuffer driver for ATI Radeon chipset");
MODULE_LICENSE("GPL");
Java
/* * The Mana Server * Copyright (C) 2006-2010 The Mana World Development Team * * This file is part of The Mana Server. * * The Mana Server is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * any later version. * * The Mana Server is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with The Mana Server. If not, see <http://www.gnu.org/licenses/>. */ #include <cstdlib> #include <zlib.h> #include "utils/zlib.h" #include "utils/logger.h" static void logZlibError(int error) { switch (error) { case Z_MEM_ERROR: LOG_ERROR("Out of memory while decompressing data!"); break; case Z_VERSION_ERROR: LOG_ERROR("Incompatible zlib version!"); break; case Z_DATA_ERROR: LOG_ERROR("Incorrect zlib compressed data!"); break; default: LOG_ERROR("Unknown error while decompressing data!"); } } bool inflateMemory(char *in, unsigned inLength, char *&out, unsigned &outLength) { int bufferSize = 256 * 1024; int ret; z_stream strm; out = (char *)malloc(bufferSize); strm.zalloc = Z_NULL; strm.zfree = Z_NULL; strm.opaque = Z_NULL; strm.next_in = (Bytef *)in; strm.avail_in = inLength; strm.next_out = (Bytef *)out; strm.avail_out = bufferSize; ret = inflateInit2(&strm, 15 + 32); if (ret != Z_OK) { logZlibError(ret); free(out); return false; } do { ret = inflate(&strm, Z_SYNC_FLUSH); switch (ret) { case Z_NEED_DICT: case Z_STREAM_ERROR: ret = Z_DATA_ERROR; case Z_DATA_ERROR: case Z_MEM_ERROR: inflateEnd(&strm); logZlibError(ret); free(out); return false; } if (ret != Z_STREAM_END) { out = (char *)realloc(out, bufferSize * 2); if (!out) { inflateEnd(&strm); logZlibError(Z_MEM_ERROR); free(out); return false; 
} strm.next_out = (Bytef *)(out + bufferSize); strm.avail_out = bufferSize; bufferSize *= 2; } } while (ret != Z_STREAM_END); if (strm.avail_in != 0) { logZlibError(Z_DATA_ERROR); free(out); return false; } outLength = bufferSize - strm.avail_out; inflateEnd(&strm); return true; }
Java
<?php

/**
 * Unit tests for InstallDocFormatter::format(), which escapes wikitext
 * markup and turns task/bug/configuration-variable references into links.
 */
class InstallDocFormatterTest extends \MediaWikiUnitTestCase {
	/**
	 * @covers InstallDocFormatter
	 * @dataProvider provideDocFormattingTests
	 */
	public function testFormat( $expected, $unformattedText, $message = '' ) {
		$this->assertEquals(
			$expected,
			InstallDocFormatter::format( $unformattedText ),
			$message
		);
	}

	/**
	 * Provider for testFormat()
	 */
	public static function provideDocFormattingTests() {
		# Format: (expected string, unformattedText string, optional message)
		return [
			# Escape some wikitext
			[ 'Install &lt;tag>', 'Install <tag>', 'Escaping <' ],
			# FIX: the messages of the next two cases were swapped
			# ('Escaping [[' was attached to the {{...}} case and vice versa).
			[ 'Install &#123;&#123;template}}', 'Install {{template}}', 'Escaping {{' ],
			[ 'Install &#91;&#91;page]]', 'Install [[page]]', 'Escaping [[' ],
			[ 'Install &#95;&#95;TOC&#95;&#95;', 'Install __TOC__', 'Escaping __' ],
			[ 'Install ', "Install \r", 'Removing \r' ],

			# Transform \t{1,2} into :{1,2}
			[ ':One indentation', "\tOne indentation", 'Replacing a single \t' ],
			[ '::Two indentations', "\t\tTwo indentations", 'Replacing 2 x \t' ],

			# Transform 'T123' links
			[
				'<span class="config-plainlink">[https://phabricator.wikimedia.org/T123 T123]</span>',
				'T123',
				'Testing T123 links'
			],
			[
				'bug <span class="config-plainlink">[https://phabricator.wikimedia.org/T123 T123]</span>',
				'bug T123',
				'Testing bug T123 links'
			],
			[
				'(<span class="config-plainlink">[https://phabricator.wikimedia.org/T987654 T987654]</span>)',
				'(T987654)',
				'Testing (T987654) links'
			],

			# "Tabc" shouldn't work
			[ 'Tfoobar', 'Tfoobar', "Don't match T followed by non-digits" ],
			[ 'T!!fakefake!!', 'T!!fakefake!!', "Don't match T followed by non-digits" ],

			# Transform 'bug 123' links
			[
				'<span class="config-plainlink">[https://bugzilla.wikimedia.org/123 bug 123]</span>',
				'bug 123',
				'Testing bug 123 links'
			],
			[
				'(<span class="config-plainlink">[https://bugzilla.wikimedia.org/987654 bug 987654]</span>)',
				'(bug 987654)',
				'Testing (bug 987654) links'
			],

			# "bug abc" shouldn't work
			[ 'bug foobar', 'bug foobar', "Don't match bug followed by non-digits" ],
			[ 'bug !!fakefake!!', 'bug !!fakefake!!', "Don't match bug followed by non-digits" ],

			# Transform '$wgFooBar' links
			[
				'<span class="config-plainlink">' .
					'[https://www.mediawiki.org/wiki/Manual:$wgFooBar $wgFooBar]</span>',
				'$wgFooBar',
				'Testing basic $wgFooBar'
			],
			[
				'<span class="config-plainlink">' .
					'[https://www.mediawiki.org/wiki/Manual:$wgFooBar45 $wgFooBar45]</span>',
				'$wgFooBar45',
				'Testing $wgFooBar45 (with numbers)'
			],
			[
				'<span class="config-plainlink">' .
					'[https://www.mediawiki.org/wiki/Manual:$wgFoo_Bar $wgFoo_Bar]</span>',
				'$wgFoo_Bar',
				'Testing $wgFoo_Bar (with underscore)'
			],

			# Icky variables that shouldn't link
			[
				'$myAwesomeVariable',
				'$myAwesomeVariable',
				'Testing $myAwesomeVariable (not starting with $wg)'
			],
			[
				'$()not!a&Var',
				'$()not!a&Var',
				'Testing $()not!a&Var (obviously not a variable)'
			],
		];
	}
}
Java
/*
 * efs_fs_i.h
 *
 * Copyright (c) 1999 Al Smith
 *
 * Portions derived from IRIX header files (c) 1988 Silicon Graphics
 */

#ifndef __EFS_FS_I_H__
#define __EFS_FS_I_H__

/* EFS on-disk scalar types: 32-bit block and inode numbers. */
typedef int32_t efs_block_t;
typedef uint32_t efs_ino_t;

/* Number of extent slots stored directly in the on-disk inode. */
#define EFS_DIRECTEXTENTS 12

/*
 * layout of an extent, in memory and on disk. 8 bytes exactly.
 *
 * `raw` gives byte-level access to the same 8 bytes that the `cooked`
 * bit-field view interprets.
 */
typedef union extent_u {
	unsigned char raw[8];
	struct extent_s {
		unsigned int ex_magic:8;	/* magic # (zero) */
		unsigned int ex_bn:24;		/* basic block */
		unsigned int ex_length:8;	/* numblocks in this extent */
		unsigned int ex_offset:24;	/* logical offset into file */
	} cooked;
} efs_extent;

/*
 * Device number pair for special (IFCHR/IFBLK) inodes.
 * NOTE(review): presumably old-style vs. new-style IRIX device numbers,
 * per the o/n prefixes — confirm against the IRIX headers.
 */
typedef struct edevs {
	short odev;		/* old-style device number */
	unsigned int ndev;	/* new-style device number */
} efs_devs;

/*
 * extent based filesystem inode as it appears on disk.  The efs inode
 * is exactly 128 bytes long.
 */
struct efs_dinode {
	u_short		di_mode;	/* mode and type of file */
	short		di_nlink;	/* number of links to file */
	u_short		di_uid;		/* owner's user id */
	u_short		di_gid;		/* owner's group id */
	int32_t		di_size;	/* number of bytes in file */
	int32_t		di_atime;	/* time last accessed */
	int32_t		di_mtime;	/* time last modified */
	int32_t		di_ctime;	/* time created */
	uint32_t	di_gen;		/* generation number */
	short		di_numextents;	/* # of extents */
	u_char		di_version;	/* version of inode */
	u_char		di_spare;	/* spare - used by AFS */
	union di_addr {
		efs_extent	di_extents[EFS_DIRECTEXTENTS];
		efs_devs	di_dev;	/* device for IFCHR/IFBLK */
	} di_u;
};

/* efs inode storage in memory */
struct efs_inode_info {
	int		numextents;	/* number of valid entries in extents[] */
	int		lastextent;	/* NOTE(review): appears to be the most
					 * recently used extent index (lookup
					 * cache) — confirm against callers */
	efs_extent	extents[EFS_DIRECTEXTENTS]; /* in-memory extent copies */
	struct inode	vfs_inode;	/* embedded generic VFS inode */
};

#endif /* __EFS_FS_I_H__ */
Java
import React from "react"; import PropTypes from "prop-types"; import Box from "grommet/components/Box"; import Paragraph from "grommet/components/Paragraph"; import Label from "grommet/components/Label"; import FormLayer from "../components/FormLayer"; class LayerObjectFieldTemplate extends React.Component { constructor(props) { super(props); this.state = { layerActive: false }; } _onClick() { this.setState({ layerActive: true }); } render() { if (this.props.idSchema["$id"] == "root") { return <Box>{this.props.properties.map(prop => prop.content)}</Box>; } else { return ( <Box className="grommetux-form-field" direction="row" wrap={false}> { <FormLayer layerActive={this.state.layerActive} onClose={(() => { this.setState({ layerActive: false }); }).bind(this)} properties={this.props.properties.map(prop => prop.content)} /> } <Box flex={true}> <Box align="center"> <Label size="small" strong="none" uppercase={true}> {this.props.title} </Label> </Box> {this.props.description ? ( <Paragraph size="small">{this.props.description}</Paragraph> ) : null} </Box> </Box> ); } } } LayerObjectFieldTemplate.propTypes = { title: PropTypes.string, description: PropTypes.string, required: PropTypes.bool, idSchema: PropTypes.object, uiSchema: PropTypes.object, properties: PropTypes.object }; export default LayerObjectFieldTemplate;
Java
/***************************************************************************
 *   Copyright (C) 2011 by Broadcom Corporation                            *
 *   Evan Hunter - [email protected]                                    *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the                         *
 *   Free Software Foundation, Inc.,                                       *
 *   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.           *
 ***************************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "rtos.h"

/*
 * Per-register offsets into an EmbKernel thread's saved stack frame on
 * Cortex-M, in GDB register-list order (r0-r12, sp, lr, pc, FPA regs, FPS,
 * xPSR).  First field is the byte offset within the stacked frame, second
 * is the register width in bits.  Per OpenOCD's RTOS stacking convention,
 * an offset of -1 means the register is not present in the stacked frame,
 * and -2 means the value is the thread's stack pointer itself.
 */
static const struct stack_register_offset rtos_embkernel_Cortex_M_stack_offsets[] = {
	{ 0x24, 32 },		/* r0   */
	{ 0x28, 32 },		/* r1   */
	{ 0x2c, 32 },		/* r2   */
	{ 0x30, 32 },		/* r3   */
	{ 0x00, 32 },		/* r4   */
	{ 0x04, 32 },		/* r5   */
	{ 0x08, 32 },		/* r6   */
	{ 0x0c, 32 },		/* r7   */
	{ 0x10, 32 },		/* r8   */
	{ 0x14, 32 },		/* r9   */
	{ 0x18, 32 },		/* r10  */
	{ 0x1c, 32 },		/* r11  */
	{ 0x34, 32 },		/* r12  */
	{ -2, 32 },		/* sp   */
	{ 0x38, 32 },		/* lr   */
	{ 0x3c, 32 },		/* pc   */
	{ -1, 96 },		/* FPA1 */
	{ -1, 96 },		/* FPA2 */
	{ -1, 96 },		/* FPA3 */
	{ -1, 96 },		/* FPA4 */
	{ -1, 96 },		/* FPA5 */
	{ -1, 96 },		/* FPA6 */
	{ -1, 96 },		/* FPA7 */
	{ -1, 96 },		/* FPA8 */
	{ -1, 32 },		/* FPS  */
	{ 0x40, 32 },		/* xPSR */
};

/*
 * Stacking description handed to the generic OpenOCD RTOS layer for
 * EmbKernel threads on Cortex-M targets.
 */
const struct rtos_register_stacking rtos_embkernel_Cortex_M_stacking = {
	0x40,					/* stack_registers_size */
	-1,					/* stack_growth_direction */
	26,					/* num_output_registers */
	8,					/* stack_alignment */
	rtos_embkernel_Cortex_M_stack_offsets	/* register_offsets */
};
Java
/*
 * tkMacMenubutton.c --
 *
 *	This file implements the Macintosh specific portion of the
 *	menubutton widget.
 *
 * Copyright (c) 1996 by Sun Microsystems, Inc.
 *
 * See the file "license.terms" for information on usage and redistribution
 * of this file, and for a DISCLAIMER OF ALL WARRANTIES.
 *
 * RCS: @(#) $Id$
 */

#include "tkMenubutton.h"
#include "tkMacInt.h"
#include <Controls.h>

#define kShadowOffset	(3)	/* amount to offset shadow from frame */
#define kTriangleWidth	(11)	/* width of the triangle */
#define kTriangleHeight	(6)	/* height of the triangle */
#define kTriangleMargin	(5)	/* margin around triangle */

/*
 * Declaration of Mac specific button structure.
 * (FIX: the original comment said "Unix specific", a copy-paste slip.)
 */

typedef struct MacMenuButton {
    TkMenuButton info;		/* Generic button info. */
} MacMenuButton;

/*
 * The structure below defines menubutton class behavior by means of
 * procedures that can be invoked from generic window code.
 */

TkClassProcs tkpMenubuttonClass = {
    NULL,			/* createProc. */
    TkMenuButtonWorldChanged,	/* geometryProc. */
    NULL			/* modalProc. */
};

/*
 *----------------------------------------------------------------------
 *
 * TkpCreateMenuButton --
 *
 *	Allocate a new TkMenuButton structure.
 *
 * Results:
 *	Returns a newly allocated TkMenuButton structure.
 *
 * Side effects:
 *	None.  (FIX: the original comment claimed an event handler was
 *	registered here, but this function only allocates the record.)
 *
 *----------------------------------------------------------------------
 */

TkMenuButton *
TkpCreateMenuButton(
    Tk_Window tkwin)
{
    MacMenuButton *butPtr = (MacMenuButton *)ckalloc(sizeof(MacMenuButton));

    return (TkMenuButton *) butPtr;
}

/*
 *----------------------------------------------------------------------
 *
 * TkpDisplayMenuButton --
 *
 *	This procedure is invoked to display a menubutton widget.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	Commands are output to X to display the menubutton in its
 *	current mode.
 *
 *----------------------------------------------------------------------
 */

void
TkpDisplayMenuButton(
    ClientData clientData)	/* Information about widget. */
{
    TkMenuButton *mbPtr = (TkMenuButton *) clientData;
    GC gc;
    Tk_3DBorder border;
    int x = 0;			/* Initialization needed only to stop
				 * compiler warning. */
    int y;
    Tk_Window tkwin = mbPtr->tkwin;
    int width, height;
    GWorldPtr destPort;
    CGrafPtr saveWorld;
    GDHandle saveDevice;
    MacDrawable *macDraw;

    mbPtr->flags &= ~REDRAW_PENDING;
    if ((mbPtr->tkwin == NULL) || !Tk_IsMapped(tkwin)) {
	return;
    }

    /*
     * Save the current graphics world and make the window's port the
     * current drawing destination; it is restored before returning.
     */

    GetGWorld(&saveWorld, &saveDevice);
    destPort = TkMacGetDrawablePort(Tk_WindowId(tkwin));
    SetGWorld(destPort, NULL);
    macDraw = (MacDrawable *) Tk_WindowId(tkwin);

    /*
     * Pick the GC that matches the widget state: disabled (with a special
     * disabled foreground), active, or normal.
     */

    if ((mbPtr->state == STATE_DISABLED) && (mbPtr->disabledFg != NULL)) {
	gc = mbPtr->disabledGC;
    } else if ((mbPtr->state == STATE_ACTIVE) && !Tk_StrictMotif(mbPtr->tkwin)) {
	gc = mbPtr->activeTextGC;
    } else {
	gc = mbPtr->normalTextGC;
    }
    border = mbPtr->normalBorder;

    /*
     * The widget is drawn directly into the window: first clear the whole
     * area with the background border, then draw the contents on top.
     * (FIX: the original comment described off-screen pixmap double
     * buffering, which this code does not do.)
     */

    Tk_Fill3DRectangle(tkwin, Tk_WindowId(tkwin), border, 0, 0,
	    Tk_Width(tkwin), Tk_Height(tkwin), 0, TK_RELIEF_FLAT);

    /*
     * Display image or bitmap or text for button.  The bitmap branch jumps
     * into the shared anchoring/drawing code via `imageOrBitmap`.
     */

    if (mbPtr->image != None) {
	Tk_SizeOfImage(mbPtr->image, &width, &height);

	imageOrBitmap:
	TkComputeAnchor(mbPtr->anchor, tkwin, 0, 0,
		width + mbPtr->indicatorWidth, height, &x, &y);
	if (mbPtr->image != NULL) {
	    Tk_RedrawImage(mbPtr->image, 0, 0, width, height,
		    Tk_WindowId(tkwin), x, y);
	} else {
	    XCopyPlane(mbPtr->display, mbPtr->bitmap, Tk_WindowId(tkwin),
		    gc, 0, 0, (unsigned) width, (unsigned) height, x, y, 1);
	}
    } else if (mbPtr->bitmap != None) {
	Tk_SizeOfBitmap(mbPtr->display, mbPtr->bitmap, &width, &height);
	goto imageOrBitmap;
    } else {
	TkComputeAnchor(mbPtr->anchor, tkwin, mbPtr->padX, mbPtr->padY,
		mbPtr->textWidth + mbPtr->indicatorWidth,
		mbPtr->textHeight, &x, &y);
	Tk_DrawTextLayout(mbPtr->display, Tk_WindowId(tkwin), gc,
		mbPtr->textLayout, x, y, 0, -1);
    }

    /*
     * If the menu button is disabled with a stipple rather than a special
     * foreground color, generate the stippled effect.
     */

    if ((mbPtr->state == STATE_DISABLED)
	    && ((mbPtr->disabledFg != NULL) || (mbPtr->image != NULL))) {
	XFillRectangle(mbPtr->display, Tk_WindowId(tkwin),
		mbPtr->disabledGC, mbPtr->inset, mbPtr->inset,
		(unsigned) (Tk_Width(tkwin) - 2*mbPtr->inset),
		(unsigned) (Tk_Height(tkwin) - 2*mbPtr->inset));
    }

    /*
     * Draw the cascade indicator for the menu button on the
     * right side of the window, if desired.  The triangle is drawn as a
     * stack of horizontal lines, each 2 pixels narrower than the last.
     */

    if (mbPtr->indicatorOn) {
	int w, h, i;
	Rect r;

	r.left = macDraw->xOff + Tk_Width(tkwin) - mbPtr->inset
		- mbPtr->indicatorWidth;
	r.top = macDraw->yOff + Tk_Height(tkwin)/2
		- mbPtr->indicatorHeight/2;
	r.right = macDraw->xOff + Tk_Width(tkwin) - mbPtr->inset
		- kTriangleMargin;
	r.bottom = macDraw->yOff + Tk_Height(tkwin)/2
		+ mbPtr->indicatorHeight/2;

	h = mbPtr->indicatorHeight;
	w = mbPtr->indicatorWidth - 1 - kTriangleMargin;
	for (i = 0; i < h; i++) {
	    MoveTo(r.left + i, r.top + i);
	    LineTo(r.left + i + w, r.top + i);
	    w -= 2;
	}
    }

    /*
     * Draw the border and traversal highlight last.  This way, if the
     * menu button's contents overflow onto the border they'll be covered
     * up by the border.
     */

    TkMacSetUpClippingRgn(Tk_WindowId(tkwin));
    if (mbPtr->borderWidth > 0) {
	Rect r;

	r.left = macDraw->xOff + mbPtr->highlightWidth + mbPtr->borderWidth;
	r.top = macDraw->yOff + mbPtr->highlightWidth + mbPtr->borderWidth;
	r.right = macDraw->xOff + Tk_Width(tkwin)
		- mbPtr->highlightWidth - mbPtr->borderWidth;
	r.bottom = macDraw->yOff + Tk_Height(tkwin)
		- mbPtr->highlightWidth - mbPtr->borderWidth;

	FrameRect(&r);

	/* Drop shadow on the right and bottom edges of the frame. */
	PenSize(mbPtr->borderWidth - 1, mbPtr->borderWidth - 1);
	MoveTo(r.right, r.top + kShadowOffset);
	LineTo(r.right, r.bottom);
	LineTo(r.left + kShadowOffset, r.bottom);
    }

    if (mbPtr->highlightWidth != 0) {
	GC fgGC, bgGC;

	bgGC = Tk_GCForColor(mbPtr->highlightBgColorPtr, Tk_WindowId(tkwin));
	if (mbPtr->flags & GOT_FOCUS) {
	    fgGC = Tk_GCForColor(mbPtr->highlightColorPtr, Tk_WindowId(tkwin));
	    TkpDrawHighlightBorder(tkwin, fgGC, bgGC, mbPtr->highlightWidth,
		    Tk_WindowId(tkwin));
	} else {
	    TkpDrawHighlightBorder(tkwin, bgGC, bgGC, mbPtr->highlightWidth,
		    Tk_WindowId(tkwin));
	}
    }

    SetGWorld(saveWorld, saveDevice);
}

/*
 *----------------------------------------------------------------------
 *
 * TkpDestroyMenuButton --
 *
 *	Free data structures associated with the menubutton control.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	Restores the default control state.
 *
 *----------------------------------------------------------------------
 */

void
TkpDestroyMenuButton(
    TkMenuButton *mbPtr)
{
}

/*
 *----------------------------------------------------------------------
 *
 * TkpComputeMenuButtonGeometry --
 *
 *	After changes in a menu button's text or bitmap, this procedure
 *	recomputes the menu button's geometry and passes this information
 *	along to the geometry manager for the window.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	The menu button's window may change size.
 *
 *----------------------------------------------------------------------
 */

void
TkpComputeMenuButtonGeometry(
    TkMenuButton *mbPtr)	/* Widget record for menu button. */
{
    int width, height;

    mbPtr->inset = mbPtr->highlightWidth + mbPtr->borderWidth;

    /*
     * Size from the image/bitmap/text, with explicit -width/-height
     * overrides.  For text, -width/-height are measured in characters and
     * lines respectively, so they are converted to pixels via font metrics.
     */

    if (mbPtr->image != None) {
	Tk_SizeOfImage(mbPtr->image, &width, &height);
	if (mbPtr->width > 0) {
	    width = mbPtr->width;
	}
	if (mbPtr->height > 0) {
	    height = mbPtr->height;
	}
    } else if (mbPtr->bitmap != None) {
	Tk_SizeOfBitmap(mbPtr->display, mbPtr->bitmap, &width, &height);
	if (mbPtr->width > 0) {
	    width = mbPtr->width;
	}
	if (mbPtr->height > 0) {
	    height = mbPtr->height;
	}
    } else {
	Tk_FreeTextLayout(mbPtr->textLayout);
	mbPtr->textLayout = Tk_ComputeTextLayout(mbPtr->tkfont, mbPtr->text,
		-1, mbPtr->wrapLength, mbPtr->justify, 0, &mbPtr->textWidth,
		&mbPtr->textHeight);
	width = mbPtr->textWidth;
	height = mbPtr->textHeight;
	if (mbPtr->width > 0) {
	    width = mbPtr->width * Tk_TextWidth(mbPtr->tkfont, "0", 1);
	}
	if (mbPtr->height > 0) {
	    Tk_FontMetrics fm;

	    Tk_GetFontMetrics(mbPtr->tkfont, &fm);
	    height = mbPtr->height * fm.linespace;
	}
	width += 2*mbPtr->padX;
	height += 2*mbPtr->padY;
    }

    /*
     * Reserve room for the cascade indicator triangle when it is shown.
     * (FIX: removed unused locals `mm` and `pixels`, whose screen-metric
     * reads were never consumed.)
     */

    if (mbPtr->indicatorOn) {
	mbPtr->indicatorHeight = kTriangleHeight;
	mbPtr->indicatorWidth = kTriangleWidth + kTriangleMargin;
	width += mbPtr->indicatorWidth;
    } else {
	mbPtr->indicatorHeight = 0;
	mbPtr->indicatorWidth = 0;
    }

    Tk_GeometryRequest(mbPtr->tkwin, (int) (width + 2*mbPtr->inset),
	    (int) (height + 2*mbPtr->inset));
    Tk_SetInternalBorder(mbPtr->tkwin, mbPtr->inset);
}
Java
/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/types.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/spinlock.h> #include <linux/genalloc.h> #include <linux/slab.h> #include <linux/iommu.h> #include <linux/msm_kgsl.h> #include <mach/socinfo.h> #include <mach/msm_iomap.h> #include <mach/board.h> #include <mach/iommu_domains.h> #include <stddef.h> #include "kgsl.h" #include "kgsl_device.h" #include "kgsl_mmu.h" #include "kgsl_sharedmem.h" #include "kgsl_iommu.h" #include "adreno_pm4types.h" #include "adreno.h" #include "kgsl_trace.h" #include "z180.h" #include "kgsl_cffdump.h" static struct kgsl_iommu_register_list kgsl_iommuv0_reg[KGSL_IOMMU_REG_MAX] = { { 0, 0 }, /* GLOBAL_BASE */ { 0x0, 1 }, /* SCTLR */ { 0x10, 1 }, /* TTBR0 */ { 0x14, 1 }, /* TTBR1 */ { 0x20, 1 }, /* FSR */ { 0x800, 1 }, /* TLBIALL */ { 0x820, 1 }, /* RESUME */ { 0x03C, 1 }, /* TLBLKCR */ { 0x818, 1 }, /* V2PUR */ { 0x2C, 1 }, /* FSYNR0 */ { 0x30, 1 }, /* FSYNR1 */ { 0, 0 }, /* TLBSYNC, not in v0 */ { 0, 0 }, /* TLBSTATUS, not in v0 */ { 0, 0 } /* IMPLDEF_MICRO_MMU_CRTL, not in v0 */ }; static struct kgsl_iommu_register_list kgsl_iommuv1_reg[KGSL_IOMMU_REG_MAX] = { { 0, 0 }, /* GLOBAL_BASE */ { 0x0, 1 }, /* SCTLR */ { 0x20, 1 }, /* TTBR0 */ { 0x28, 1 }, /* TTBR1 */ { 0x58, 1 }, /* FSR */ { 0x618, 1 }, /* TLBIALL */ { 0x008, 1 }, /* RESUME */ { 0, 0 }, /* TLBLKCR not in V1 */ { 0, 0 }, /* V2PUR not in V1 */ { 0x68, 1 }, /* FSYNR0 */ { 0x6C, 1 }, /* FSYNR1 */ { 0x7F0, 1 }, /* TLBSYNC */ { 
0x7F4, 1 }, /* TLBSTATUS */ { 0x2000, 0 } /* IMPLDEF_MICRO_MMU_CRTL */ }; static struct iommu_access_ops *iommu_access_ops; static int kgsl_iommu_default_setstate(struct kgsl_mmu *mmu, uint32_t flags); static phys_addr_t kgsl_iommu_get_current_ptbase(struct kgsl_mmu *mmu); static void _iommu_lock(struct kgsl_iommu const *iommu) { if (iommu_access_ops && iommu_access_ops->iommu_lock_acquire) iommu_access_ops->iommu_lock_acquire( iommu->sync_lock_initialized); } static void _iommu_unlock(struct kgsl_iommu const *iommu) { if (iommu_access_ops && iommu_access_ops->iommu_lock_release) iommu_access_ops->iommu_lock_release( iommu->sync_lock_initialized); } struct remote_iommu_petersons_spinlock kgsl_iommu_sync_lock_vars; /* * One page allocation for a guard region to protect against over-zealous * GPU pre-fetch */ static struct page *kgsl_guard_page; static int get_iommu_unit(struct device *dev, struct kgsl_mmu **mmu_out, struct kgsl_iommu_unit **iommu_unit_out) { int i, j, k; for (i = 0; i < KGSL_DEVICE_MAX; i++) { struct kgsl_mmu *mmu; struct kgsl_iommu *iommu; if (kgsl_driver.devp[i] == NULL) continue; mmu = kgsl_get_mmu(kgsl_driver.devp[i]); if (mmu == NULL || mmu->priv == NULL) continue; iommu = mmu->priv; for (j = 0; j < iommu->unit_count; j++) { struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[j]; for (k = 0; k < iommu_unit->dev_count; k++) { if (iommu_unit->dev[k].dev == dev) { *mmu_out = mmu; *iommu_unit_out = iommu_unit; return 0; } } } } return -EINVAL; } static struct kgsl_iommu_device *get_iommu_device(struct kgsl_iommu_unit *unit, struct device *dev) { int k; for (k = 0; unit && k < unit->dev_count; k++) { if (unit->dev[k].dev == dev) return &(unit->dev[k]); } return NULL; } /* These functions help find the nearest allocated memory entries on either side * of a faulting address. 
If we know the nearby allocations memory we can * get a better determination of what we think should have been located in the * faulting region */ /* * A local structure to make it easy to store the interesting bits for the * memory entries on either side of the faulting address */ struct _mem_entry { unsigned int gpuaddr; unsigned int size; unsigned int flags; unsigned int priv; pid_t pid; }; /* * Find the closest alloated memory block with an smaller GPU address then the * given address */ static void _prev_entry(struct kgsl_process_private *priv, unsigned int faultaddr, struct _mem_entry *ret) { struct rb_node *node; struct kgsl_mem_entry *entry; for (node = rb_first(&priv->mem_rb); node; ) { entry = rb_entry(node, struct kgsl_mem_entry, node); if (entry->memdesc.gpuaddr > faultaddr) break; /* * If this is closer to the faulting address, then copy * the entry */ if (entry->memdesc.gpuaddr > ret->gpuaddr) { ret->gpuaddr = entry->memdesc.gpuaddr; ret->size = entry->memdesc.size; ret->flags = entry->memdesc.flags; ret->priv = entry->memdesc.priv; ret->pid = priv->pid; } node = rb_next(&entry->node); } } /* * Find the closest alloated memory block with a greater starting GPU address * then the given address */ static void _next_entry(struct kgsl_process_private *priv, unsigned int faultaddr, struct _mem_entry *ret) { struct rb_node *node; struct kgsl_mem_entry *entry; for (node = rb_last(&priv->mem_rb); node; ) { entry = rb_entry(node, struct kgsl_mem_entry, node); if (entry->memdesc.gpuaddr < faultaddr) break; /* * If this is closer to the faulting address, then copy * the entry */ if (entry->memdesc.gpuaddr < ret->gpuaddr) { ret->gpuaddr = entry->memdesc.gpuaddr; ret->size = entry->memdesc.size; ret->flags = entry->memdesc.flags; ret->priv = entry->memdesc.priv; ret->pid = priv->pid; } node = rb_prev(&entry->node); } } static void _find_mem_entries(struct kgsl_mmu *mmu, unsigned int faultaddr, unsigned int ptbase, struct _mem_entry *preventry, struct _mem_entry 
*nextentry) { struct kgsl_process_private *private; int id = kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase); memset(preventry, 0, sizeof(*preventry)); memset(nextentry, 0, sizeof(*nextentry)); /* Set the maximum possible size as an initial value */ nextentry->gpuaddr = 0xFFFFFFFF; mutex_lock(&kgsl_driver.process_mutex); list_for_each_entry(private, &kgsl_driver.process_list, list) { if (private->pagetable && (private->pagetable->name != id)) continue; spin_lock(&private->mem_lock); _prev_entry(private, faultaddr, preventry); _next_entry(private, faultaddr, nextentry); spin_unlock(&private->mem_lock); } mutex_unlock(&kgsl_driver.process_mutex); } static void _print_entry(struct kgsl_device *device, struct _mem_entry *entry) { char name[32]; memset(name, 0, sizeof(name)); kgsl_get_memory_usage(name, sizeof(name) - 1, entry->flags); KGSL_LOG_DUMP(device, "[%8.8X - %8.8X] %s (pid = %d) (%s)\n", entry->gpuaddr, entry->gpuaddr + entry->size, entry->priv & KGSL_MEMDESC_GUARD_PAGE ? "(+guard)" : "", entry->pid, name); } static void _check_if_freed(struct kgsl_iommu_device *iommu_dev, unsigned long addr, unsigned int pid) { void *base = kgsl_driver.memfree_hist.base_hist_rb; struct kgsl_memfree_hist_elem *wptr; struct kgsl_memfree_hist_elem *p; char name[32]; memset(name, 0, sizeof(name)); mutex_lock(&kgsl_driver.memfree_hist_mutex); wptr = kgsl_driver.memfree_hist.wptr; p = wptr; for (;;) { if (p->size && p->pid == pid) if (addr >= p->gpuaddr && addr < (p->gpuaddr + p->size)) { kgsl_get_memory_usage(name, sizeof(name) - 1, p->flags); KGSL_LOG_DUMP(iommu_dev->kgsldev, "---- premature free ----\n"); KGSL_LOG_DUMP(iommu_dev->kgsldev, "[%8.8X-%8.8X] (%s) was already freed by pid %d\n", p->gpuaddr, p->gpuaddr + p->size, name, p->pid); } p++; if ((void *)p >= base + kgsl_driver.memfree_hist.size) p = (struct kgsl_memfree_hist_elem *) base; if (p == kgsl_driver.memfree_hist.wptr) break; } mutex_unlock(&kgsl_driver.memfree_hist_mutex); } static int kgsl_iommu_fault_handler(struct 
iommu_domain *domain, struct device *dev, unsigned long addr, int flags, void *token) { int ret = 0; struct kgsl_mmu *mmu; struct kgsl_iommu *iommu; struct kgsl_iommu_unit *iommu_unit; struct kgsl_iommu_device *iommu_dev; unsigned int ptbase, fsr; unsigned int pid; struct _mem_entry prev, next; unsigned int fsynr0, fsynr1; int write; struct kgsl_device *device; struct adreno_device *adreno_dev; unsigned int no_page_fault_log = 0; unsigned int curr_context_id = 0; unsigned int curr_global_ts = 0; struct kgsl_context *context; ret = get_iommu_unit(dev, &mmu, &iommu_unit); if (ret) goto done; device = mmu->device; adreno_dev = ADRENO_DEVICE(device); if (atomic_read(&mmu->fault)) { if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE) ret = -EBUSY; goto done; } iommu_dev = get_iommu_device(iommu_unit, dev); if (!iommu_dev) { KGSL_CORE_ERR("Invalid IOMMU device %p\n", dev); ret = -ENOSYS; goto done; } iommu = mmu->priv; /* * set the fault bits and stuff before any printks so that if fault * handler runs then it will know it's dealing with a pagefault */ kgsl_sharedmem_readl(&device->memstore, &curr_context_id, KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context)); context = kgsl_context_get(device, curr_context_id); if (context != NULL) { kgsl_sharedmem_readl(&device->memstore, &curr_global_ts, KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, eoptimestamp)); /* save pagefault timestamp for GFT */ set_bit(KGSL_CONTEXT_PAGEFAULT, &context->priv); context->pagefault_ts = curr_global_ts; kgsl_context_put(context); context = NULL; } atomic_set(&mmu->fault, 1); iommu_dev->fault = 1; if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE) { adreno_set_gpu_fault(adreno_dev, ADRENO_IOMMU_PAGE_FAULT); /* turn off GPU IRQ so we don't get faults from it too */ kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF); adreno_dispatcher_schedule(device); } ptbase = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit, iommu_dev->ctx_id, TTBR0); fsr = KGSL_IOMMU_GET_CTX_REG(iommu, 
iommu_unit, iommu_dev->ctx_id, FSR); fsynr0 = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit, iommu_dev->ctx_id, FSYNR0); fsynr1 = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit, iommu_dev->ctx_id, FSYNR1); if (msm_soc_version_supports_iommu_v0()) write = ((fsynr1 & (KGSL_IOMMU_FSYNR1_AWRITE_MASK << KGSL_IOMMU_FSYNR1_AWRITE_SHIFT)) ? 1 : 0); else write = ((fsynr0 & (KGSL_IOMMU_V1_FSYNR0_WNR_MASK << KGSL_IOMMU_V1_FSYNR0_WNR_SHIFT)) ? 1 : 0); pid = kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase); if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE) no_page_fault_log = kgsl_mmu_log_fault_addr(mmu, ptbase, addr); if (!no_page_fault_log) { KGSL_MEM_CRIT(iommu_dev->kgsldev, "GPU PAGE FAULT: addr = %lX pid = %d\n", addr, pid); KGSL_MEM_CRIT(iommu_dev->kgsldev, "context = %d FSR = %X FSYNR0 = %X FSYNR1 = %X(%s fault)\n", iommu_dev->ctx_id, fsr, fsynr0, fsynr1, write ? "write" : "read"); _check_if_freed(iommu_dev, addr, pid); KGSL_LOG_DUMP(iommu_dev->kgsldev, "---- nearby memory ----\n"); _find_mem_entries(mmu, addr, ptbase, &prev, &next); if (prev.gpuaddr) _print_entry(iommu_dev->kgsldev, &prev); else KGSL_LOG_DUMP(iommu_dev->kgsldev, "*EMPTY*\n"); KGSL_LOG_DUMP(iommu_dev->kgsldev, " <- fault @ %8.8lX\n", addr); if (next.gpuaddr != 0xFFFFFFFF) _print_entry(iommu_dev->kgsldev, &next); else KGSL_LOG_DUMP(iommu_dev->kgsldev, "*EMPTY*\n"); } trace_kgsl_mmu_pagefault(iommu_dev->kgsldev, addr, kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase), write ? "write" : "read"); /* * We do not want the h/w to resume fetching data from an iommu unit * that has faulted, this is better for debugging as it will stall * the GPU and trigger a snapshot. To stall the transaction return * EBUSY error. 
*/ if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE) ret = -EBUSY; done: return ret; } /* * kgsl_iommu_disable_clk - Disable iommu clocks * @mmu - Pointer to mmu structure * * Disables iommu clocks * Return - void */ static void kgsl_iommu_disable_clk(struct kgsl_mmu *mmu, int ctx_id) { struct kgsl_iommu *iommu = mmu->priv; struct msm_iommu_drvdata *iommu_drvdata; int i, j; for (i = 0; i < iommu->unit_count; i++) { struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i]; for (j = 0; j < iommu_unit->dev_count; j++) { if (ctx_id != iommu_unit->dev[j].ctx_id) continue; atomic_dec(&iommu_unit->dev[j].clk_enable_count); BUG_ON( atomic_read(&iommu_unit->dev[j].clk_enable_count) < 0); /* * the clock calls have a refcount so call them on every * enable/disable call */ iommu_drvdata = dev_get_drvdata( iommu_unit->dev[j].dev->parent); if (iommu_drvdata->aclk) clk_disable_unprepare(iommu_drvdata->aclk); if (iommu_drvdata->clk) clk_disable_unprepare(iommu_drvdata->clk); clk_disable_unprepare(iommu_drvdata->pclk); } } } /* * kgsl_iommu_disable_clk_event - An event function that is executed when * the required timestamp is reached. It disables the IOMMU clocks if * the timestamp on which the clocks can be disabled has expired. 
* @device - The kgsl device pointer * @data - The data passed during event creation, it is the MMU pointer * @id - Context ID, should always be KGSL_MEMSTORE_GLOBAL * @ts - The current timestamp that has expired for the device * * Disables IOMMU clocks if timestamp has expired * Return - void */ static void kgsl_iommu_clk_disable_event(struct kgsl_device *device, void *data, unsigned int id, unsigned int ts, u32 type) { struct kgsl_iommu_disable_clk_param *param = data; if ((0 <= timestamp_cmp(ts, param->ts)) || (KGSL_EVENT_CANCELLED == type)) kgsl_iommu_disable_clk(param->mmu, param->ctx_id); else /* something went wrong with the event handling mechanism */ BUG_ON(1); /* Free param we are done using it */ kfree(param); } /* * kgsl_iommu_disable_clk_on_ts - Sets up event to disable IOMMU clocks * @mmu - The kgsl MMU pointer * @ts - Timestamp on which the clocks should be disabled * @ts_valid - Indicates whether ts parameter is valid, if this parameter * is false then it means that the calling function wants to disable the * IOMMU clocks immediately without waiting for any timestamp * @ctx_id: Context id of the IOMMU context for which clocks are to be * turned off * * Creates an event to disable the IOMMU clocks on timestamp and if event * already exists then updates the timestamp of disabling the IOMMU clocks * with the passed in ts if it is greater than the current value at which * the clocks will be disabled * Return - void */ static void kgsl_iommu_disable_clk_on_ts(struct kgsl_mmu *mmu, unsigned int ts, int ctx_id) { struct kgsl_iommu_disable_clk_param *param; param = kzalloc(sizeof(*param), GFP_KERNEL); if (!param) { KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*param)); return; } param->mmu = mmu; param->ctx_id = ctx_id; param->ts = ts; if (kgsl_add_event(mmu->device, KGSL_MEMSTORE_GLOBAL, ts, kgsl_iommu_clk_disable_event, param, mmu)) { KGSL_DRV_ERR(mmu->device, "Failed to add IOMMU disable clk event\n"); kfree(param); } } /* * kgsl_iommu_enable_clk - Enable 
iommu clocks for a context bank
 * @mmu - Pointer to mmu structure
 * @ctx_id - The context bank whose clocks are to be turned on
 *
 * Enables iommu clocks of a given context
 * Return: 0 on success else error code
 */
static int kgsl_iommu_enable_clk(struct kgsl_mmu *mmu,
				int ctx_id)
{
	int ret = 0;
	int i, j;
	struct kgsl_iommu *iommu = mmu->priv;
	struct msm_iommu_drvdata *iommu_drvdata;

	/* Walk every device in every IOMMU unit; only devices whose ctx_id
	 * matches the requested one have their clock chain enabled. */
	for (i = 0; i < iommu->unit_count; i++) {
		struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
		for (j = 0; j < iommu_unit->dev_count; j++) {
			if (ctx_id != iommu_unit->dev[j].ctx_id)
				continue;
			iommu_drvdata =
			dev_get_drvdata(iommu_unit->dev[j].dev->parent);
			/* pclk first; clk and aclk are optional and each
			 * failure path unwinds the clocks already enabled
			 * for THIS device before bailing out. */
			ret = clk_prepare_enable(iommu_drvdata->pclk);
			if (ret)
				goto done;
			if (iommu_drvdata->clk) {
				ret = clk_prepare_enable(iommu_drvdata->clk);
				if (ret) {
					clk_disable_unprepare(
						iommu_drvdata->pclk);
					goto done;
				}
			}
			if (iommu_drvdata->aclk) {
				ret = clk_prepare_enable(iommu_drvdata->aclk);
				if (ret) {
					if (iommu_drvdata->clk)
						clk_disable_unprepare(
							iommu_drvdata->clk);
					clk_disable_unprepare(
							iommu_drvdata->pclk);
					goto done;
				}
			}
			/* Balanced by kgsl_iommu_disable_clk() */
			atomic_inc(&iommu_unit->dev[j].clk_enable_count);
		}
	}
done:
	if (ret) {
		struct kgsl_iommu_unit *iommu_unit;
		/* i may equal unit_count if the failure happened on the last
		 * unit's inner loop exit; clamp back to a valid index. */
		if (iommu->unit_count == i)
			i--;
		iommu_unit = &iommu->iommu_units[i];
		/* Unwind: for every device index already processed (current
		 * unit partially, earlier units fully) drop one clock ref.
		 * NOTE(review): each iteration calls kgsl_iommu_disable_clk()
		 * with the same ctx_id while j only counts device slots -
		 * verify against kgsl_iommu_disable_clk() that this cannot
		 * over-decrement clk_enable_count. TODO confirm.
		 */
		do {
			for (j--; j >= 0; j--)
				kgsl_iommu_disable_clk(mmu, ctx_id);
			i--;
			if (i >= 0) {
				iommu_unit = &iommu->iommu_units[i];
				j = iommu_unit->dev_count;
			}
		} while (i >= 0);
	}
	return ret;
}

/*
 * kgsl_iommu_pt_equal - Check if pagetables are equal
 * @mmu - Pointer to mmu structure
 * @pt - Pointer to pagetable
 * @pt_base - Address of a pagetable that the IOMMU register is
 * programmed with
 *
 * Checks whether the pt_base is equal to the base address of
 * the pagetable which is contained in the pt structure
 * Return - Non-zero if the pagetable addresses are equal else 0
 */
static int kgsl_iommu_pt_equal(struct kgsl_mmu *mmu,
				struct kgsl_pagetable *pt,
				phys_addr_t pt_base)
{
	struct kgsl_iommu_pt *iommu_pt = pt ? pt->priv : NULL;
	phys_addr_t domain_ptbase = iommu_pt ?
iommu_get_pt_base_addr(iommu_pt->domain) : 0; /* Only compare the valid address bits of the pt_base */ domain_ptbase &= KGSL_IOMMU_CTX_TTBR0_ADDR_MASK; pt_base &= KGSL_IOMMU_CTX_TTBR0_ADDR_MASK; return domain_ptbase && pt_base && (domain_ptbase == pt_base); } /* * kgsl_iommu_destroy_pagetable - Free up reaources help by a pagetable * @mmu_specific_pt - Pointer to pagetable which is to be freed * * Return - void */ static void kgsl_iommu_destroy_pagetable(struct kgsl_pagetable *pt) { struct kgsl_iommu_pt *iommu_pt = pt->priv; if (iommu_pt->domain) msm_unregister_domain(iommu_pt->domain); kfree(iommu_pt); iommu_pt = NULL; } /* * kgsl_iommu_create_pagetable - Create a IOMMU pagetable * * Allocate memory to hold a pagetable and allocate the IOMMU * domain which is the actual IOMMU pagetable * Return - void */ void *kgsl_iommu_create_pagetable(void) { int domain_num; struct kgsl_iommu_pt *iommu_pt; struct msm_iova_partition kgsl_partition = { .start = 0, .size = 0xFFFFFFFF, }; struct msm_iova_layout kgsl_layout = { .partitions = &kgsl_partition, .npartitions = 1, .client_name = "kgsl", .domain_flags = 0, }; iommu_pt = kzalloc(sizeof(struct kgsl_iommu_pt), GFP_KERNEL); if (!iommu_pt) { KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(struct kgsl_iommu_pt)); return NULL; } /* L2 redirect is not stable on IOMMU v1 */ if (msm_soc_version_supports_iommu_v0()) kgsl_layout.domain_flags = MSM_IOMMU_DOMAIN_PT_CACHEABLE; domain_num = msm_register_domain(&kgsl_layout); if (domain_num >= 0) { iommu_pt->domain = msm_get_iommu_domain(domain_num); if (iommu_pt->domain) { iommu_set_fault_handler(iommu_pt->domain, kgsl_iommu_fault_handler, NULL); return iommu_pt; } } KGSL_CORE_ERR("Failed to create iommu domain\n"); kfree(iommu_pt); return NULL; } /* * kgsl_detach_pagetable_iommu_domain - Detach the IOMMU unit from a * pagetable * @mmu - Pointer to the device mmu structure * @priv - Flag indicating whether the private or user context is to be * detached * * Detach the IOMMU unit with the 
domain that is contained in the
 * hwpagetable of the given mmu. After detaching the IOMMU unit is not
 * in use because the PTBR will not be set after a detach
 * Return - void
 */
static void kgsl_detach_pagetable_iommu_domain(struct kgsl_mmu *mmu)
{
	struct kgsl_iommu_pt *iommu_pt;
	struct kgsl_iommu *iommu = mmu->priv;
	int i, j;

	/* Detach every attached device in every unit from its domain */
	for (i = 0; i < iommu->unit_count; i++) {
		struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
		/* Default: all context banks use the default pagetable */
		iommu_pt = mmu->defaultpagetable->priv;
		for (j = 0; j < iommu_unit->dev_count; j++) {
			/*
			 * If there is a 2nd default pagetable then priv domain
			 * is attached with this pagetable
			 */
			if (mmu->priv_bank_table &&
				(KGSL_IOMMU_CONTEXT_PRIV == j))
				iommu_pt = mmu->priv_bank_table->priv;
			/* Only detach what was attached; the flag mirrors
			 * the attach done in
			 * kgsl_attach_pagetable_iommu_domain() */
			if (iommu_unit->dev[j].attached) {
				iommu_detach_device(iommu_pt->domain,
					iommu_unit->dev[j].dev);
				iommu_unit->dev[j].attached = false;
				KGSL_MEM_INFO(mmu->device, "iommu %p detached "
					"from user dev of MMU: %p\n",
					iommu_pt->domain, mmu);
			}
		}
	}
}

/*
 * kgsl_attach_pagetable_iommu_domain - Attach the IOMMU unit to a
 * pagetable, i.e set the IOMMU's PTBR to the pagetable address and
 * setup other IOMMU registers for the device so that it becomes
 * active
 * @mmu - Pointer to the device mmu structure
 * @priv - Flag indicating whether the private or user context is to be
 * attached
 *
 * Attach the IOMMU unit with the domain that is contained in the
 * hwpagetable of the given mmu.
* Return - 0 on success else error code */ static int kgsl_attach_pagetable_iommu_domain(struct kgsl_mmu *mmu) { struct kgsl_iommu_pt *iommu_pt; struct kgsl_iommu *iommu = mmu->priv; int i, j, ret = 0; /* * Loop through all the iommu devcies under all iommu units and * attach the domain */ for (i = 0; i < iommu->unit_count; i++) { struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i]; iommu_pt = mmu->defaultpagetable->priv; for (j = 0; j < iommu_unit->dev_count; j++) { /* * If there is a 2nd default pagetable then priv domain * is attached to this pagetable */ if (mmu->priv_bank_table && (KGSL_IOMMU_CONTEXT_PRIV == j)) iommu_pt = mmu->priv_bank_table->priv; if (!iommu_unit->dev[j].attached) { ret = iommu_attach_device(iommu_pt->domain, iommu_unit->dev[j].dev); if (ret) { KGSL_MEM_ERR(mmu->device, "Failed to attach device, err %d\n", ret); goto done; } iommu_unit->dev[j].attached = true; KGSL_MEM_INFO(mmu->device, "iommu pt %p attached to dev %p, ctx_id %d\n", iommu_pt->domain, iommu_unit->dev[j].dev, iommu_unit->dev[j].ctx_id); } } } done: return ret; } /* * _get_iommu_ctxs - Get device pointer to IOMMU contexts * @mmu - Pointer to mmu device * data - Pointer to the platform data containing information about * iommu devices for one iommu unit * unit_id - The IOMMU unit number. This is not a specific ID but just * a serial number. 
The serial numbers are treated as ID's of the * IOMMU units * * Return - 0 on success else error code */ static int _get_iommu_ctxs(struct kgsl_mmu *mmu, struct kgsl_device_iommu_data *data, unsigned int unit_id) { struct kgsl_iommu *iommu = mmu->priv; struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[unit_id]; int i, j; int found_ctx; int ret = 0; for (j = 0; j < KGSL_IOMMU_MAX_DEVS_PER_UNIT; j++) { found_ctx = 0; for (i = 0; i < data->iommu_ctx_count; i++) { if (j == data->iommu_ctxs[i].ctx_id) { found_ctx = 1; break; } } if (!found_ctx) break; if (!data->iommu_ctxs[i].iommu_ctx_name) { KGSL_CORE_ERR("Context name invalid\n"); ret = -EINVAL; goto done; } atomic_set( &(iommu_unit->dev[iommu_unit->dev_count].clk_enable_count), 0); iommu_unit->dev[iommu_unit->dev_count].dev = msm_iommu_get_ctx(data->iommu_ctxs[i].iommu_ctx_name); if (NULL == iommu_unit->dev[iommu_unit->dev_count].dev) ret = -EINVAL; if (IS_ERR(iommu_unit->dev[iommu_unit->dev_count].dev)) { ret = PTR_ERR( iommu_unit->dev[iommu_unit->dev_count].dev); iommu_unit->dev[iommu_unit->dev_count].dev = NULL; } if (ret) goto done; iommu_unit->dev[iommu_unit->dev_count].ctx_id = data->iommu_ctxs[i].ctx_id; iommu_unit->dev[iommu_unit->dev_count].kgsldev = mmu->device; KGSL_DRV_INFO(mmu->device, "Obtained dev handle %p for iommu context %s\n", iommu_unit->dev[iommu_unit->dev_count].dev, data->iommu_ctxs[i].iommu_ctx_name); iommu_unit->dev_count++; } done: if (!iommu_unit->dev_count && !ret) ret = -EINVAL; if (ret) { /* * If at least the first context is initialized on v1 * then we can continue */ if (!msm_soc_version_supports_iommu_v0() && iommu_unit->dev_count) ret = 0; else KGSL_CORE_ERR( "Failed to initialize iommu contexts, err: %d\n", ret); } return ret; } /* * kgsl_iommu_start_sync_lock - Initialize some variables during MMU start up * for GPU CPU synchronization * @mmu - Pointer to mmu device * * Return - 0 on success else error code */ static int kgsl_iommu_start_sync_lock(struct kgsl_mmu *mmu) { 
struct kgsl_iommu *iommu = mmu->priv; uint32_t lock_gpu_addr = 0; if (KGSL_DEVICE_3D0 != mmu->device->id || !msm_soc_version_supports_iommu_v0() || !kgsl_mmu_is_perprocess(mmu) || iommu->sync_lock_vars) return 0; if (!(mmu->flags & KGSL_MMU_FLAGS_IOMMU_SYNC)) { KGSL_DRV_ERR(mmu->device, "The GPU microcode does not support IOMMUv1 sync opcodes\n"); return -ENXIO; } /* Store Lock variables GPU address */ lock_gpu_addr = (iommu->sync_lock_desc.gpuaddr + iommu->sync_lock_offset); kgsl_iommu_sync_lock_vars.flag[PROC_APPS] = (lock_gpu_addr + (offsetof(struct remote_iommu_petersons_spinlock, flag[PROC_APPS]))); kgsl_iommu_sync_lock_vars.flag[PROC_GPU] = (lock_gpu_addr + (offsetof(struct remote_iommu_petersons_spinlock, flag[PROC_GPU]))); kgsl_iommu_sync_lock_vars.turn = (lock_gpu_addr + (offsetof(struct remote_iommu_petersons_spinlock, turn))); iommu->sync_lock_vars = &kgsl_iommu_sync_lock_vars; return 0; } #ifdef CONFIG_MSM_IOMMU_GPU_SYNC /* * kgsl_get_sync_lock - Init Sync Lock between GPU and CPU * @mmu - Pointer to mmu device * * Return - 0 on success else error code */ static int kgsl_iommu_init_sync_lock(struct kgsl_mmu *mmu) { struct kgsl_iommu *iommu = mmu->priv; int status = 0; uint32_t lock_phy_addr = 0; uint32_t page_offset = 0; if (!msm_soc_version_supports_iommu_v0() || !kgsl_mmu_is_perprocess(mmu)) return status; /* * For 2D devices cpu side sync lock is required. 
For 3D device, * since we only have a single 3D core and we always ensure that * 3D core is idle while writing to IOMMU register using CPU this * lock is not required */ if (KGSL_DEVICE_2D0 == mmu->device->id || KGSL_DEVICE_2D1 == mmu->device->id) { return status; } /* Return if already initialized */ if (iommu->sync_lock_initialized) return status; iommu_access_ops = msm_get_iommu_access_ops(); if (iommu_access_ops && iommu_access_ops->iommu_lock_initialize) { lock_phy_addr = (uint32_t) iommu_access_ops->iommu_lock_initialize(); if (!lock_phy_addr) { iommu_access_ops = NULL; return status; } lock_phy_addr = lock_phy_addr - (uint32_t)MSM_SHARED_RAM_BASE + (uint32_t)msm_shared_ram_phys; } /* Align the physical address to PAGE boundary and store the offset */ page_offset = (lock_phy_addr & (PAGE_SIZE - 1)); lock_phy_addr = (lock_phy_addr & ~(PAGE_SIZE - 1)); iommu->sync_lock_desc.physaddr = (unsigned int)lock_phy_addr; iommu->sync_lock_offset = page_offset; iommu->sync_lock_desc.size = PAGE_ALIGN(sizeof(kgsl_iommu_sync_lock_vars)); status = memdesc_sg_phys(&iommu->sync_lock_desc, iommu->sync_lock_desc.physaddr, iommu->sync_lock_desc.size); if (status) { iommu_access_ops = NULL; return status; } /* Flag Sync Lock is Initialized */ iommu->sync_lock_initialized = 1; return status; } #else static int kgsl_iommu_init_sync_lock(struct kgsl_mmu *mmu) { return 0; } #endif /* * kgsl_iommu_sync_lock - Acquire Sync Lock between GPU and CPU * @mmu - Pointer to mmu device * @cmds - Pointer to array of commands * * Return - int - number of commands. 
*/ inline unsigned int kgsl_iommu_sync_lock(struct kgsl_mmu *mmu, unsigned int *cmds) { struct kgsl_device *device = mmu->device; struct adreno_device *adreno_dev = ADRENO_DEVICE(device); struct kgsl_iommu *iommu = mmu->device->mmu.priv; struct remote_iommu_petersons_spinlock *lock_vars = iommu->sync_lock_vars; unsigned int *start = cmds; if (!iommu->sync_lock_initialized) return 0; *cmds++ = cp_type3_packet(CP_MEM_WRITE, 2); *cmds++ = lock_vars->flag[PROC_GPU]; *cmds++ = 1; cmds += adreno_add_idle_cmds(adreno_dev, cmds); *cmds++ = cp_type3_packet(CP_WAIT_REG_MEM, 5); /* MEM SPACE = memory, FUNCTION = equals */ *cmds++ = 0x13; *cmds++ = lock_vars->flag[PROC_GPU]; *cmds++ = 0x1; *cmds++ = 0x1; *cmds++ = 0x1; *cmds++ = cp_type3_packet(CP_MEM_WRITE, 2); *cmds++ = lock_vars->turn; *cmds++ = 0; cmds += adreno_add_idle_cmds(adreno_dev, cmds); *cmds++ = cp_type3_packet(CP_WAIT_REG_MEM, 5); /* MEM SPACE = memory, FUNCTION = equals */ *cmds++ = 0x13; *cmds++ = lock_vars->flag[PROC_GPU]; *cmds++ = 0x1; *cmds++ = 0x1; *cmds++ = 0x1; *cmds++ = cp_type3_packet(CP_TEST_TWO_MEMS, 3); *cmds++ = lock_vars->flag[PROC_APPS]; *cmds++ = lock_vars->turn; *cmds++ = 0; cmds += adreno_add_idle_cmds(adreno_dev, cmds); return cmds - start; } /* * kgsl_iommu_sync_lock - Release Sync Lock between GPU and CPU * @mmu - Pointer to mmu device * @cmds - Pointer to array of commands * * Return - int - number of commands. 
*/ inline unsigned int kgsl_iommu_sync_unlock(struct kgsl_mmu *mmu, unsigned int *cmds) { struct kgsl_device *device = mmu->device; struct adreno_device *adreno_dev = ADRENO_DEVICE(device); struct kgsl_iommu *iommu = mmu->device->mmu.priv; struct remote_iommu_petersons_spinlock *lock_vars = iommu->sync_lock_vars; unsigned int *start = cmds; if (!iommu->sync_lock_initialized) return 0; *cmds++ = cp_type3_packet(CP_MEM_WRITE, 2); *cmds++ = lock_vars->flag[PROC_GPU]; *cmds++ = 0; *cmds++ = cp_type3_packet(CP_WAIT_REG_MEM, 5); /* MEM SPACE = memory, FUNCTION = equals */ *cmds++ = 0x13; *cmds++ = lock_vars->flag[PROC_GPU]; *cmds++ = 0x0; *cmds++ = 0x1; *cmds++ = 0x1; cmds += adreno_add_idle_cmds(adreno_dev, cmds); return cmds - start; } /* * kgsl_get_iommu_ctxt - Get device pointer to IOMMU contexts * @mmu - Pointer to mmu device * * Get the device pointers for the IOMMU user and priv contexts of the * kgsl device * Return - 0 on success else error code */ static int kgsl_get_iommu_ctxt(struct kgsl_mmu *mmu) { struct platform_device *pdev = container_of(mmu->device->parentdev, struct platform_device, dev); struct kgsl_device_platform_data *pdata_dev = pdev->dev.platform_data; struct kgsl_iommu *iommu = mmu->device->mmu.priv; int i, ret = 0; /* Go through the IOMMU data and get all the context devices */ if (KGSL_IOMMU_MAX_UNITS < pdata_dev->iommu_count) { KGSL_CORE_ERR("Too many IOMMU units defined\n"); ret = -EINVAL; goto done; } for (i = 0; i < pdata_dev->iommu_count; i++) { ret = _get_iommu_ctxs(mmu, &pdata_dev->iommu_data[i], i); if (ret) break; } iommu->unit_count = pdata_dev->iommu_count; done: return ret; } /* * kgsl_set_register_map - Map the IOMMU regsiters in the memory descriptors * of the respective iommu units * @mmu - Pointer to mmu structure * * Return - 0 on success else error code */ static int kgsl_set_register_map(struct kgsl_mmu *mmu) { struct platform_device *pdev = container_of(mmu->device->parentdev, struct platform_device, dev); struct 
kgsl_device_platform_data *pdata_dev = pdev->dev.platform_data; struct kgsl_iommu *iommu = mmu->device->mmu.priv; struct kgsl_iommu_unit *iommu_unit; int i = 0, ret = 0; for (; i < pdata_dev->iommu_count; i++) { struct kgsl_device_iommu_data data = pdata_dev->iommu_data[i]; iommu_unit = &iommu->iommu_units[i]; /* set up the IOMMU register map for the given IOMMU unit */ if (!data.physstart || !data.physend) { KGSL_CORE_ERR("The register range for IOMMU unit not" " specified\n"); ret = -EINVAL; goto err; } iommu_unit->reg_map.hostptr = ioremap(data.physstart, data.physend - data.physstart + 1); if (!iommu_unit->reg_map.hostptr) { KGSL_CORE_ERR("Failed to map SMMU register address " "space from %x to %x\n", data.physstart, data.physend - data.physstart + 1); ret = -ENOMEM; i--; goto err; } iommu_unit->reg_map.size = data.physend - data.physstart + 1; iommu_unit->reg_map.physaddr = data.physstart; ret = memdesc_sg_phys(&iommu_unit->reg_map, data.physstart, iommu_unit->reg_map.size); if (ret) goto err; if (!msm_soc_version_supports_iommu_v0()) iommu_unit->iommu_halt_enable = 1; iommu_unit->ahb_base = data.physstart - mmu->device->reg_phys; } iommu->unit_count = pdata_dev->iommu_count; return ret; err: /* Unmap any mapped IOMMU regions */ for (; i >= 0; i--) { iommu_unit = &iommu->iommu_units[i]; iounmap(iommu_unit->reg_map.hostptr); iommu_unit->reg_map.size = 0; iommu_unit->reg_map.physaddr = 0; } return ret; } /* * kgsl_iommu_get_pt_base_addr - Get the address of the pagetable that the * IOMMU ttbr0 register is programmed with * @mmu - Pointer to mmu * @pt - kgsl pagetable pointer that contains the IOMMU domain pointer * * Return - actual pagetable address that the ttbr0 register is programmed * with */ static phys_addr_t kgsl_iommu_get_pt_base_addr(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt) { struct kgsl_iommu_pt *iommu_pt = pt->priv; return iommu_get_pt_base_addr(iommu_pt->domain) & KGSL_IOMMU_CTX_TTBR0_ADDR_MASK; } /* * kgsl_iommu_get_default_ttbr0 - Return 
the ttbr0 value programmed by * iommu driver * @mmu - Pointer to mmu structure * @hostptr - Pointer to the IOMMU register map. This is used to match * the iommu device whose lsb value is to be returned * @ctx_id - The context bank whose lsb valus is to be returned * Return - returns the ttbr0 value programmed by iommu driver */ static phys_addr_t kgsl_iommu_get_default_ttbr0(struct kgsl_mmu *mmu, unsigned int unit_id, enum kgsl_iommu_context_id ctx_id) { struct kgsl_iommu *iommu = mmu->priv; int i, j; for (i = 0; i < iommu->unit_count; i++) { struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i]; for (j = 0; j < iommu_unit->dev_count; j++) if (unit_id == i && ctx_id == iommu_unit->dev[j].ctx_id) return iommu_unit->dev[j].default_ttbr0; } return 0; } static int kgsl_iommu_setstate(struct kgsl_mmu *mmu, struct kgsl_pagetable *pagetable, unsigned int context_id) { int ret = 0; if (mmu->flags & KGSL_FLAGS_STARTED) { /* page table not current, then setup mmu to use new * specified page table */ if (mmu->hwpagetable != pagetable) { unsigned int flags = 0; mmu->hwpagetable = pagetable; flags |= kgsl_mmu_pt_get_flags(mmu->hwpagetable, mmu->device->id) | KGSL_MMUFLAGS_TLBFLUSH; ret = kgsl_setstate(mmu, context_id, KGSL_MMUFLAGS_PTUPDATE | flags); } } return ret; } /* * kgsl_iommu_setup_regs - map iommu registers into a pagetable * @mmu: Pointer to mmu structure * @pt: the pagetable * * To do pagetable switches from the GPU command stream, the IOMMU * registers need to be mapped into the GPU's pagetable. This function * is used differently on different targets. On 8960, the registers * are mapped into every pagetable during kgsl_setup_pt(). On * all other targets, the registers are mapped only into the second * context bank. 
* * Return - 0 on success else error code */ static int kgsl_iommu_setup_regs(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt) { int status; int i = 0; struct kgsl_iommu *iommu = mmu->priv; if (!msm_soc_version_supports_iommu_v0()) return 0; for (i = 0; i < iommu->unit_count; i++) { status = kgsl_mmu_map_global(pt, &(iommu->iommu_units[i].reg_map)); if (status) goto err; } /* Map Lock variables to GPU pagetable */ if (iommu->sync_lock_initialized) { status = kgsl_mmu_map_global(pt, &iommu->sync_lock_desc); if (status) goto err; } return 0; err: for (i--; i >= 0; i--) kgsl_mmu_unmap(pt, &(iommu->iommu_units[i].reg_map)); return status; } /* * kgsl_iommu_cleanup_regs - unmap iommu registers from a pagetable * @mmu: Pointer to mmu structure * @pt: the pagetable * * Removes mappings created by kgsl_iommu_setup_regs(). * * Return - 0 on success else error code */ static void kgsl_iommu_cleanup_regs(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt) { struct kgsl_iommu *iommu = mmu->priv; int i; for (i = 0; i < iommu->unit_count; i++) kgsl_mmu_unmap(pt, &(iommu->iommu_units[i].reg_map)); if (iommu->sync_lock_desc.gpuaddr) kgsl_mmu_unmap(pt, &iommu->sync_lock_desc); } /* * kgsl_iommu_get_reg_ahbaddr - Returns the ahb address of the register * @mmu - Pointer to mmu structure * @iommu_unit - The iommu unit for which base address is requested * @ctx_id - The context ID of the IOMMU ctx * @reg - The register for which address is required * * Return - The address of register which can be used in type0 packet */ static unsigned int kgsl_iommu_get_reg_ahbaddr(struct kgsl_mmu *mmu, int iommu_unit, int ctx_id, enum kgsl_iommu_reg_map reg) { struct kgsl_iommu *iommu = mmu->priv; if (iommu->iommu_reg_list[reg].ctx_reg) return iommu->iommu_units[iommu_unit].ahb_base + iommu->iommu_reg_list[reg].reg_offset + (ctx_id << KGSL_IOMMU_CTX_SHIFT) + iommu->ctx_offset; else return iommu->iommu_units[iommu_unit].ahb_base + iommu->iommu_reg_list[reg].reg_offset; } static int 
kgsl_iommu_init(struct kgsl_mmu *mmu) { /* * intialize device mmu * * call this with the global lock held */ int status = 0; struct kgsl_iommu *iommu; atomic_set(&mmu->fault, 0); iommu = kzalloc(sizeof(struct kgsl_iommu), GFP_KERNEL); if (!iommu) { KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(struct kgsl_iommu)); return -ENOMEM; } mmu->priv = iommu; status = kgsl_get_iommu_ctxt(mmu); if (status) goto done; status = kgsl_set_register_map(mmu); if (status) goto done; /* * IOMMU-v1 requires hardware halt support to do in stream * pagetable switching. This check assumes that if there are * multiple units, they will be matching hardware. */ mmu->pt_per_process = KGSL_MMU_USE_PER_PROCESS_PT && (msm_soc_version_supports_iommu_v0() || iommu->iommu_units[0].iommu_halt_enable); /* * For IOMMU per-process pagetables, the allocatable range * and the kernel global range must both be outside * the userspace address range. There is a 1Mb gap * between these address ranges to make overrun * detection easier. * For the shared pagetable case use 2GB and because * mirroring the CPU address space is not possible and * we're better off with extra room. 
*/ if (mmu->pt_per_process) { #ifndef CONFIG_MSM_KGSL_CFF_DUMP mmu->pt_base = PAGE_OFFSET; mmu->pt_size = KGSL_IOMMU_GLOBAL_MEM_BASE - kgsl_mmu_get_base_addr(mmu) - SZ_1M; mmu->use_cpu_map = true; #else mmu->pt_base = KGSL_PAGETABLE_BASE; mmu->pt_size = KGSL_IOMMU_GLOBAL_MEM_BASE + KGSL_IOMMU_GLOBAL_MEM_SIZE - KGSL_PAGETABLE_BASE; mmu->use_cpu_map = false; #endif } else { mmu->pt_base = KGSL_PAGETABLE_BASE; #ifndef CONFIG_MSM_KGSL_CFF_DUMP mmu->pt_size = SZ_2G; #else mmu->pt_size = KGSL_IOMMU_GLOBAL_MEM_BASE + KGSL_IOMMU_GLOBAL_MEM_SIZE - KGSL_PAGETABLE_BASE; #endif mmu->use_cpu_map = false; } status = kgsl_iommu_init_sync_lock(mmu); if (status) goto done; iommu->iommu_reg_list = kgsl_iommuv0_reg; iommu->ctx_offset = KGSL_IOMMU_CTX_OFFSET_V0; if (msm_soc_version_supports_iommu_v0()) { iommu->iommu_reg_list = kgsl_iommuv0_reg; iommu->ctx_offset = KGSL_IOMMU_CTX_OFFSET_V0; } else { iommu->iommu_reg_list = kgsl_iommuv1_reg; iommu->ctx_offset = KGSL_IOMMU_CTX_OFFSET_V1; } /* A nop is required in an indirect buffer when switching * pagetables in-stream */ kgsl_sharedmem_writel(mmu->device, &mmu->setstate_memory, KGSL_IOMMU_SETSTATE_NOP_OFFSET, cp_nop_packet(1)); if (cpu_is_msm8960()) { /* * 8960 doesn't have a second context bank, so the IOMMU * registers must be mapped into every pagetable. */ iommu_ops.mmu_setup_pt = kgsl_iommu_setup_regs; iommu_ops.mmu_cleanup_pt = kgsl_iommu_cleanup_regs; } if (kgsl_guard_page == NULL) { kgsl_guard_page = alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_HIGHMEM); if (kgsl_guard_page == NULL) { status = -ENOMEM; goto done; } } dev_info(mmu->device->dev, "|%s| MMU type set for device is IOMMU\n", __func__); done: if (status) { kfree(iommu); mmu->priv = NULL; } return status; } /* * kgsl_iommu_setup_defaultpagetable - Setup the initial defualtpagetable * for iommu. This function is only called once during first start, successive * start do not call this funciton. 
* @mmu - Pointer to mmu structure * * Create the initial defaultpagetable and setup the iommu mappings to it * Return - 0 on success else error code */ static int kgsl_iommu_setup_defaultpagetable(struct kgsl_mmu *mmu) { int status = 0; /* If chip is not 8960 then we use the 2nd context bank for pagetable * switching on the 3D side for which a separate table is allocated */ if (msm_soc_version_supports_iommu_v0()) { mmu->priv_bank_table = kgsl_mmu_getpagetable(mmu, KGSL_MMU_PRIV_BANK_TABLE_NAME); if (mmu->priv_bank_table == NULL) { status = -ENOMEM; goto err; } status = kgsl_iommu_setup_regs(mmu, mmu->priv_bank_table); if (status) goto err; } mmu->defaultpagetable = kgsl_mmu_getpagetable(mmu, KGSL_MMU_GLOBAL_PT); /* Return error if the default pagetable doesn't exist */ if (mmu->defaultpagetable == NULL) { status = -ENOMEM; goto err; } return status; err: if (mmu->priv_bank_table) { kgsl_iommu_cleanup_regs(mmu, mmu->priv_bank_table); kgsl_mmu_putpagetable(mmu->priv_bank_table); mmu->priv_bank_table = NULL; } if (mmu->defaultpagetable) { kgsl_mmu_putpagetable(mmu->defaultpagetable); mmu->defaultpagetable = NULL; } return status; } /* * kgsl_iommu_lock_rb_in_tlb - Allocates tlb entries and locks the * virtual to physical address translation of ringbuffer for 3D * device into tlb. 
* @mmu - Pointer to mmu structure * * Return - void */ static void kgsl_iommu_lock_rb_in_tlb(struct kgsl_mmu *mmu) { struct kgsl_device *device = mmu->device; struct adreno_device *adreno_dev = ADRENO_DEVICE(device); struct adreno_ringbuffer *rb; struct kgsl_iommu *iommu = mmu->priv; unsigned int num_tlb_entries; unsigned int tlblkcr = 0; unsigned int v2pxx = 0; unsigned int vaddr = 0; int i, j, k, l; if (!iommu->sync_lock_initialized) return; rb = &adreno_dev->ringbuffer; num_tlb_entries = rb->buffer_desc.size / PAGE_SIZE; for (i = 0; i < iommu->unit_count; i++) { struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i]; for (j = 0; j < iommu_unit->dev_count; j++) { tlblkcr = 0; if (cpu_is_msm8960()) tlblkcr |= ((num_tlb_entries & KGSL_IOMMU_TLBLKCR_FLOOR_MASK) << KGSL_IOMMU_TLBLKCR_FLOOR_SHIFT); else tlblkcr |= (((num_tlb_entries * iommu_unit->dev_count) & KGSL_IOMMU_TLBLKCR_FLOOR_MASK) << KGSL_IOMMU_TLBLKCR_FLOOR_SHIFT); /* Do not invalidate locked entries on tlbiall flush */ tlblkcr |= ((1 & KGSL_IOMMU_TLBLKCR_TLBIALLCFG_MASK) << KGSL_IOMMU_TLBLKCR_TLBIALLCFG_SHIFT); tlblkcr |= ((1 & KGSL_IOMMU_TLBLKCR_TLBIASIDCFG_MASK) << KGSL_IOMMU_TLBLKCR_TLBIASIDCFG_SHIFT); tlblkcr |= ((1 & KGSL_IOMMU_TLBLKCR_TLBIVAACFG_MASK) << KGSL_IOMMU_TLBLKCR_TLBIVAACFG_SHIFT); /* Enable tlb locking */ tlblkcr |= ((1 & KGSL_IOMMU_TLBLKCR_LKE_MASK) << KGSL_IOMMU_TLBLKCR_LKE_SHIFT); KGSL_IOMMU_SET_CTX_REG(iommu, iommu_unit, iommu_unit->dev[j].ctx_id, TLBLKCR, tlblkcr); } for (j = 0; j < iommu_unit->dev_count; j++) { /* skip locking entries for private bank on 8960 */ if (cpu_is_msm8960() && KGSL_IOMMU_CONTEXT_PRIV == j) continue; /* Lock the ringbuffer virtual address into tlb */ vaddr = rb->buffer_desc.gpuaddr; for (k = 0; k < num_tlb_entries; k++) { v2pxx = 0; v2pxx |= (((k + j * num_tlb_entries) & KGSL_IOMMU_V2PXX_INDEX_MASK) << KGSL_IOMMU_V2PXX_INDEX_SHIFT); v2pxx |= vaddr & (KGSL_IOMMU_V2PXX_VA_MASK << KGSL_IOMMU_V2PXX_VA_SHIFT); KGSL_IOMMU_SET_CTX_REG(iommu, iommu_unit, 
iommu_unit->dev[j].ctx_id, V2PUR, v2pxx); mb(); vaddr += PAGE_SIZE; for (l = 0; l < iommu_unit->dev_count; l++) { tlblkcr = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit, iommu_unit->dev[l].ctx_id, TLBLKCR); mb(); tlblkcr &= ~(KGSL_IOMMU_TLBLKCR_VICTIM_MASK << KGSL_IOMMU_TLBLKCR_VICTIM_SHIFT); tlblkcr |= (((k + 1 + (j * num_tlb_entries)) & KGSL_IOMMU_TLBLKCR_VICTIM_MASK) << KGSL_IOMMU_TLBLKCR_VICTIM_SHIFT); KGSL_IOMMU_SET_CTX_REG(iommu, iommu_unit, iommu_unit->dev[l].ctx_id, TLBLKCR, tlblkcr); } } } for (j = 0; j < iommu_unit->dev_count; j++) { tlblkcr = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit, iommu_unit->dev[j].ctx_id, TLBLKCR); mb(); /* Disable tlb locking */ tlblkcr &= ~(KGSL_IOMMU_TLBLKCR_LKE_MASK << KGSL_IOMMU_TLBLKCR_LKE_SHIFT); KGSL_IOMMU_SET_CTX_REG(iommu, iommu_unit, iommu_unit->dev[j].ctx_id, TLBLKCR, tlblkcr); } } } static int kgsl_iommu_start(struct kgsl_mmu *mmu) { int status; struct kgsl_iommu *iommu = mmu->priv; int i, j; int sctlr_val = 0; struct adreno_device *adreno_dev = ADRENO_DEVICE(mmu->device); if (mmu->flags & KGSL_FLAGS_STARTED) return 0; if (mmu->defaultpagetable == NULL) { status = kgsl_iommu_setup_defaultpagetable(mmu); if (status) return -ENOMEM; } status = kgsl_iommu_start_sync_lock(mmu); if (status) return status; /* We use the GPU MMU to control access to IOMMU registers on 8960 with * a225, hence we still keep the MMU active on 8960 */ if (cpu_is_msm8960() && KGSL_DEVICE_3D0 == mmu->device->id) { struct kgsl_mh *mh = &(mmu->device->mh); BUG_ON(iommu->iommu_units[0].reg_map.gpuaddr != 0 && mh->mpu_base > iommu->iommu_units[0].reg_map.gpuaddr); kgsl_regwrite(mmu->device, MH_MMU_CONFIG, 0x00000001); kgsl_regwrite(mmu->device, MH_MMU_MPU_END, mh->mpu_base + mh->mpu_range); } mmu->hwpagetable = mmu->defaultpagetable; status = kgsl_attach_pagetable_iommu_domain(mmu); if (status) { mmu->hwpagetable = NULL; goto done; } status = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER); if (status) { KGSL_CORE_ERR("clk enable failed\n"); goto done; 
} status = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV); if (status) { kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER); KGSL_CORE_ERR("clk enable failed\n"); goto done; } /* Get the lsb value of pagetables set in the IOMMU ttbr0 register as * that value should not change when we change pagetables, so while * changing pagetables we can use this lsb value of the pagetable w/o * having to read it again */ _iommu_lock(iommu); for (i = 0; i < iommu->unit_count; i++) { struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i]; for (j = 0; j < iommu_unit->dev_count; j++) { /* * For IOMMU V1 do not halt IOMMU on pagefault if * FT pagefault policy is set accordingly */ if ((!msm_soc_version_supports_iommu_v0()) && (!(adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE))) { sctlr_val = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit, iommu_unit->dev[j].ctx_id, SCTLR); sctlr_val |= (0x1 << KGSL_IOMMU_SCTLR_HUPCF_SHIFT); KGSL_IOMMU_SET_CTX_REG(iommu, iommu_unit, iommu_unit->dev[j].ctx_id, SCTLR, sctlr_val); } if (sizeof(phys_addr_t) > sizeof(unsigned long)) { iommu_unit->dev[j].default_ttbr0 = KGSL_IOMMU_GET_CTX_REG_LL(iommu, iommu_unit, iommu_unit->dev[j].ctx_id, TTBR0); } else { iommu_unit->dev[j].default_ttbr0 = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit, iommu_unit->dev[j].ctx_id, TTBR0); } } } kgsl_iommu_lock_rb_in_tlb(mmu); _iommu_unlock(iommu); /* For complete CFF */ kgsl_cffdump_setmem(mmu->device, mmu->setstate_memory.gpuaddr + KGSL_IOMMU_SETSTATE_NOP_OFFSET, cp_nop_packet(1), sizeof(unsigned int)); kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER); kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV); mmu->flags |= KGSL_FLAGS_STARTED; done: return status; } static int kgsl_iommu_unmap(struct kgsl_pagetable *pt, struct kgsl_memdesc *memdesc, unsigned int *tlb_flags) { int ret = 0, lock_taken = 0; unsigned int range = memdesc->size; struct kgsl_iommu_pt *iommu_pt = pt->priv; struct kgsl_device *device = pt->mmu->device; struct kgsl_iommu *iommu = 
pt->mmu->priv; /* All GPU addresses as assigned are page aligned, but some functions purturb the gpuaddr with an offset, so apply the mask here to make sure we have the right address */ unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK; if (range == 0 || gpuaddr == 0) return 0; if (kgsl_memdesc_has_guard_page(memdesc)) range += PAGE_SIZE; ret = iommu_unmap_range(iommu_pt->domain, gpuaddr, range); if (ret) { KGSL_CORE_ERR("iommu_unmap_range(%p, %x, %d) failed " "with err: %d\n", iommu_pt->domain, gpuaddr, range, ret); return ret; } if (!mutex_is_locked(&device->mutex) || device->mutex.owner != current) { mutex_lock(&device->mutex); lock_taken = 1; } /* If current pt then flush immediately */ if (kgsl_mmu_is_perprocess(pt->mmu) && iommu->iommu_units[0].dev[KGSL_IOMMU_CONTEXT_USER].attached && kgsl_iommu_pt_equal(pt->mmu, pt, kgsl_iommu_get_current_ptbase(pt->mmu))) kgsl_iommu_default_setstate(pt->mmu, KGSL_MMUFLAGS_TLBFLUSH); if (lock_taken) mutex_unlock(&device->mutex); return ret; } static int kgsl_iommu_map(struct kgsl_pagetable *pt, struct kgsl_memdesc *memdesc, unsigned int protflags, unsigned int *tlb_flags) { int ret; unsigned int iommu_virt_addr; struct kgsl_iommu_pt *iommu_pt = pt->priv; int size = memdesc->size; BUG_ON(NULL == iommu_pt); iommu_virt_addr = memdesc->gpuaddr; ret = iommu_map_range(iommu_pt->domain, iommu_virt_addr, memdesc->sg, size, protflags); if (ret) { KGSL_CORE_ERR("iommu_map_range(%p, %x, %p, %d, %x) err: %d\n", iommu_pt->domain, iommu_virt_addr, memdesc->sg, size, protflags, ret); return ret; } if (kgsl_memdesc_has_guard_page(memdesc)) { ret = iommu_map(iommu_pt->domain, iommu_virt_addr + size, page_to_phys(kgsl_guard_page), PAGE_SIZE, protflags & ~IOMMU_WRITE); if (ret) { KGSL_CORE_ERR("iommu_map(%p, %x, guard, %x) err: %d\n", iommu_pt->domain, iommu_virt_addr + size, protflags & ~IOMMU_WRITE, ret); /* cleanup the partial mapping */ iommu_unmap_range(iommu_pt->domain, iommu_virt_addr, size); } } return ret; } void 
kgsl_iommu_pagefault_resume(struct kgsl_mmu *mmu)
{
	/*
	 * Clear and resume every IOMMU context bank that recorded a
	 * pagefault, then clear the device-level fault flag so normal
	 * operation can continue.
	 */
	struct kgsl_iommu *iommu = mmu->priv;
	int i, j;

	if (atomic_read(&mmu->fault)) {
		for (i = 0; i < iommu->unit_count; i++) {
			struct kgsl_iommu_unit *iommu_unit =
				&iommu->iommu_units[i];
			for (j = 0; j < iommu_unit->dev_count; j++) {
				if (iommu_unit->dev[j].fault) {
					/*
					 * NOTE(review): the clk enable/disable
					 * calls pass the device index j where
					 * other call sites in this file pass a
					 * KGSL_IOMMU_CONTEXT_* id — confirm
					 * the two enumerations line up.
					 */
					kgsl_iommu_enable_clk(mmu, j);
					_iommu_lock(iommu);
					/*
					 * Write RESUME to restart the stalled
					 * transaction, then clear the fault
					 * status register for this context.
					 */
					KGSL_IOMMU_SET_CTX_REG(iommu,
						iommu_unit,
						iommu_unit->dev[j].ctx_id,
						RESUME, 1);
					KGSL_IOMMU_SET_CTX_REG(iommu,
						iommu_unit,
						iommu_unit->dev[j].ctx_id,
						FSR, 0);
					/*
					 * NOTE(review): clocks are disabled
					 * before _iommu_unlock(); verify the
					 * unlock path does not touch IOMMU
					 * registers that need the clocks on.
					 */
					kgsl_iommu_disable_clk(mmu, j);
					_iommu_unlock(iommu);
					iommu_unit->dev[j].fault = 0;
				}
			}
		}
		atomic_set(&mmu->fault, 0);
	}
}

static void kgsl_iommu_stop(struct kgsl_mmu *mmu)
{
	/*
	 * stop device mmu
	 *
	 * call this with the global lock held
	 */
	if (mmu->flags & KGSL_FLAGS_STARTED) {
		/* detach iommu attachment */
		kgsl_detach_pagetable_iommu_domain(mmu);
		mmu->hwpagetable = NULL;

		mmu->flags &= ~KGSL_FLAGS_STARTED;

		/* ack/clear any pagefault still pending on a context bank */
		kgsl_iommu_pagefault_resume(mmu);
	}

	/* switch off MMU clocks and cancel any events it has queued */
	kgsl_cancel_events(mmu->device, mmu);
}

/*
 * Tear down all IOMMU state: release the private-bank and default
 * pagetables, unmap the per-unit register mappings, free the GPU-CPU
 * sync-lock descriptors and the guard page.  Always returns 0.
 */
static int kgsl_iommu_close(struct kgsl_mmu *mmu)
{
	struct kgsl_iommu *iommu = mmu->priv;
	int i;

	if (mmu->priv_bank_table != NULL) {
		kgsl_iommu_cleanup_regs(mmu, mmu->priv_bank_table);
		kgsl_mmu_putpagetable(mmu->priv_bank_table);
	}

	if (mmu->defaultpagetable != NULL)
		kgsl_mmu_putpagetable(mmu->defaultpagetable);

	for (i = 0; i < iommu->unit_count; i++) {
		struct kgsl_memdesc *reg_map = &iommu->iommu_units[i].reg_map;

		if (reg_map->hostptr)
			iounmap(reg_map->hostptr);
		kgsl_sg_free(reg_map->sg, reg_map->sglen);
		reg_map->priv &= ~KGSL_MEMDESC_GLOBAL;
	}
	/* clear IOMMU GPU CPU sync structures */
	kgsl_sg_free(iommu->sync_lock_desc.sg, iommu->sync_lock_desc.sglen);
	memset(&iommu->sync_lock_desc, 0, sizeof(iommu->sync_lock_desc));
	iommu->sync_lock_vars = NULL;

	kfree(iommu);

	if (kgsl_guard_page != NULL) {
		__free_page(kgsl_guard_page);
		kgsl_guard_page = NULL;
	}

	return 0;
}

static phys_addr_t kgsl_iommu_get_current_ptbase(struct
kgsl_mmu *mmu) { phys_addr_t pt_base; struct kgsl_iommu *iommu = mmu->priv; /* We cannot enable or disable the clocks in interrupt context, this function is called from interrupt context if there is an axi error */ if (in_interrupt()) return 0; /* Return the current pt base by reading IOMMU pt_base register */ kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER); pt_base = KGSL_IOMMU_GET_CTX_REG(iommu, (&iommu->iommu_units[0]), KGSL_IOMMU_CONTEXT_USER, TTBR0); kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER); return pt_base & KGSL_IOMMU_CTX_TTBR0_ADDR_MASK; } /* * kgsl_iommu_default_setstate - Change the IOMMU pagetable or flush IOMMU tlb * of the primary context bank * @mmu - Pointer to mmu structure * @flags - Flags indicating whether pagetable has to chnage or tlb is to be * flushed or both * * Based on flags set the new pagetable fo the IOMMU unit or flush it's tlb or * do both by doing direct register writes to the IOMMu registers through the * cpu * Return - void */ static int kgsl_iommu_default_setstate(struct kgsl_mmu *mmu, uint32_t flags) { struct kgsl_iommu *iommu = mmu->priv; int temp; int i; int ret = 0; phys_addr_t pt_base = kgsl_iommu_get_pt_base_addr(mmu, mmu->hwpagetable); phys_addr_t pt_val; ret = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER); if (ret) { KGSL_DRV_ERR(mmu->device, "Failed to enable iommu clocks\n"); return ret; } /* For v0 SMMU GPU needs to be idle for tlb invalidate as well */ if (msm_soc_version_supports_iommu_v0()) { ret = kgsl_idle(mmu->device); if (ret) return ret; } /* Acquire GPU-CPU sync Lock here */ _iommu_lock(iommu); if (flags & KGSL_MMUFLAGS_PTUPDATE) { if (!msm_soc_version_supports_iommu_v0()) { ret = kgsl_idle(mmu->device); if (ret) goto unlock; } for (i = 0; i < iommu->unit_count; i++) { /* get the lsb value which should not change when * changing ttbr0 */ pt_val = kgsl_iommu_get_default_ttbr0(mmu, i, KGSL_IOMMU_CONTEXT_USER); pt_base &= KGSL_IOMMU_CTX_TTBR0_ADDR_MASK; pt_val &= 
~KGSL_IOMMU_CTX_TTBR0_ADDR_MASK; pt_val |= pt_base; if (sizeof(phys_addr_t) > sizeof(unsigned long)) { KGSL_IOMMU_SET_CTX_REG_LL(iommu, (&iommu->iommu_units[i]), KGSL_IOMMU_CONTEXT_USER, TTBR0, pt_val); } else { KGSL_IOMMU_SET_CTX_REG(iommu, (&iommu->iommu_units[i]), KGSL_IOMMU_CONTEXT_USER, TTBR0, pt_val); } mb(); temp = KGSL_IOMMU_GET_CTX_REG(iommu, (&iommu->iommu_units[i]), KGSL_IOMMU_CONTEXT_USER, TTBR0); } } /* Flush tlb */ if (flags & KGSL_MMUFLAGS_TLBFLUSH) { unsigned long wait_for_flush; for (i = 0; i < iommu->unit_count; i++) { KGSL_IOMMU_SET_CTX_REG(iommu, (&iommu->iommu_units[i]), KGSL_IOMMU_CONTEXT_USER, TLBIALL, 1); mb(); /* * Wait for flush to complete by polling the flush * status bit of TLBSTATUS register for not more than * 2 s. After 2s just exit, at that point the SMMU h/w * may be stuck and will eventually cause GPU to hang * or bring the system down. */ if (!msm_soc_version_supports_iommu_v0()) { wait_for_flush = jiffies + msecs_to_jiffies(2000); KGSL_IOMMU_SET_CTX_REG(iommu, (&iommu->iommu_units[i]), KGSL_IOMMU_CONTEXT_USER, TLBSYNC, 0); while (KGSL_IOMMU_GET_CTX_REG(iommu, (&iommu->iommu_units[i]), KGSL_IOMMU_CONTEXT_USER, TLBSTATUS) & (KGSL_IOMMU_CTX_TLBSTATUS_SACTIVE)) { if (time_after(jiffies, wait_for_flush)) { KGSL_DRV_ERR(mmu->device, "Wait limit reached for IOMMU tlb flush\n"); break; } cpu_relax(); } } } } unlock: /* Release GPU-CPU sync Lock here */ _iommu_unlock(iommu); /* Disable smmu clock */ kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER); return ret; } /* * kgsl_iommu_get_reg_gpuaddr - Returns the gpu address of IOMMU regsiter * @mmu - Pointer to mmu structure * @iommu_unit - The iommu unit for which base address is requested * @ctx_id - The context ID of the IOMMU ctx * @reg - The register for which address is required * * Return - The gpu address of register which can be used in type3 packet */ static unsigned int kgsl_iommu_get_reg_gpuaddr(struct kgsl_mmu *mmu, int iommu_unit, int ctx_id, int reg) { struct kgsl_iommu 
*iommu = mmu->priv; if (KGSL_IOMMU_GLOBAL_BASE == reg) return iommu->iommu_units[iommu_unit].reg_map.gpuaddr; if (iommu->iommu_reg_list[reg].ctx_reg) return iommu->iommu_units[iommu_unit].reg_map.gpuaddr + iommu->iommu_reg_list[reg].reg_offset + (ctx_id << KGSL_IOMMU_CTX_SHIFT) + iommu->ctx_offset; else return iommu->iommu_units[iommu_unit].reg_map.gpuaddr + iommu->iommu_reg_list[reg].reg_offset; } /* * kgsl_iommu_hw_halt_supported - Returns whether IOMMU halt command is * supported * @mmu - Pointer to mmu structure * @iommu_unit - The iommu unit for which the property is requested */ static int kgsl_iommu_hw_halt_supported(struct kgsl_mmu *mmu, int iommu_unit) { struct kgsl_iommu *iommu = mmu->priv; return iommu->iommu_units[iommu_unit].iommu_halt_enable; } static int kgsl_iommu_get_num_iommu_units(struct kgsl_mmu *mmu) { struct kgsl_iommu *iommu = mmu->priv; return iommu->unit_count; } /* * kgsl_iommu_set_pf_policy() - Set the pagefault policy for IOMMU * @mmu: Pointer to mmu structure * @pf_policy: The pagefault polict to set * * Check if the new policy indicated by pf_policy is same as current * policy, if same then return else set the policy */ static int kgsl_iommu_set_pf_policy(struct kgsl_mmu *mmu, unsigned int pf_policy) { int i, j; struct kgsl_iommu *iommu = mmu->priv; struct adreno_device *adreno_dev = ADRENO_DEVICE(mmu->device); int ret = 0; unsigned int sctlr_val; if ((adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE) == (pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE)) return ret; if (msm_soc_version_supports_iommu_v0()) return ret; ret = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER); if (ret) { KGSL_DRV_ERR(mmu->device, "Failed to enable iommu clocks\n"); return ret; } ret = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV); if (ret) { KGSL_DRV_ERR(mmu->device, "Failed to enable iommu clocks\n"); kgsl_iommu_disable_clk_on_ts(mmu, 0, false); return ret; } /* Need to idle device before changing options */ ret = 
mmu->device->ftbl->idle(mmu->device); if (ret) { kgsl_iommu_disable_clk_on_ts(mmu, 0, false); return ret; } for (i = 0; i < iommu->unit_count; i++) { struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i]; for (j = 0; j < iommu_unit->dev_count; j++) { sctlr_val = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit, iommu_unit->dev[j].ctx_id, SCTLR); if (pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE) sctlr_val &= ~(0x1 << KGSL_IOMMU_SCTLR_HUPCF_SHIFT); else sctlr_val |= (0x1 << KGSL_IOMMU_SCTLR_HUPCF_SHIFT); KGSL_IOMMU_SET_CTX_REG(iommu, iommu_unit, iommu_unit->dev[j].ctx_id, SCTLR, sctlr_val); } } kgsl_iommu_disable_clk_on_ts(mmu, 0, false); return ret; } struct kgsl_mmu_ops iommu_ops = { .mmu_init = kgsl_iommu_init, .mmu_close = kgsl_iommu_close, .mmu_start = kgsl_iommu_start, .mmu_stop = kgsl_iommu_stop, .mmu_setstate = kgsl_iommu_setstate, .mmu_device_setstate = kgsl_iommu_default_setstate, .mmu_pagefault = NULL, .mmu_pagefault_resume = kgsl_iommu_pagefault_resume, .mmu_get_current_ptbase = kgsl_iommu_get_current_ptbase, .mmu_enable_clk = kgsl_iommu_enable_clk, .mmu_disable_clk = kgsl_iommu_disable_clk, .mmu_disable_clk_on_ts = kgsl_iommu_disable_clk_on_ts, .mmu_get_default_ttbr0 = kgsl_iommu_get_default_ttbr0, .mmu_get_reg_gpuaddr = kgsl_iommu_get_reg_gpuaddr, .mmu_get_reg_ahbaddr = kgsl_iommu_get_reg_ahbaddr, .mmu_get_num_iommu_units = kgsl_iommu_get_num_iommu_units, .mmu_pt_equal = kgsl_iommu_pt_equal, .mmu_get_pt_base_addr = kgsl_iommu_get_pt_base_addr, .mmu_hw_halt_supported = kgsl_iommu_hw_halt_supported, /* These callbacks will be set on some chipsets */ .mmu_setup_pt = NULL, .mmu_cleanup_pt = NULL, .mmu_sync_lock = kgsl_iommu_sync_lock, .mmu_sync_unlock = kgsl_iommu_sync_unlock, .mmu_set_pf_policy = kgsl_iommu_set_pf_policy, }; struct kgsl_mmu_pt_ops iommu_pt_ops = { .mmu_map = kgsl_iommu_map, .mmu_unmap = kgsl_iommu_unmap, .mmu_create_pagetable = kgsl_iommu_create_pagetable, .mmu_destroy_pagetable = kgsl_iommu_destroy_pagetable, };
Java
// 20020717 gdr // Copyright (C) 2002-2021 Free Software Foundation, Inc. // // This file is part of the GNU ISO C++ Library. This library is free // software; you can redistribute it and/or modify it under the // terms of the GNU General Public License as published by the // Free Software Foundation; either version 3, or (at your option) // any later version. // This library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License along // with this library; see the file COPYING3. If not see // <http://www.gnu.org/licenses/>. // Test slice class invariants #include <valarray> #include <cstdlib> #include <testsuite_hooks.h> bool construction(std::size_t start, std::size_t size, std::size_t stride) { std::slice s(start, size, stride); return s.start() == start && s.size() == size && s.stride() == stride; } bool copy(std::size_t start, std::size_t size, std::size_t stride) { std::slice s(start, size, stride); std::slice t = s; return t.start() == start && t.size() == size && t.stride() == stride; } bool assignment(std::size_t start, std::size_t size, std::size_t stride) { std::slice s(start, size, stride); std::slice t; t = s; return t.start() == start && t.size() == size && t.stride() == stride; } int main() { std::srand(20020717); using std::rand; VERIFY(construction(rand(), rand(), rand())); VERIFY(copy(rand(), rand(), rand())); VERIFY(assignment(rand(), rand(), rand())); return 0; }
Java
<!-- * Asterisk-GUI - an Asterisk configuration interface * * Set Date on the Digium Appliance - AA50 * * Copyright (C) 2007-2008, Digium, Inc. * * Pari Nannapaneni <[email protected]> * * See http://www.asterisk.org for more information about * the Asterisk project. Please do not directly contact * any of the maintainers of this project for assistance; * the project provides a web site, mailing lists and IRC * channels for your use. * * This program is free software, distributed under the terms of * the GNU General Public License Version 2. See the LICENSE file * at the top of the source tree. * * --> <html> <head> <title>Update Date & Time</title> <meta http-equiv="Content-Type" content="text/html;charset=ISO-8859-1" /> <link href="stylesheets/schwing.css" media="all" rel="Stylesheet" type="text/css" /> <style type="text/css"></style> </head> <body> <div class="iframeTitleBar"> Update Date & Time <span style="cursor: pointer; cursor: hand;" onclick="window.location.reload();" >&nbsp;<img src="images/refresh.png" title=" Refresh " border=0 >&nbsp;</span> </div> <div class='lite_Heading'> Update Date & Time </div> <table width=700 border=0 align=left> <tr> <td align=right><B>NTP server :</B></td> <td> <span id='NTPSERVER'></span> &nbsp;&nbsp;&nbsp;<A href='#' class='splbutton' title='Edit NTP server' onclick="parent.miscFunctions.click_panel('networking.html');"><B>Edit</B></A> </td> </tr> <tr> <td height=15></td><td></td></tr> <tr> <td valign=top align=right><B>Current System Date :</B></td> <td id='current_date'></td> </tr> <tr class='lite'> <td valign=top align=right><B>Current System Date in Local Time :</B></td> <td id='current_date_local'></td> </tr> <tr> <td height=15></td><td></td></tr> <tr> <td valign=top align=right> <B>Set New Date & Time :</B><BR> <span class='lite'>Enter Date & Time in your Local time&nbsp;&nbsp;</span> </td> <td> <TABLE cellpadding=6 cellspacing=1 border=0> <TR> <TD width=70 align=right>Date </TD> <TD><input size=10 
id="date_day"></TD>
		</TR>
		<TR>
			<TD width=70 align=right>Time</TD>
			<TD>
				<!-- Time -->
				<select id="hod"></select>:
				<select id="minute"></select>&nbsp;
				<select id="ampm">
					<option value="AM">AM</option>
					<option value="PM">PM</option>
				</select>
				<!-- Time -->
			</TD>
		</TR>
		<TR>
			<TD colspan=2 align=center>
				<span class='guiButton' onclick='update_systemdate();'>Update</span>
			</TD>
		</TR>
	</TABLE>
	</td>
</tr>
</table>

<script src="js/jquery.js"></script>
<script src="js/astman.js"></script>
<script src="js/jquery.tooltip.js"></script>
<script src="js/jquery.date_input.js"></script>
<script>
// Page initialisation: populate the hour/minute dropdowns, show the
// configured NTP server and the current system date (UTC and local).
function localajaxinit(){
	top.document.title = 'Set Date & Time' ;
	parent.ASTGUI.dialog.waitWhile('Loading...');
	$("#date_day").date_input();
	(function(){ // fill hour (01-12) and minute (00-59) selects
		var x;
		var hod = _$('hod');
		var minute = _$('minute');
		for(var i=1; i < 13; i++){
			x = i.addZero();
			ASTGUI.selectbox.append(hod, x , x);
		}
		for(var i=0; i < 60; i++){
			x = i.addZero();
			ASTGUI.selectbox.append(minute, x , x);
		}
		hod.selectedIndex = -1;
		minute.selectedIndex = -1;
	})();
	(function(){ // show the NTP server from networking.conf [general]
		var c = context2json({ filename:'networking.conf' , context : 'general' , usf:1 });
		_$('NTPSERVER').innerHTML = (c && c['NTP_ADDRESS']) || '&nbsp;&nbsp;--';
	})();
	ASTGUI.systemCmdWithOutput( "date ", function(output){
		_$('current_date').innerHTML = '&nbsp;&nbsp;' + output.bold_X('UTC') ;
		_$('current_date_local').innerHTML = '&nbsp;&nbsp;' + ASTGUI.toLocalTime(output);
		parent.ASTGUI.dialog.hide();
	});
};

// Read the local date/time from the form, convert it to UTC and set the
// system clock via `date -s MMDDhhmmYYYY`.
function update_systemdate(){
	parent.ASTGUI.dialog.waitWhile('Updating Date & Time ...');
	try{
		// convert local time to UTC
		var lt_minutes = _$('minute').value ; // 0 to 59
		var date_day = _$("date_day").value ;
		if( !date_day || (_$('hod').selectedIndex == -1 ) || ( _$('minute').selectedIndex == -1) ){
			parent.ASTGUI.dialog.hide();
			return;
		}
		var date_day_split = date_day.split(' ');
		var lt_month = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"].indexOf(date_day_split[1]); // 0 to 11
		var lt_dom = date_day_split[0] ; // 1 to 31
		lt_dom = lt_dom.addZero() ;
		var lt_year = date_day_split[2] ; // 2007
		// prepare commands to set the date
		if( _$('ampm').value == "AM" ){
			var lt_hours = (_$('hod').value == "12" )? "00" : _$('hod').value;
		}else if( _$('ampm').value == "PM"){
			var lt_hours = ( _$('hod').value == "12") ? parseInt( _$('hod').value) : parseInt( _$('hod').value) + 12 ;
		}
		var lt = new Date();
		lt.setFullYear ( lt_year, lt_month, lt_dom );
		lt.setHours ( lt_hours, lt_minutes );
		var utc_hours = lt.getUTCHours(); // 0 to 23
		var utc_minutes = lt.getUTCMinutes(); // 0 to 59
		var utc_month = lt.getUTCMonth(); // 0 to 11
		var utc_dom = lt.getUTCDate(); // 1 to 31
		var utc_year = lt.getUTCFullYear() ; // 2007
		// BUGFIX: getUTCMonth() is zero-based, so convert to 1-12 FIRST and
		// then zero-pad. The old code compared the zero-based value against
		// 10 before adding 1, so October (index 9) produced "010" and broke
		// the MMDDhhmmYYYY string passed to `date -s`.
		utc_month = utc_month + 1;
		if (utc_month < 10) { utc_month = "0"+ String(utc_month) ; } else { utc_month = String(utc_month) ; }
		if (utc_dom < 10) { utc_dom = "0"+ String(utc_dom) ; }
		if (utc_hours < 10) { utc_hours = "0"+ String(utc_hours) ; }
		if (utc_minutes < 10) { utc_minutes = "0"+ String(utc_minutes) ; }
		var newdate = utc_month + utc_dom + utc_hours + utc_minutes + utc_year ;
	}catch(err){
		parent.ASTGUI.dialog.hide();
		return false;
	}
	parent.ASTGUI.systemCmd( "date -s " + newdate , function(){
		parent.ASTGUI.dialog.hide();
		var after = function(){
			alert("You will be now logged out of the gui.\n Please login again !!");
			var f = makeSyncRequest({ action :'logoff'});
			top.window.location.reload();
		};
		ASTGUI.feedback( { msg:'updated date & time', showfor:2 });
		setTimeout( after, 1000 );
	});
}
</script>
</body>
</html>
Java
<?php
/**
 * This file represents an example of the code that themes would use to register
 * the required plugins.
 *
 * It is expected that theme authors would copy and paste this code into their
 * functions.php file, and amend to suit.
 *
 * @package    TGM-Plugin-Activation
 * @subpackage Example
 * @version    2.3.6
 * @author     Thomas Griffin <[email protected]>
 * @author     Gary Jones <[email protected]>
 * @copyright  Copyright (c) 2012, Thomas Griffin
 * @license    http://opensource.org/licenses/gpl-2.0.php GPL v2 or later
 * @link       https://github.com/thomasgriffin/TGM-Plugin-Activation
 */

/**
 * Include the TGM_Plugin_Activation class.
 */
require_once( get_template_directory() . '/functions/class-tgm-plugin-activation.php' );

add_action( 'tgmpa_register', 'my_theme_register_required_plugins' );

/**
 * Register the required plugins for this theme.
 *
 * In this example, we register two plugins - one included with the TGMPA library
 * and one from the .org repo.
 *
 * The variable passed to tgmpa_register_plugins() should be an array of plugin
 * arrays.
 *
 * This function is hooked into tgmpa_init, which is fired within the
 * TGM_Plugin_Activation class constructor.
 */
function my_theme_register_required_plugins() {

	/**
	 * Array of plugin arrays. Required keys are name and slug.
	 * If the source is NOT from the .org repo, then source is also required.
	 */
	$plugins = array(

		array(
			'name'               => 'Symple Shortcodes',
			'slug'               => 'symple-shortcodes',
			'source'             => 'http://www.wpexplorer.com/symple-shortcodes-download',
			'required'           => false,
			'force_activation'   => false,
			'force_deactivation' => false,
		),

		array(
			'name'               => 'ZillaLikes',
			'slug'               => 'zilla-likes',
			'source'             => get_template_directory_uri() . '/plugins/zilla-likes.zip',
			// BUGFIX: the key must be the lower-case 'required'. PHP array
			// keys are case-sensitive and TGMPA reads $plugin['required'],
			// so the old 'Required' key was ignored and this plugin was
			// silently treated as optional.
			'required'           => true,
			'version'            => '1.0',
			'force_activation'   => false,
			'force_deactivation' => false,
			'external_url'       => '',
		),
	);

	// Change this to your theme text domain, used for internationalising strings
	$theme_text_domain = 'tgmpa';

	/**
	 * Array of configuration settings. Amend each line as needed.
	 * If you want the default strings to be available under your own theme domain,
	 * leave the strings uncommented.
	 * Some of the strings are added into a sprintf, so see the comments at the
	 * end of each line for what each argument will be.
	 */
	$config = array(
		'domain'           => $theme_text_domain,           // Text domain - likely want to be the same as your theme.
		'default_path'     => '',                           // Default absolute path to pre-packaged plugins
		'parent_menu_slug' => 'themes.php',                 // Default parent menu slug
		'parent_url_slug'  => 'themes.php',                 // Default parent URL slug
		'menu'             => 'install-required-plugins',   // Menu slug
		'has_notices'      => true,                         // Show admin notices or not
		'is_automatic'     => false,                        // Automatically activate plugins after installation or not
		'message'          => '',                           // Message to output right before the plugins table
		'strings'          => array(
			'page_title'                      => __( 'Install Required Plugins', $theme_text_domain ),
			'menu_title'                      => __( 'Install Plugins', $theme_text_domain ),
			'installing'                      => __( 'Installing Plugin: %s', $theme_text_domain ), // %1$s = plugin name
			'oops'                            => __( 'Something went wrong with the plugin API.', $theme_text_domain ),
			'notice_can_install_required'     => _n_noop( 'This theme requires the following plugin: %1$s.', 'This theme requires the following plugins: %1$s.' ), // %1$s = plugin name(s)
			'notice_can_install_recommended'  => _n_noop( 'This theme recommends the following plugin: %1$s.', 'This theme recommends the following plugins: %1$s.' ), // %1$s = plugin name(s)
			'notice_cannot_install'           => _n_noop( 'Sorry, but you do not have the correct permissions to install the %s plugin. Contact the administrator of this site for help on getting the plugin installed.', 'Sorry, but you do not have the correct permissions to install the %s plugins. Contact the administrator of this site for help on getting the plugins installed.' ), // %1$s = plugin name(s)
			'notice_can_activate_required'    => _n_noop( 'The following required plugin is currently inactive: %1$s.', 'The following required plugins are currently inactive: %1$s.' ), // %1$s = plugin name(s)
			'notice_can_activate_recommended' => _n_noop( 'The following recommended plugin is currently inactive: %1$s.', 'The following recommended plugins are currently inactive: %1$s.' ), // %1$s = plugin name(s)
			'notice_cannot_activate'          => _n_noop( 'Sorry, but you do not have the correct permissions to activate the %s plugin. Contact the administrator of this site for help on getting the plugin activated.', 'Sorry, but you do not have the correct permissions to activate the %s plugins. Contact the administrator of this site for help on getting the plugins activated.' ), // %1$s = plugin name(s)
			'notice_ask_to_update'            => _n_noop( 'The following plugin needs to be updated to its latest version to ensure maximum compatibility with this theme: %1$s.', 'The following plugins need to be updated to their latest version to ensure maximum compatibility with this theme: %1$s.' ), // %1$s = plugin name(s)
			'notice_cannot_update'            => _n_noop( 'Sorry, but you do not have the correct permissions to update the %s plugin. Contact the administrator of this site for help on getting the plugin updated.', 'Sorry, but you do not have the correct permissions to update the %s plugins. Contact the administrator of this site for help on getting the plugins updated.' ), // %1$s = plugin name(s)
			'install_link'                    => _n_noop( 'Begin installing plugin', 'Begin installing plugins' ),
			'activate_link'                   => _n_noop( 'Activate installed plugin', 'Activate installed plugins' ),
			'return'                          => __( 'Return to Required Plugins Installer', $theme_text_domain ),
			'plugin_activated'                => __( 'Plugin activated successfully.', $theme_text_domain ),
			'complete'                        => __( 'All plugins installed and activated successfully. %s', $theme_text_domain ), // %1$s = dashboard link
			'nag_type'                        => 'updated' // Determines admin notice type - can only be 'updated' or 'error'
		)
	);

	tgmpa( $plugins, $config );
}
Java
/* * Copyright (c) International Business Machines Corp., 2006 * Copyright (c) Nokia Corporation, 2006, 2007 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Author: Artem Bityutskiy (Битюцкий Артём) */ /* * UBI input/output unit. * * This unit provides a uniform way to work with all kinds of the underlying * MTD devices. It also implements handy functions for reading and writing UBI * headers. * * We are trying to have a paranoid mindset and not to trust to what we read * from the flash media in order to be more secure and robust. So this unit * validates every single header it reads from the flash media. * * Some words about how the eraseblock headers are stored. * * The erase counter header is always stored at offset zero. By default, the * VID header is stored after the EC header at the closest aligned offset * (i.e. aligned to the minimum I/O unit size). Data starts next to the VID * header at the closest aligned offset. But this default layout may be * changed. For example, for different reasons (e.g., optimization) UBI may be * asked to put the VID header at further offset, and even at an unaligned * offset. Of course, if the offset of the VID header is unaligned, UBI adds * proper padding in front of it. Data offset may also be changed but it has to * be aligned. * * About minimal I/O units. 
In general, UBI assumes flash device model where * there is only one minimal I/O unit size. E.g., in case of NOR flash it is 1, * in case of NAND flash it is a NAND page, etc. This is reported by MTD in the * @ubi->mtd->writesize field. But as an exception, UBI admits of using another * (smaller) minimal I/O unit size for EC and VID headers to make it possible * to do different optimizations. * * This is extremely useful in case of NAND flashes which admit of several * write operations to one NAND page. In this case UBI can fit EC and VID * headers at one NAND page. Thus, UBI may use "sub-page" size as the minimal * I/O unit for the headers (the @ubi->hdrs_min_io_size field). But it still * reports NAND page size (@ubi->min_io_size) as a minimal I/O unit for the UBI * users. * * Example: some Samsung NANDs with 2KiB pages allow 4x 512-byte writes, so * although the minimal I/O unit is 2K, UBI uses 512 bytes for EC and VID * headers. * * Q: why not just to treat sub-page as a minimal I/O unit of this flash * device, e.g., make @ubi->min_io_size = 512 in the example above? * * A: because when writing a sub-page, MTD still writes a full 2K page but the * bytes which are no relevant to the sub-page are 0xFF. So, basically, writing * 4x512 sub-pages is 4 times slower then writing one 2KiB NAND page. Thus, we * prefer to use sub-pages only for EV and VID headers. * * As it was noted above, the VID header may start at a non-aligned offset. * For example, in case of a 2KiB page NAND flash with a 512 bytes sub-page, * the VID header may reside at offset 1984 which is the last 64 bytes of the * last sub-page (EC header is always at offset zero). This causes some * difficulties when reading and writing VID headers. * * Suppose we have a 64-byte buffer and we read a VID header at it. We change * the data and want to write this VID header out. As we can only write in * 512-byte chunks, we have to allocate one more buffer and copy our VID header * to offset 448 of this buffer. 
* * The I/O unit does the following trick in order to avoid this extra copy. * It always allocates a @ubi->vid_hdr_alsize bytes buffer for the VID header * and returns a pointer to offset @ubi->vid_hdr_shift of this buffer. When the * VID header is being written out, it shifts the VID header pointer back and * writes the whole sub-page. */ #include <linux/crc32.h> #include <linux/err.h> #include "ubi.h" #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum); static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum); static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum, const struct ubi_ec_hdr *ec_hdr); static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum); static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum, const struct ubi_vid_hdr *vid_hdr); static int paranoid_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len); #else #define paranoid_check_not_bad(ubi, pnum) 0 #define paranoid_check_peb_ec_hdr(ubi, pnum) 0 #define paranoid_check_ec_hdr(ubi, pnum, ec_hdr) 0 #define paranoid_check_peb_vid_hdr(ubi, pnum) 0 #define paranoid_check_vid_hdr(ubi, pnum, vid_hdr) 0 #define paranoid_check_all_ff(ubi, pnum, offset, len) 0 #endif /** * ubi_io_read - read data from a physical eraseblock. * @ubi: UBI device description object * @buf: buffer where to store the read data * @pnum: physical eraseblock number to read from * @offset: offset within the physical eraseblock from where to read * @len: how many bytes to read * * This function reads data from offset @offset of physical eraseblock @pnum * and stores the read data in the @buf buffer. 
The following return codes are
 * possible:
 *
 * o %0 if all the requested data were successfully read;
 * o %UBI_IO_BITFLIPS if all the requested data were successfully read, but
 *   correctable bit-flips were detected; this is harmless but may indicate
 *   that this eraseblock may become bad soon (but do not have to);
 * o %-EBADMSG if the MTD subsystem reported about data integrity problems, for
 *   example it can be an ECC error in case of NAND; this most probably means
 *   that the data is corrupted;
 * o %-EIO if some I/O error occurred;
 * o other negative error codes in case of other errors.
 */
int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
		int len)
{
	int err, retries = 0;
	size_mtd_t read;
	loff_mtd_t addr;

	dbg_io("read %d bytes from PEB %d:%d", len, pnum, offset);

	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
	ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
	ubi_assert(len > 0);

	/* Paranoid checks return positive on "check failed" - map to -EINVAL */
	err = paranoid_check_not_bad(ubi, pnum);
	if (err)
		return err > 0 ? -EINVAL : err;

	addr = (loff_mtd_t)pnum * ubi->peb_size + offset;
retry:
	err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
	if (err) {
		if (err == -EUCLEAN) {
			/*
			 * -EUCLEAN is reported if there was a bit-flip which
			 * was corrected, so this is harmless.
			 */
			ubi_msg("fixable bit-flip detected at PEB %d", pnum);
			ubi_assert(len == read);
			return UBI_IO_BITFLIPS;
		}

		/* Short reads are retried up to UBI_IO_RETRIES times */
		if (read != len && retries++ < UBI_IO_RETRIES) {
			dbg_io("error %d while reading %d bytes from PEB %d:%d, "
			       "read only %lld bytes, retry",
			       err, len, pnum, offset, read);
			yield();
			goto retry;
		}

		ubi_err("error %d while reading %d bytes from PEB %d:%d, "
			"read %lld bytes", err, len, pnum, offset, read);
		ubi_dbg_dump_stack();

		/*
		 * The driver should never return -EBADMSG if it failed to read
		 * all the requested data. But some buggy drivers might do
		 * this, so we change it to -EIO.
		 */
		if (read != len && err == -EBADMSG) {
			ubi_assert(0);
			err = -EIO;
		}
	} else {
		ubi_assert(len == read);

		/* Debug-only fault injection: pretend a bit-flip happened */
		if (ubi_dbg_is_bitflip()) {
			dbg_msg("bit-flip (emulated)");
			err = UBI_IO_BITFLIPS;
		}
	}

	return err;
}

/**
 * ubi_io_write - write data to a physical eraseblock.
 * @ubi: UBI device description object
 * @buf: buffer with the data to write
 * @pnum: physical eraseblock number to write to
 * @offset: offset within the physical eraseblock where to write
 * @len: how many bytes to write
 *
 * This function writes @len bytes of data from buffer @buf to offset @offset
 * of physical eraseblock @pnum. If all the data were successfully written,
 * zero is returned. If an error occurred, this function returns a negative
 * error code. If %-EIO is returned, the physical eraseblock most probably went
 * bad.
 *
 * Note, in case of an error, it is possible that something was still written
 * to the flash media, but may be some garbage.
 */
int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
		 int len)
{
	int err;
	size_mtd_t written;
	loff_mtd_t addr;

	dbg_io("write %d bytes to PEB %d:%d", len, pnum, offset);

	/* Writes must be aligned to the minimal I/O unit of the header area */
	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
	ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
	ubi_assert(offset % ubi->hdrs_min_io_size == 0);
	ubi_assert(len > 0 && len % ubi->hdrs_min_io_size == 0);

	if (ubi->ro_mode) {
		ubi_err("read-only mode");
		return -EROFS;
	}

	/* The below has to be compiled out if paranoid checks are disabled */

	err = paranoid_check_not_bad(ubi, pnum);
	if (err)
		return err > 0 ? -EINVAL : err;

	/* The area we are writing to has to contain all 0xFF bytes */
	err = paranoid_check_all_ff(ubi, pnum, offset, len);
	if (err)
		return err > 0 ? -EINVAL : err;

	if (offset >= ubi->leb_start) {
		/*
		 * We write to the data area of the physical eraseblock. Make
		 * sure it has valid EC and VID headers.
		 */
		err = paranoid_check_peb_ec_hdr(ubi, pnum);
		if (err)
			return err > 0 ? -EINVAL : err;
		err = paranoid_check_peb_vid_hdr(ubi, pnum);
		if (err)
			return err > 0 ? -EINVAL : err;
	}

	/* Debug-only fault injection: pretend the write failed */
	if (ubi_dbg_is_write_failure()) {
		dbg_err("cannot write %d bytes to PEB %d:%d "
			"(emulated)", len, pnum, offset);
		ubi_dbg_dump_stack();
		return -EIO;
	}

	addr = (loff_mtd_t)pnum * ubi->peb_size + offset;
	err = ubi->mtd->write(ubi->mtd, addr, len, &written, buf);
	if (err) {
		ubi_err("error %d while writing %d bytes to PEB %d:%d, written"
			" %lld bytes", err, len, pnum, offset, written);
		ubi_dbg_dump_stack();
	} else
		ubi_assert(written == len);

	return err;
}

/**
 * erase_callback - MTD erasure call-back.
 * @ei: MTD erase information object.
 *
 * Note, even though MTD erase interface is asynchronous, all the current
 * implementations are synchronous anyway.
 */
static void erase_callback(struct erase_info *ei)
{
	/* ei->priv carries the wait queue head set up by do_sync_erase() */
	wake_up_interruptible((wait_queue_head_t *)ei->priv);
}

/**
 * do_sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to erase
 *
 * This function synchronously erases physical eraseblock @pnum and returns
 * zero in case of success and a negative error code in case of failure. If
 * %-EIO is returned, the physical eraseblock most probably went bad.
 */
static int do_sync_erase(struct ubi_device *ubi, int pnum)
{
	int err, retries = 0;
	struct erase_info ei;
	wait_queue_head_t wq;

	dbg_io("erase PEB %d", pnum);

retry:
	init_waitqueue_head(&wq);
	memset(&ei, 0, sizeof(struct erase_info));

	ei.mtd      = ubi->mtd;
	ei.addr     = (loff_mtd_t)pnum * ubi->peb_size;
	ei.len      = ubi->peb_size;
	ei.callback = erase_callback;
	ei.priv     = (unsigned long)&wq;

	err = ubi->mtd->erase(ubi->mtd, &ei);
	if (err) {
		/* Submission failures are retried up to UBI_IO_RETRIES times */
		if (retries++ < UBI_IO_RETRIES) {
			dbg_io("error %d while erasing PEB %d, retry",
			       err, pnum);
			yield();
			goto retry;
		}
		ubi_err("cannot erase PEB %d, error %d", pnum, err);
		ubi_dbg_dump_stack();
		return err;
	}

	/* Wait for the (possibly asynchronous) erase to finish */
	err = wait_event_interruptible(wq, ei.state == MTD_ERASE_DONE ||
					   ei.state == MTD_ERASE_FAILED);
	if (err) {
		ubi_err("interrupted PEB %d erasure", pnum);
		return -EINTR;
	}

	if (ei.state == MTD_ERASE_FAILED) {
		if (retries++ < UBI_IO_RETRIES) {
			dbg_io("error while erasing PEB %d, retry", pnum);
			yield();
			goto retry;
		}
		ubi_err("cannot erase PEB %d", pnum);
		ubi_dbg_dump_stack();
		return -EIO;
	}

	err = paranoid_check_all_ff(ubi, pnum, 0, ubi->peb_size);
	if (err)
		return err > 0 ? -EINVAL : err;

	/*
	 * NOTE(review): err is always 0 here (non-zero returned above), so
	 * the "&& !err" condition is redundant; kept as-is.
	 */
	if (ubi_dbg_is_erase_failure() && !err) {
		dbg_err("cannot erase PEB %d (emulated)", pnum);
		return -EIO;
	}

	return 0;
}

/**
 * check_pattern - check if buffer contains only a certain byte pattern.
 * @buf: buffer to check
 * @patt: the pattern to check
 * @size: buffer size in bytes
 *
 * This function returns %1 in there are only @patt bytes in @buf, and %0 if
 * something else was also found.
 */
static int check_pattern(const void *buf, uint8_t patt, int size)
{
	int i;

	for (i = 0; i < size; i++)
		if (((const uint8_t *)buf)[i] != patt)
			return 0;
	return 1;
}

/* Patterns to write to a physical eraseblock when torturing it */
static uint8_t patterns[] = {0xa5, 0x5a, 0x0};

/**
 * torture_peb - test a supposedly bad physical eraseblock.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to test
 *
 * This function returns %-EIO if the physical eraseblock did not pass the
 * test, a positive number of erase operations done if the test was
 * successfully passed, and other negative error codes in case of other errors.
 */
static int torture_peb(struct ubi_device *ubi, int pnum)
{
	int err, i, patt_count;

	patt_count = ARRAY_SIZE(patterns);
	ubi_assert(patt_count > 0);

	/* peb_buf1 is shared scratch space, hence the buf_mutex */
	mutex_lock(&ubi->buf_mutex);
	for (i = 0; i < patt_count; i++) {
		err = do_sync_erase(ubi, pnum);
		if (err)
			goto out;

		/* Make sure the PEB contains only 0xFF bytes */
		err = ubi_io_read(ubi, ubi->peb_buf1, pnum, 0, ubi->peb_size);
		if (err)
			goto out;

		err = check_pattern(ubi->peb_buf1, 0xFF, ubi->peb_size);
		if (err == 0) {
			ubi_err("erased PEB %d, but a non-0xFF byte found",
				pnum);
			err = -EIO;
			goto out;
		}

		/* Write a pattern and check it */
		memset(ubi->peb_buf1, patterns[i], ubi->peb_size);
		err = ubi_io_write(ubi, ubi->peb_buf1, pnum, 0, ubi->peb_size);
		if (err)
			goto out;

		/* Clobber the buffer before reading back, to avoid stale data */
		memset(ubi->peb_buf1, ~patterns[i], ubi->peb_size);
		err = ubi_io_read(ubi, ubi->peb_buf1, pnum, 0, ubi->peb_size);
		if (err)
			goto out;

		err = check_pattern(ubi->peb_buf1, patterns[i], ubi->peb_size);
		if (err == 0) {
			ubi_err("pattern %x checking failed for PEB %d",
				patterns[i], pnum);
			err = -EIO;
			goto out;
		}
	}

	/* Success: report how many erase cycles the test consumed */
	err = patt_count;

out:
	mutex_unlock(&ubi->buf_mutex);
	if (err == UBI_IO_BITFLIPS || err == -EBADMSG) {
		/*
		 * If a bit-flip or data integrity error was detected, the test
		 * has not passed because it happened on a freshly erased
		 * physical eraseblock which means something is wrong with it.
		 */
		ubi_err("read problems on freshly erased PEB %d, must be bad",
			pnum);
		err = -EIO;
	}
	return err;
}

/**
 * ubi_io_sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number to erase
 * @torture: if this physical eraseblock has to be tortured
 *
 * This function synchronously erases physical eraseblock @pnum. If @torture
 * flag is not zero, the physical eraseblock is checked by means of writing
 * different patterns to it and reading them back. If the torturing is enabled,
 * the physical eraseblock is erased more then once.
 *
 * This function returns the number of erasures made in case of success, %-EIO
 * if the erasure failed or the torturing test failed, and other negative error
 * codes in case of other errors. Note, %-EIO means that the physical
 * eraseblock is bad.
 */
int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture)
{
	int err, ret = 0;

	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	err = paranoid_check_not_bad(ubi, pnum);
	if (err != 0)
		return err > 0 ? -EINVAL : err;

	if (ubi->ro_mode) {
		ubi_err("read-only mode");
		return -EROFS;
	}

	if (torture) {
		/* ret holds the erase count consumed by the torture test */
		ret = torture_peb(ubi, pnum);
		if (ret < 0)
			return ret;
	}

	err = do_sync_erase(ubi, pnum);
	if (err)
		return err;

	/* +1 accounts for the final do_sync_erase() above */
	return ret + 1;
}

/**
 * ubi_io_is_bad - check if a physical eraseblock is bad.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 *
 * This function returns a positive number if the physical eraseblock is bad,
 * zero if not, and a negative error code if an error occurred.
 */
int ubi_io_is_bad(const struct ubi_device *ubi, int pnum)
{
	struct mtd_info *mtd = ubi->mtd;

	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	if (ubi->bad_allowed) {
		int ret;

		ret = mtd->block_isbad(mtd, (loff_mtd_t)pnum * ubi->peb_size);
		if (ret < 0)
			ubi_err("error %d while checking if PEB %d is bad",
				ret, pnum);
		else if (ret)
			dbg_io("PEB %d is bad", pnum);
		return ret;
	}

	/* Flash without bad-block support: nothing can be marked bad */
	return 0;
}

/**
 * ubi_io_mark_bad - mark a physical eraseblock as bad.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to mark
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum)
{
	int err;
	struct mtd_info *mtd = ubi->mtd;

	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	if (ubi->ro_mode) {
		ubi_err("read-only mode");
		return -EROFS;
	}

	/* Silently succeed if the flash does not support bad-block marking */
	if (!ubi->bad_allowed)
		return 0;

	err = mtd->block_markbad(mtd, (loff_mtd_t)pnum * ubi->peb_size);
	if (err)
		ubi_err("cannot mark PEB %d bad, error %d", pnum, err);
	return err;
}

/**
 * validate_ec_hdr - validate an erase counter header.
 * @ubi: UBI device description object
 * @ec_hdr: the erase counter header to check
 *
 * This function returns zero if the erase counter header is OK, and %1 if
 * not.
 */
static int validate_ec_hdr(const struct ubi_device *ubi,
			   const struct ubi_ec_hdr *ec_hdr)
{
	long long ec;
	int vid_hdr_offset, leb_start;

	/* On-flash fields are big-endian */
	ec = be64_to_cpu(ec_hdr->ec);
	vid_hdr_offset = be32_to_cpu(ec_hdr->vid_hdr_offset);
	leb_start = be32_to_cpu(ec_hdr->data_offset);

	if (ec_hdr->version != UBI_VERSION) {
		ubi_err("node with incompatible UBI version found: "
			"this UBI version is %d, image version is %d",
			UBI_VERSION, (int)ec_hdr->version);
		goto bad;
	}

	if (vid_hdr_offset != ubi->vid_hdr_offset) {
		ubi_err("bad VID header offset %d, expected %d",
			vid_hdr_offset, ubi->vid_hdr_offset);
		goto bad;
	}

	if (leb_start != ubi->leb_start) {
		ubi_err("bad data offset %d, expected %d",
			leb_start, ubi->leb_start);
		goto bad;
	}

	if (ec < 0 || ec > UBI_MAX_ERASECOUNTER) {
		ubi_err("bad erase counter %lld", ec);
		goto bad;
	}

	return 0;

bad:
	ubi_err("bad EC header");
	ubi_dbg_dump_ec_hdr(ec_hdr);
	ubi_dbg_dump_stack();
	return 1;
}

/**
 * ubi_io_read_ec_hdr - read and check an erase counter header.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock to read from
 * @ec_hdr: a &struct ubi_ec_hdr object where to store the read erase counter
 * header
 * @verbose: be verbose if the header is corrupted or was not found
 *
 * This function reads erase counter header from physical eraseblock @pnum and
 * stores it in @ec_hdr. This function also checks CRC checksum of the read
 * erase counter header. The following codes may be returned:
 *
 * o %0 if the CRC checksum is correct and the header was successfully read;
 * o %UBI_IO_BITFLIPS if the CRC is correct, but bit-flips were detected
 *   and corrected by the flash driver; this is harmless but may indicate that
 *   this eraseblock may become bad soon (but may be not);
 * o %UBI_IO_BAD_EC_HDR if the erase counter header is corrupted (a CRC error);
 * o %UBI_IO_PEB_EMPTY if the physical eraseblock is empty;
 * o a negative error code in case of failure.
 */
int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
		       struct ubi_ec_hdr *ec_hdr, int verbose)
{
	int err, read_err = 0;
	uint32_t crc, magic, hdr_crc;

	dbg_io("read EC header from PEB %d", pnum);
	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
	if (UBI_IO_DEBUG)
		verbose = 1;

	err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
	if (err) {
		if (err != UBI_IO_BITFLIPS && err != -EBADMSG)
			return err;

		/*
		 * We read all the data, but either a correctable bit-flip
		 * occurred, or MTD reported about some data integrity error,
		 * like an ECC error in case of NAND. The former is harmless,
		 * the later may mean that the read data is corrupted. But we
		 * have a CRC check-sum and we will detect this. If the EC
		 * header is still OK, we just report this as there was a
		 * bit-flip.
		 */
		read_err = err;
	}

	magic = be32_to_cpu(ec_hdr->magic);
	if (magic != UBI_EC_HDR_MAGIC) {
		/*
		 * The magic field is wrong. Let's check if we have read all
		 * 0xFF. If yes, this physical eraseblock is assumed to be
		 * empty.
		 *
		 * But if there was a read error, we do not test it for all
		 * 0xFFs. Even if it does contain all 0xFFs, this error
		 * indicates that something is still wrong with this physical
		 * eraseblock and we anyway cannot treat it as empty.
		 */
		if (read_err != -EBADMSG &&
		    check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
			/* The physical eraseblock is supposedly empty */

			/*
			 * The below is just a paranoid check, it has to be
			 * compiled out if paranoid checks are disabled.
			 */
			err = paranoid_check_all_ff(ubi, pnum, 0,
						    ubi->peb_size);
			if (err)
				return err > 0 ? UBI_IO_BAD_EC_HDR : err;

			if (verbose)
				ubi_warn("no EC header found at PEB %d, "
					 "only 0xFF bytes", pnum);
			return UBI_IO_PEB_EMPTY;
		}

		/*
		 * This is not a valid erase counter header, and these are not
		 * 0xFF bytes. Report that the header is corrupted.
		 */
		if (verbose) {
			ubi_warn("bad magic number at PEB %d: %08x instead of "
				 "%08x", pnum, magic, UBI_EC_HDR_MAGIC);
			ubi_dbg_dump_ec_hdr(ec_hdr);
		}
		return UBI_IO_BAD_EC_HDR;
	}

	/* CRC covers the whole header except the trailing hdr_crc field */
	crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
	hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);

	if (hdr_crc != crc) {
		if (verbose) {
			ubi_warn("bad EC header CRC at PEB %d, calculated %#08x,"
				 " read %#08x", pnum, crc, hdr_crc);
			ubi_dbg_dump_ec_hdr(ec_hdr);
		}
		return UBI_IO_BAD_EC_HDR;
	}

	/* And of course validate what has just been read from the media */
	err = validate_ec_hdr(ubi, ec_hdr);
	if (err) {
		ubi_err("validation failed for PEB %d", pnum);
		return -EINVAL;
	}

	return read_err ? UBI_IO_BITFLIPS : 0;
}

/**
 * ubi_io_write_ec_hdr - write an erase counter header.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock to write to
 * @ec_hdr: the erase counter header to write
 *
 * This function writes erase counter header described by @ec_hdr to physical
 * eraseblock @pnum. It also fills most fields of @ec_hdr before writing, so
 * the caller do not have to fill them. Callers must only fill the @ec_hdr->ec
 * field.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
If %-EIO is returned, the physical eraseblock most probably
 * went bad.
 */
int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
			struct ubi_ec_hdr *ec_hdr)
{
	int err;
	uint32_t crc;

	dbg_io("write EC header to PEB %d", pnum);
	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	/* Fill everything except the caller-provided ec field */
	ec_hdr->magic = cpu_to_be32(UBI_EC_HDR_MAGIC);
	ec_hdr->version = UBI_VERSION;
	ec_hdr->vid_hdr_offset = cpu_to_be32(ubi->vid_hdr_offset);
	ec_hdr->data_offset = cpu_to_be32(ubi->leb_start);
	crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
	ec_hdr->hdr_crc = cpu_to_be32(crc);

	err = paranoid_check_ec_hdr(ubi, pnum, ec_hdr);
	if (err)
		return -EINVAL;

	err = ubi_io_write(ubi, ec_hdr, pnum, 0, ubi->ec_hdr_alsize);
	return err;
}

/**
 * validate_vid_hdr - validate a volume identifier header.
 * @ubi: UBI device description object
 * @vid_hdr: the volume identifier header to check
 *
 * This function checks that data stored in the volume identifier header
 * @vid_hdr. Returns zero if the VID header is OK and %1 if not.
 */
static int validate_vid_hdr(const struct ubi_device *ubi,
			    const struct ubi_vid_hdr *vid_hdr)
{
	int vol_type = vid_hdr->vol_type;
	int copy_flag = vid_hdr->copy_flag;
	int vol_id = be32_to_cpu(vid_hdr->vol_id);
	int lnum = be32_to_cpu(vid_hdr->lnum);
	int compat = vid_hdr->compat;
	int data_size = be32_to_cpu(vid_hdr->data_size);
	int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
	int data_pad = be32_to_cpu(vid_hdr->data_pad);
	int data_crc = be32_to_cpu(vid_hdr->data_crc);
	int usable_leb_size = ubi->leb_size - data_pad;

	if (copy_flag != 0 && copy_flag != 1) {
		dbg_err("bad copy_flag");
		goto bad;
	}

	if (vol_id < 0 || lnum < 0 || data_size < 0 || used_ebs < 0 ||
	    data_pad < 0) {
		dbg_err("negative values");
		goto bad;
	}

	if (vol_id >= UBI_MAX_VOLUMES && vol_id < UBI_INTERNAL_VOL_START) {
		dbg_err("bad vol_id");
		goto bad;
	}

	/* Only internal volumes may carry a non-zero compat flag */
	if (vol_id < UBI_INTERNAL_VOL_START && compat != 0) {
		dbg_err("bad compat");
		goto bad;
	}

	if (vol_id >= UBI_INTERNAL_VOL_START && compat != UBI_COMPAT_DELETE &&
	    compat != UBI_COMPAT_RO && compat != UBI_COMPAT_PRESERVE &&
	    compat != UBI_COMPAT_REJECT) {
		dbg_err("bad compat");
		goto bad;
	}

	if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
		dbg_err("bad vol_type");
		goto bad;
	}

	if (data_pad >= ubi->leb_size / 2) {
		dbg_err("bad data_pad");
		goto bad;
	}

	if (vol_type == UBI_VID_STATIC) {
		/*
		 * Although from high-level point of view static volumes may
		 * contain zero bytes of data, but no VID headers can contain
		 * zero at these fields, because they empty volumes do not have
		 * mapped logical eraseblocks.
		 */
		if (used_ebs == 0) {
			dbg_err("zero used_ebs");
			goto bad;
		}
		if (data_size == 0) {
			dbg_err("zero data_size");
			goto bad;
		}
		if (lnum < used_ebs - 1) {
			/* All but the last LEB of a static volume are full */
			if (data_size != usable_leb_size) {
				dbg_err("bad data_size");
				goto bad;
			}
		} else if (lnum == used_ebs - 1) {
			if (data_size == 0) {
				dbg_err("bad data_size at last LEB");
				goto bad;
			}
		} else {
			dbg_err("too high lnum");
			goto bad;
		}
	} else {
		if (copy_flag == 0) {
			/* Plain dynamic LEBs carry no data size/CRC */
			if (data_crc != 0) {
				dbg_err("non-zero data CRC");
				goto bad;
			}
			if (data_size != 0) {
				dbg_err("non-zero data_size");
				goto bad;
			}
		} else {
			if (data_size == 0) {
				dbg_err("zero data_size of copy");
				goto bad;
			}
		}
		if (used_ebs != 0) {
			dbg_err("bad used_ebs");
			goto bad;
		}
	}

	return 0;

bad:
	ubi_err("bad VID header");
	ubi_dbg_dump_vid_hdr(vid_hdr);
	ubi_dbg_dump_stack();
	return 1;
}

/**
 * ubi_io_read_vid_hdr - read and check a volume identifier header.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number to read from
 * @vid_hdr: &struct ubi_vid_hdr object where to store the read volume
 * identifier header
 * @verbose: be verbose if the header is corrupted or wasn't found
 *
 * This function reads the volume identifier header from physical eraseblock
 * @pnum and stores it in @vid_hdr. It also checks CRC checksum of the read
 * volume identifier header.
The following codes may be returned:
 *
 * o %0 if the CRC checksum is correct and the header was successfully read;
 * o %UBI_IO_BITFLIPS if the CRC is correct, but bit-flips were detected
 *   and corrected by the flash driver; this is harmless but may indicate that
 *   this eraseblock may become bad soon;
 * o %UBI_IO_BAD_VID_HRD if the volume identifier header is corrupted (a CRC
 *   error detected);
 * o %UBI_IO_PEB_FREE if the physical eraseblock is free (i.e., there is no VID
 *   header there);
 * o a negative error code in case of failure.
 */
int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
			struct ubi_vid_hdr *vid_hdr, int verbose)
{
	int err, read_err = 0;
	uint32_t crc, magic, hdr_crc;
	void *p;

	dbg_io("read VID header from PEB %d", pnum);
	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
	if (UBI_IO_DEBUG)
		verbose = 1;

	/*
	 * The VID header may not be at the start of its min-I/O unit;
	 * vid_hdr_shift rebases the buffer to the aligned read offset.
	 */
	p = (char *)vid_hdr - ubi->vid_hdr_shift;
	err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
			  ubi->vid_hdr_alsize);
	if (err) {
		if (err != UBI_IO_BITFLIPS && err != -EBADMSG)
			return err;

		/*
		 * We read all the data, but either a correctable bit-flip
		 * occurred, or MTD reported about some data integrity error,
		 * like an ECC error in case of NAND. The former is harmless,
		 * the later may mean the read data is corrupted. But we have a
		 * CRC check-sum and we will identify this. If the VID header is
		 * still OK, we just report this as there was a bit-flip.
		 */
		read_err = err;
	}

	magic = be32_to_cpu(vid_hdr->magic);
	if (magic != UBI_VID_HDR_MAGIC) {
		/*
		 * If we have read all 0xFF bytes, the VID header probably does
		 * not exist and the physical eraseblock is assumed to be free.
		 *
		 * But if there was a read error, we do not test the data for
		 * 0xFFs. Even if it does contain all 0xFFs, this error
		 * indicates that something is still wrong with this physical
		 * eraseblock and it cannot be regarded as free.
		 */
		if (read_err != -EBADMSG &&
		    check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
			/* The physical eraseblock is supposedly free */

			/*
			 * The below is just a paranoid check, it has to be
			 * compiled out if paranoid checks are disabled.
			 */
			err = paranoid_check_all_ff(ubi, pnum, ubi->leb_start,
						    ubi->leb_size);
			if (err)
				return err > 0 ? UBI_IO_BAD_VID_HDR : err;

			if (verbose)
				ubi_warn("no VID header found at PEB %d, "
					 "only 0xFF bytes", pnum);
			return UBI_IO_PEB_FREE;
		}

		/*
		 * This is not a valid VID header, and these are not 0xFF
		 * bytes. Report that the header is corrupted.
		 */
		if (verbose) {
			ubi_warn("bad magic number at PEB %d: %08x instead of "
				 "%08x", pnum, magic, UBI_VID_HDR_MAGIC);
			ubi_dbg_dump_vid_hdr(vid_hdr);
		}
		return UBI_IO_BAD_VID_HDR;
	}

	/* CRC covers the whole header except the trailing hdr_crc field */
	crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
	hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);

	if (hdr_crc != crc) {
		if (verbose) {
			ubi_warn("bad CRC at PEB %d, calculated %#08x, "
				 "read %#08x", pnum, crc, hdr_crc);
			ubi_dbg_dump_vid_hdr(vid_hdr);
		}
		return UBI_IO_BAD_VID_HDR;
	}

	/* Validate the VID header that we have just read */
	err = validate_vid_hdr(ubi, vid_hdr);
	if (err) {
		ubi_err("validation failed for PEB %d", pnum);
		return -EINVAL;
	}

	return read_err ? UBI_IO_BITFLIPS : 0;
}

/**
 * ubi_io_write_vid_hdr - write a volume identifier header.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to write to
 * @vid_hdr: the volume identifier header to write
 *
 * This function writes the volume identifier header described by @vid_hdr to
 * physical eraseblock @pnum. This function automatically fills the
 * @vid_hdr->magic and the @vid_hdr->version fields, as well as calculates
 * header CRC checksum and stores it at vid_hdr->hdr_crc.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure. If %-EIO is returned, the physical eraseblock probably went
 * bad.
 */
int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
			 struct ubi_vid_hdr *vid_hdr)
{
	int err;
	uint32_t crc;
	void *p;

	dbg_io("write VID header to PEB %d", pnum);
	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	/* A PEB must already carry a valid EC header before a VID header */
	err = paranoid_check_peb_ec_hdr(ubi, pnum);
	if (err)
		return err > 0 ? -EINVAL: err;

	vid_hdr->magic = cpu_to_be32(UBI_VID_HDR_MAGIC);
	vid_hdr->version = UBI_VERSION;
	crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
	vid_hdr->hdr_crc = cpu_to_be32(crc);

	err = paranoid_check_vid_hdr(ubi, pnum, vid_hdr);
	if (err)
		return -EINVAL;

	/* Rebase to the aligned write offset (see ubi_io_read_vid_hdr()) */
	p = (char *)vid_hdr - ubi->vid_hdr_shift;
	err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset,
			   ubi->vid_hdr_alsize);
	return err;
}

#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID

/**
 * paranoid_check_not_bad - ensure that a physical eraseblock is not bad.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number to check
 *
 * This function returns zero if the physical eraseblock is good, a positive
 * number if it is bad and a negative error code if an error occurred.
 */
static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum)
{
	int err;

	err = ubi_io_is_bad(ubi, pnum);
	if (!err)
		return err;

	ubi_err("paranoid check failed for PEB %d", pnum);
	ubi_dbg_dump_stack();
	return err;
}

/**
 * paranoid_check_ec_hdr - check if an erase counter header is all right.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number the erase counter header belongs to
 * @ec_hdr: the erase counter header to check
 *
 * This function returns zero if the erase counter header contains valid
 * values, and %1 if not.
 */
static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
				 const struct ubi_ec_hdr *ec_hdr)
{
	int err;
	uint32_t magic;

	magic = be32_to_cpu(ec_hdr->magic);
	if (magic != UBI_EC_HDR_MAGIC) {
		ubi_err("bad magic %#08x, must be %#08x",
			magic, UBI_EC_HDR_MAGIC);
		goto fail;
	}

	err = validate_ec_hdr(ubi, ec_hdr);
	if (err) {
		ubi_err("paranoid check failed for PEB %d", pnum);
		goto fail;
	}

	return 0;

fail:
	ubi_dbg_dump_ec_hdr(ec_hdr);
	ubi_dbg_dump_stack();
	return 1;
}

/**
 * paranoid_check_peb_ec_hdr - check that the erase counter header of a
 * physical eraseblock is in-place and is all right.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 *
 * This function returns zero if the erase counter header is all right, %1 if
 * not, and a negative error code if an error occurred.
 */
static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
{
	int err;
	uint32_t crc, hdr_crc;
	struct ubi_ec_hdr *ec_hdr;

	/* GFP_NOFS: may be called from the filesystem write-out path */
	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
	if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG)
		goto exit;

	crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
	hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
	if (hdr_crc != crc) {
		ubi_err("bad CRC, calculated %#08x, read %#08x", crc, hdr_crc);
		ubi_err("paranoid check failed for PEB %d", pnum);
		ubi_dbg_dump_ec_hdr(ec_hdr);
		ubi_dbg_dump_stack();
		err = 1;
		goto exit;
	}

	err = paranoid_check_ec_hdr(ubi, pnum, ec_hdr);

exit:
	kfree(ec_hdr);
	return err;
}

/**
 * paranoid_check_vid_hdr - check that a volume identifier header is all right.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number the volume identifier header belongs to
 * @vid_hdr: the volume identifier header to check
 *
 * This function returns zero if the volume identifier header is all right, and
 * %1 if not.
*/ static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum, const struct ubi_vid_hdr *vid_hdr) { int err; uint32_t magic; magic = be32_to_cpu(vid_hdr->magic); if (magic != UBI_VID_HDR_MAGIC) { ubi_err("bad VID header magic %#08x at PEB %d, must be %#08x", magic, pnum, UBI_VID_HDR_MAGIC); goto fail; } err = validate_vid_hdr(ubi, vid_hdr); if (err) { ubi_err("paranoid check failed for PEB %d", pnum); goto fail; } return err; fail: ubi_err("paranoid check failed for PEB %d", pnum); ubi_dbg_dump_vid_hdr(vid_hdr); ubi_dbg_dump_stack(); return 1; } /** * paranoid_check_peb_vid_hdr - check that the volume identifier header of a * physical eraseblock is in-place and is all right. * @ubi: UBI device description object * @pnum: the physical eraseblock number to check * * This function returns zero if the volume identifier header is all right, * %1 if not, and a negative error code if an error occurred. */ static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum) { int err; uint32_t crc, hdr_crc; struct ubi_vid_hdr *vid_hdr; void *p; vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); if (!vid_hdr) return -ENOMEM; p = (char *)vid_hdr - ubi->vid_hdr_shift; err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset, ubi->vid_hdr_alsize); if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG) goto exit; crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC); hdr_crc = be32_to_cpu(vid_hdr->hdr_crc); if (hdr_crc != crc) { ubi_err("bad VID header CRC at PEB %d, calculated %#08x, " "read %#08x", pnum, crc, hdr_crc); ubi_err("paranoid check failed for PEB %d", pnum); ubi_dbg_dump_vid_hdr(vid_hdr); ubi_dbg_dump_stack(); err = 1; goto exit; } err = paranoid_check_vid_hdr(ubi, pnum, vid_hdr); exit: ubi_free_vid_hdr(ubi, vid_hdr); return err; } /** * paranoid_check_all_ff - check that a region of flash is empty. 
* @ubi: UBI device description object * @pnum: the physical eraseblock number to check * @offset: the starting offset within the physical eraseblock to check * @len: the length of the region to check * * This function returns zero if only 0xFF bytes are present at offset * @offset of the physical eraseblock @pnum, %1 if not, and a negative error * code if an error occurred. */ static int paranoid_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len) { size_mtd_t read; int err; loff_mtd_t addr = (loff_mtd_t)pnum * ubi->peb_size + offset; mutex_lock(&ubi->dbg_buf_mutex); err = ubi->mtd->read(ubi->mtd, addr, len, &read, ubi->dbg_peb_buf); if (err && err != -EUCLEAN) { ubi_err("error %d while reading %d bytes from PEB %d:%d, " "read %lld bytes", err, len, pnum, offset, read); goto error; } err = check_pattern(ubi->dbg_peb_buf, 0xFF, len); if (err == 0) { ubi_err("flash region at PEB %d:%d, length %d does not " "contain all 0xFF bytes", pnum, offset, len); goto fail; } mutex_unlock(&ubi->dbg_buf_mutex); return 0; fail: ubi_err("paranoid check failed for PEB %d", pnum); dbg_msg("hex dump of the %d-%d region", offset, offset + len); print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, ubi->dbg_peb_buf, len, 1); err = 1; error: ubi_dbg_dump_stack(); mutex_unlock(&ubi->dbg_buf_mutex); return err; } #endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
Java
""" This page is in the table of contents. Plugin to home the tool at beginning of each layer. The home manual page is at: http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Home ==Operation== The default 'Activate Home' checkbox is on. When it is on, the functions described below will work, when it is off, nothing will be done. ==Settings== ===Name of Home File=== Default: home.gcode At the beginning of a each layer, home will add the commands of a gcode script with the name of the "Name of Home File" setting, if one exists. Home does not care if the text file names are capitalized, but some file systems do not handle file name cases properly, so to be on the safe side you should give them lower case names. Home looks for those files in the alterations folder in the .skeinforge folder in the home directory. If it doesn't find the file it then looks in the alterations folder in the skeinforge_plugins folder. ==Examples== The following examples home the file Screw Holder Bottom.stl. The examples are run in a terminal in the folder which contains Screw Holder Bottom.stl and home.py. > python home.py This brings up the home dialog. > python home.py Screw Holder Bottom.stl The home tool is parsing the file: Screw Holder Bottom.stl .. The home tool has created the file: .. Screw Holder Bottom_home.gcode """ from __future__ import absolute_import #Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module. 
import __init__

from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import archive
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import gcodec
from fabmetheus_utilities import settings
from skeinforge_application.skeinforge_utilities import skeinforge_craft
from skeinforge_application.skeinforge_utilities import skeinforge_polyfile
from skeinforge_application.skeinforge_utilities import skeinforge_profile
import math
import os
import sys


__author__ = 'Enrique Perez ([email protected])'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'


def getCraftedText( fileName, text, repository = None ):
	"Home a gcode linear move file or text."
	return getCraftedTextFromText(archive.getTextIfEmpty(fileName, text), repository)

def getCraftedTextFromText( gcodeText, repository = None ):
	"Home a gcode linear move text."
	# Skip work when the text has already been run through the home procedure.
	if gcodec.isProcedureDoneOrFileIsEmpty( gcodeText, 'home'):
		return gcodeText
	# Idiom fix: compare against the None singleton with 'is', not '=='.
	if repository is None:
		repository = settings.getReadRepository( HomeRepository() )
	if not repository.activateHome.value:
		return gcodeText
	return HomeSkein().getCraftedGcode(gcodeText, repository)

def getNewRepository():
	'Get new repository.'
	return HomeRepository()

def writeOutput(fileName, shouldAnalyze=True):
	"Home a gcode linear move file.  Chain home the gcode if it is not already homed."
	skeinforge_craft.writeChainTextWithNounMessage(fileName, 'home', shouldAnalyze)


class HomeRepository:
	"A class to handle the home settings."
	def __init__(self):
		"Set the default settings, execute title & settings fileName."
		skeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_plugins.craft_plugins.home.html', self)
		self.fileNameInput = settings.FileNameInput().getFromFileName( fabmetheus_interpret.getGNUTranslatorGcodeFileTypeTuples(), 'Open File for Home', self, '')
		self.openWikiManualHelpPage = settings.HelpPage().getOpenFromAbsolute('http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Home')
		self.activateHome = settings.BooleanSetting().getFromValue('Activate Home', self, True )
		self.nameOfHomeFile = settings.StringSetting().getFromValue('Name of Home File:', self, 'home.gcode')
		self.executeTitle = 'Home'

	def execute(self):
		"Home button has been clicked."
		fileNames = skeinforge_polyfile.getFileOrDirectoryTypesUnmodifiedGcode(self.fileNameInput.value, fabmetheus_interpret.getImportPluginFileNames(), self.fileNameInput.wasCancelled)
		for fileName in fileNames:
			writeOutput(fileName)


class HomeSkein:
	"A class to home a skein of extrusions."
	def __init__(self):
		self.distanceFeedRate = gcodec.DistanceFeedRate()
		self.extruderActive = False
		self.highestZ = None       # highest z seen so far; None until the first G1 is parsed
		self.homeLines = []        # gcode lines of the alteration home file, if any
		self.layerCount = settings.LayerCount()
		self.lineIndex = 0
		self.lines = None
		self.oldLocation = None    # last G1 location, used as the return point after homing
		self.shouldHome = False    # set at each layer start, consumed by the next G1
		self.travelFeedRateMinute = 957.0

	def addFloat( self, begin, end ):
		"Add dive to the original height."
		# NOTE(review): assumes begin != end; a zero beginEndDistance would
		# divide by zero.  Also relies on absolutePerimeterWidth having been
		# set by parseInitialization -- confirm the (<perimeterWidth> tag is
		# always present in crafted gcode.
		beginEndDistance = begin.distance(end)
		alongWay = self.absolutePerimeterWidth / beginEndDistance
		closeToEnd = euclidean.getIntermediateLocation( alongWay, end, begin )
		closeToEnd.z = self.highestZ
		self.distanceFeedRate.addLine( self.distanceFeedRate.getLinearGcodeMovementWithFeedRate( self.travelFeedRateMinute, closeToEnd.dropAxis(), closeToEnd.z ) )

	def addHomeTravel( self, splitLine ):
		"Add the home travel gcode."
		location = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine)
		# In Python 2, max(None, number) returns the number, so the first
		# G1 simply initializes highestZ.
		self.highestZ = max( self.highestZ, location.z )
		if not self.shouldHome:
			return
		self.shouldHome = False
		# Idiom fix: compare against the None singleton with 'is', not '=='.
		if self.oldLocation is None:
			return
		# Turn the extruder off while travelling, home, then restore it.
		if self.extruderActive:
			self.distanceFeedRate.addLine('M103')
		self.addHopUp( self.oldLocation )
		self.distanceFeedRate.addLinesSetAbsoluteDistanceMode(self.homeLines)
		self.addHopUp( self.oldLocation )
		self.addFloat( self.oldLocation, location )
		if self.extruderActive:
			self.distanceFeedRate.addLine('M101')

	def addHopUp(self, location):
		"Add hop to highest point."
		locationUp = Vector3( location.x, location.y, self.highestZ )
		self.distanceFeedRate.addLine( self.distanceFeedRate.getLinearGcodeMovementWithFeedRate( self.travelFeedRateMinute, locationUp.dropAxis(), locationUp.z ) )

	def getCraftedGcode( self, gcodeText, repository ):
		"Parse gcode text and store the home gcode."
		self.repository = repository
		self.homeLines = settings.getAlterationFileLines(repository.nameOfHomeFile.value)
		# Without a home alteration file there is nothing to insert.
		if len(self.homeLines) < 1:
			return gcodeText
		self.lines = archive.getTextLines(gcodeText)
		self.parseInitialization( repository )
		# parseInitialization leaves lineIndex on the first body line.
		for self.lineIndex in xrange(self.lineIndex, len(self.lines)):
			line = self.lines[self.lineIndex]
			self.parseLine(line)
		return self.distanceFeedRate.output.getvalue()

	def parseInitialization( self, repository ):
		'Parse gcode initialization and store the parameters.'
		for self.lineIndex in xrange(len(self.lines)):
			line = self.lines[self.lineIndex]
			splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
			firstWord = gcodec.getFirstWord(splitLine)
			self.distanceFeedRate.parseSplitLine(firstWord, splitLine)
			if firstWord == '(</extruderInitialization>)':
				# End of the header: tag the output as homed and stop.
				self.distanceFeedRate.addTagBracketedProcedure('home')
				return
			elif firstWord == '(<perimeterWidth>':
				self.absolutePerimeterWidth = abs(float(splitLine[1]))
			elif firstWord == '(<travelFeedRatePerSecond>':
				self.travelFeedRateMinute = 60.0 * float(splitLine[1])
			self.distanceFeedRate.addLine(line)

	def parseLine(self, line):
		"Parse a gcode line and add it to the home gcode."
		splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
		if len(splitLine) < 1:
			return
		firstWord = splitLine[0]
		if firstWord == 'G1':
			self.addHomeTravel(splitLine)
			self.oldLocation = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine)
		elif firstWord == '(<layer>':
			self.layerCount.printProgressIncrement('home')
			# Request a homing cycle at the next G1 of this layer.
			if len(self.homeLines) > 0:
				self.shouldHome = True
		elif firstWord == 'M101':
			self.extruderActive = True
		elif firstWord == 'M103':
			self.extruderActive = False
		self.distanceFeedRate.addLine(line)


def main():
	"Display the home dialog."
	if len(sys.argv) > 1:
		writeOutput(' '.join(sys.argv[1 :]))
	else:
		settings.startMainLoopFromConstructor(getNewRepository())

if __name__ == "__main__":
	main()
Java
/* * Copyright (C) 2005-2018 Team Kodi * This file is part of Kodi - https://kodi.tv * * SPDX-License-Identifier: GPL-2.0-or-later * See LICENSES/README.md for more information. */ #pragma once #include "settings/dialogs/GUIDialogSettingsManualBase.h" class CFileItem; namespace PERIPHERALS { class CGUIDialogPeripheralSettings : public CGUIDialogSettingsManualBase { public: CGUIDialogPeripheralSettings(); ~CGUIDialogPeripheralSettings() override; // specializations of CGUIControl bool OnMessage(CGUIMessage& message) override; virtual void SetFileItem(const CFileItem* item); protected: // implementations of ISettingCallback void OnSettingChanged(std::shared_ptr<const CSetting> setting) override; // specialization of CGUIDialogSettingsBase bool AllowResettingSettings() const override { return false; } void Save() override; void OnResetSettings() override; void SetupView() override; // specialization of CGUIDialogSettingsManualBase void InitializeSettings() override; CFileItem* m_item; bool m_initialising = false; std::map<std::string, std::shared_ptr<CSetting>> m_settingsMap; }; } // namespace PERIPHERALS
Java
/*
 * This file is part of the TrinityCore Project. See AUTHORS file for Copyright information
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 */

// Scholomance boss script: Jandice Barov.  She periodically curses her
// victim and vanishes behind a pack of summoned illusions, reappearing a
// few seconds later with most of her threat wiped.

#include "ScriptMgr.h"
#include "scholomance.h"
#include "ScriptedCreature.h"

enum Spells
{
    SPELL_CURSE_OF_BLOOD    = 24673,
    SPELL_ILLUSION          = 17773,
    SPELL_DROP_JOURNAL      = 26096
};

enum Events
{
    EVENT_CURSE_OF_BLOOD    = 1,
    EVENT_ILLUSION,
    EVENT_CLEAVE,           // NOTE(review): declared but never scheduled or handled below
    EVENT_SET_VISIBILITY
};

class boss_jandice_barov : public CreatureScript
{
public:
    boss_jandice_barov() : CreatureScript("boss_jandice_barov") { }

    struct boss_jandicebarovAI : public ScriptedAI
    {
        boss_jandicebarovAI(Creature* creature) : ScriptedAI(creature), Summons(me) { }

        // Clear pending events and despawn any leftover illusions on reset.
        void Reset() override
        {
            events.Reset();
            Summons.DespawnAll();
        }

        void JustSummoned(Creature* summoned) override
        {
            // Illusions should attack a random target.
            if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0))
                summoned->AI()->AttackStart(target);
            summoned->ApplySpellImmune(0, IMMUNITY_DAMAGE, SPELL_SCHOOL_MASK_MAGIC, true); // Not sure if this is correct.
            // Track the summon so Reset()/JustDied() can despawn it.
            Summons.Summon(summoned);
        }

        void JustEngagedWith(Unit* /*who*/) override
        {
            events.ScheduleEvent(EVENT_CURSE_OF_BLOOD, 15s);
            events.ScheduleEvent(EVENT_ILLUSION, 30s);
        }

        void JustDied(Unit* /*killer*/) override
        {
            Summons.DespawnAll();
            // Drops the journal used by the Kirtonos quest chain.
            DoCastSelf(SPELL_DROP_JOURNAL, true);
        }

        void UpdateAI(uint32 diff) override
        {
            if (!UpdateVictim())
                return;

            events.Update(diff);

            // Do not interrupt a cast already in progress.
            if (me->HasUnitState(UNIT_STATE_CASTING))
                return;

            while (uint32 eventId = events.ExecuteEvent())
            {
                switch (eventId)
                {
                    case EVENT_CURSE_OF_BLOOD:
                        DoCastVictim(SPELL_CURSE_OF_BLOOD);
                        events.ScheduleEvent(EVENT_CURSE_OF_BLOOD, 30s);
                        break;
                    case EVENT_ILLUSION:
                        // Summon illusions, then hide: unselectable, invisible
                        // model, and 99% of the current victim's threat wiped
                        // so she picks a fresh target on reappearing.
                        DoCast(SPELL_ILLUSION);
                        me->SetFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NOT_SELECTABLE);
                        me->SetDisplayId(11686);  // Invisible Model
                        ModifyThreatByPercent(me->GetVictim(), -99);
                        events.ScheduleEvent(EVENT_SET_VISIBILITY, 3s);
                        events.ScheduleEvent(EVENT_ILLUSION, 25s);
                        break;
                    case EVENT_SET_VISIBILITY:
                        me->RemoveFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NOT_SELECTABLE);
                        me->SetDisplayId(11073); //Jandice Model
                        break;
                    default:
                        break;
                }

                // A handler above may have started a cast; stop processing
                // further events this tick if so.
                if (me->HasUnitState(UNIT_STATE_CASTING))
                    return;
            }
            DoMeleeAttackIfReady();
        }

    private:
        EventMap events;
        SummonList Summons;
    };

    CreatureAI* GetAI(Creature* creature) const override
    {
        return GetScholomanceAI<boss_jandicebarovAI>(creature);
    }
};

void AddSC_boss_jandicebarov()
{
    new boss_jandice_barov();
}
Java
# Select the gio_rev03 board defconfig, then build with the default target.
make gio_rev03_defconfig
make
Java
<?php
/**
 * @package AkeebaBackup
 *
 * @license GNU General Public License, version 2 or later
 * @author Nicholas K. Dionysopoulos
 * @copyright Copyright 2006-2009 Nicholas K. Dionysopoulos
 * @since 1.3
 */

defined('_JEXEC') or die();

/**
 * Database Table filter Model class
 *
 * Manages the "tables" (exclude object) and "tabledata" (skip contents)
 * exclusion filters for every database included in the backup, and serves
 * the AJAX verbs used by the filter GUI.
 */
class AkeebaModelDbefs extends FOFModel
{
	/**
	 * Returns a list of the database tables, views, procedures, functions and triggers,
	 * along with their filter status in array format, for use in the GUI.
	 *
	 * Per-object status codes: 0 = not filtered, 1 = filtered directly by
	 * this filter type, 2 = filtered by another filter (or not filterable).
	 *
	 * @param string $root The database root (inclusion filter key) to list
	 * @return array Hash with 'tables' (name => status hash) and 'root'
	 */
	public function make_listing($root)
	{
		// Get database inclusion filters
		$filters = AEFactory::getFilters();
		$database_list = $filters->getInclusions('db');

		// Load the database object for the selected database
		$config = $database_list[$root];
		// The engine expects 'user', the inclusion definition stores 'username'
		$config['user'] = $config['username'];
		$db = AEFactory::getDatabase($config);

		// Load the table data
		$table_data = $db->getTables();

		// Process filters
		$tables = array();
		if(!empty($table_data))
		{
			foreach($table_data as $table_name => $table_type)
			{
				$status = array();
				// Add table type
				$status['type'] = $table_type;
				// Check dbobject/all filter (exclude); $byFilter is filled by
				// reference with the name of the filter that matched.
				$result = $filters->isFilteredExtended($table_name, $root, 'dbobject', 'all', $byFilter);
				$status['tables'] = (!$result) ? 0 : (( $byFilter == 'tables' ) ? 1 : 2 );
				// Check dbobject/content filter (skip table data)
				$result = $filters->isFilteredExtended($table_name, $root, 'dbobject', 'content', $byFilter);
				$status['tabledata'] = (!$result) ? 0 : (( $byFilter == 'tabledata' ) ? 1 : 2 );
				if( $table_type != 'table' ) $status['tabledata'] = 2; // We can't filter contents of views, merge tables, black holes, procedures, functions and triggers :)
				$tables[$table_name] = $status;
			}
		}

		return array(
			'tables'	=> $tables,
			'root'		=> $root
		);
	}

	/**
	 * Returns an array containing a mapping of db root names and their human-readable representation
	 *
	 * @return array Array of objects; "value" contains the root name, "text" the human-readable text
	 */
	public function get_roots()
	{
		// Get database inclusion filters
		$filters = AEFactory::getFilters();
		$database_list = $filters->getInclusions('db');

		$ret = array();
		foreach($database_list as $name => $definition)
		{
			// Build a host[:port]/database label; the site's own database
			// gets a translated label instead.
			$root = $definition['host'];
			if(!empty($definition['port'])) $root .= ':'.$definition['port'];
			$root .= '/'.$definition['database'];
			if($name == '[SITEDB]') $root = JText::_('DBFILTER_LABEL_SITEDB');
			$entry = new stdClass();
			$entry->value = $name;
			$entry->text = $root;
			$ret[] = $entry;
		}

		return $ret;
	}

	/**
	 * Toggle a filter
	 *
	 * @param string $root   Database root the filter applies to
	 * @param string $item   The database object to toggle the filter for
	 * @param string $filter The name of the filter type to apply (tables, tabledata)
	 * @return array Hash with 'success' and the filter's 'newstate'
	 */
	public function toggle($root, $item, $filter)
	{
		if(empty($item)) return array(
			'success'	=> false,
			'newstate'	=> false
		);
		// Get a reference to the global Filters object
		$filters = AEFactory::getFilters();
		// Get the specific filter object ($filter now holds the object, not the name)
		$filter = AEFactory::getFilterObject($filter);
		// Toggle the filter. NOTE(review): $new_status is an undeclared
		// by-reference out parameter filled by toggle() — confirm against
		// the AEAbstractFilter::toggle() signature.
		$success = $filter->toggle($root, $item, $new_status);
		// Save the data on success
		if($success) $filters->save();
		// Make a return array
		return array(
			'success'	=> $success,
			'newstate'	=> $new_status
		);
	}

	/**
	 * Remove a filter
	 *
	 * @param string $root   Database root the filter applies to
	 * @param string $item   The database object to remove the filter from
	 * @param string $filter The name of the filter type to apply (tables, tabledata)
	 * @return array Hash with 'success' and the filter's 'newstate'
	 */
	public function remove($root, $item, $filter)
	{
		if(empty($item)) return array(
			'success'	=> false,
			'newstate'	=> false
		);
		// Get a reference to the global Filters object
		$filters = AEFactory::getFilters();
		// Get the specific filter object
		$filter = AEFactory::getFilterObject($filter);
		// Remove the filter
		$success = $filter->remove($root, $item);
		// Save the data on success
		if($success) $filters->save();
		// Make a return array
		return array(
			'success'	=> $success,
			'newstate'	=> !$success // The new state of the filter. It is removed if and only if the transaction succeeded
		);
	}

	/**
	 * Set a filter
	 *
	 * @param string $root   Database root the filter applies to
	 * @param string $item   The database object to set the filter for
	 * @param string $filter The name of the filter type to apply (tables, tabledata)
	 * @return array Hash with 'success' and the filter's 'newstate'
	 */
	public function set($root, $item, $filter)
	{
		if(empty($item)) return array(
			'success'	=> false,
			'newstate'	=> false
		);
		// Get a reference to the global Filters object
		$filters = AEFactory::getFilters();
		// Get the specific filter object
		$filter = AEFactory::getFilterObject($filter);
		// Set the filter
		$success = $filter->set($root, $item);
		// Save the data on success
		if($success) $filters->save();
		// Make a return array
		return array(
			'success'	=> $success,
			'newstate'	=> $success // The new state of the filter. It is set if and only if the transaction succeeded
		);
	}

	/**
	 * Swap a filter (remove the old filtered item, then set the new one)
	 *
	 * @param string $root     Database root the filter applies to
	 * @param string $old_item The previously filtered object (may be empty)
	 * @param string $new_item The object to filter instead
	 * @param string $filter   The name of the filter type to apply (tables, tabledata)
	 * @return array Hash with 'success' and the filter's 'newstate'
	 */
	public function swap($root, $old_item, $new_item, $filter)
	{
		if(empty($new_item)) return array(
			'success'	=> false,
			'newstate'	=> false
		);
		// Get a reference to the global Filters object
		$filters = AEFactory::getFilters();
		// Get the specific filter object
		$filter = AEFactory::getFilterObject($filter);
		// Remove the old item first (an empty old item is a plain "set")
		if(!empty($old_item))
		{
			$success = $filter->remove($root, $old_item);
		}
		else
		{
			$success = true;
		}
		if($success)
		{
			$success = $filter->set($root, $new_item);
		}
		// Save the data on success
		if($success) $filters->save();
		// Make a return array
		return array(
			'success'	=> $success,
			'newstate'	=> $success // The new state of the filter. It is set if and only if the transaction succeeded
		);
	}

	/**
	 * Retrieves the filters as an array. Used for the tabular filter editor.
	 *
	 * @param string $root The root node to search filters on
	 * @return array A collection of hash arrays containing node and type for each filtered element
	 */
	public function &get_filters($root)
	{
		// A reference to the global Akeeba Engine filter object
		$filters = AEFactory::getFilters();

		// Initialize the return array
		$ret = array();

		// Define the known filter types and loop through them
		$filter_types = array('tables', 'tabledata');
		foreach($filter_types as $type)
		{
			$rawFilterData = $filters->getFilterData($type);
			if( array_key_exists($root, $rawFilterData) )
			{
				if(!empty($rawFilterData[$root]))
				{
					foreach($rawFilterData[$root] as $node)
					{
						$ret[] = array(
							'node' => substr($node,0), // Make sure we get a COPY, not a reference to the original data
							'type' => $type
						);
					}
				}
			}
		}

		/*
		 * Return array format:
		 * [array] :
		 *		[array] :
		 *			'node'	=> 'somedir'
		 *			'type'	=> 'directories'
		 *		[array] :
		 *			'node'	=> 'somefile'
		 *			'type'	=> 'files'
		 *		...
		 */
		return $ret;
	}

	/**
	 * Resets (clears) both filter types for the given root and returns a
	 * fresh listing for the GUI.
	 *
	 * @param string $root Database root whose filters are reset
	 * @return array Same structure as make_listing()
	 */
	public function resetFilters($root)
	{
		// Get a reference to the global Filters object
		$filters = AEFactory::getFilters();
		$filter = AEFactory::getFilterObject('tables');
		$filter->reset($root);
		$filter = AEFactory::getFilterObject('tabledata');
		$filter->reset($root);
		$filters->save();
		return $this->make_listing($root);
	}

	/**
	 * AJAX dispatcher: routes the 'verb' of the current action state to one
	 * of the filter operations above and returns its result array.
	 * NOTE(review): no visibility keyword (defaults to public) — presumably
	 * intentional for the FOF AJAX controller; confirm.
	 */
	function doAjax()
	{
		$action = $this->getState('action');
		$verb = array_key_exists('verb', get_object_vars($action)) ? $action->verb : null;

		$ret_array = array();

		switch($verb)
		{
			// Return a listing for the normal view
			// NOTE(review): make_listing() only declares one parameter; the
			// extra $action->node argument is silently ignored by PHP.
			case 'list':
				$ret_array = $this->make_listing($action->root, $action->node);
				break;

			// Toggle a filter's state
			case 'toggle':
				$ret_array = $this->toggle($action->root, $action->node, $action->filter);
				break;

			// Set a filter (used by the editor)
			case 'set':
				$ret_array = $this->set($action->root, $action->node, $action->filter);
				break;

			// Remove a filter (used by the editor)
			case 'remove':
				$ret_array = $this->remove($action->root, $action->node, $action->filter);
				break;

			// Swap a filter (used by the editor)
			case 'swap':
				$ret_array = $this->swap($action->root, $action->old_node, $action->new_node, $action->filter);
				break;

			// Tabular view
			case 'tab':
				$ret_array = $this->get_filters($action->root);
				break;

			// Reset filters
			case 'reset':
				$ret_array = $this->resetFilters($action->root);
				break;
		}

		return $ret_array;
	}
}
Java
<?php
/**
 * File containing the MultipleObjectConverter class.
 *
 * @copyright Copyright (C) eZ Systems AS. All rights reserved.
 * @license For full copyright and license information view LICENSE file distributed with this source code.
 * @version //autogentag//
 */

namespace eZ\Publish\Core\MVC\Legacy\Templating\Converter;

/**
 * Interface for multiple object converters.
 * This is useful if one needs to convert several objects at once:
 * objects are first queued via register(), then converted in a single
 * pass by convertAll().
 */
interface MultipleObjectConverter extends ObjectConverter
{
    /**
     * Registers an object to the converter.
     * $alias is the variable name that will be exposed in the legacy template.
     *
     * @param mixed $object Object to convert (must actually be an object)
     * @param string $alias Variable name exposed in the legacy template
     *
     * @throws \InvalidArgumentException If $object is not an object
     *
     * @return void
     */
    public function register( $object, $alias );

    /**
     * Converts all registered objects and returns them in a hash where the object's alias is the key.
     *
     * @return array|\eZ\Publish\Core\MVC\Legacy\Templating\LegacyCompatible[]
     */
    public function convertAll();
}
Java