repo
stringlengths 1
152
⌀ | file
stringlengths 14
221
| code
stringlengths 501
25k
| file_length
int64 501
25k
| avg_line_length
float64 20
99.5
| max_line_length
int64 21
134
| extension_type
stringclasses 2
values |
---|---|---|---|---|---|---|
AMG | AMG-master/seq_mv/vector.h | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang ([email protected]) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* Header info for Vector data structure
*
*****************************************************************************/
#ifndef hypre_VECTOR_HEADER
#define hypre_VECTOR_HEADER
/*--------------------------------------------------------------------------
* hypre_Vector
*--------------------------------------------------------------------------*/
/* Sequential (multi)vector: all components live in one contiguous
 * `data' array; vecstride/idxstride encode the storage layout. */
typedef struct
{
/* entry storage; total length is num_vectors * size */
HYPRE_Complex *data;
/* length of one component vector */
HYPRE_Int size;
/* Does the Vector create/destroy `data'? */
HYPRE_Int owns_data;
/* For multivectors...*/
HYPRE_Int num_vectors; /* the above "size" is size of one vector */
HYPRE_Int multivec_storage_method;
/* ...if 0, store colwise v0[0], v0[1], ..., v1[0], v1[1], ... v2[0]... */
/* ...if 1, store rowwise v0[0], v1[0], ..., v0[1], v1[1], ... */
/* With colwise storage, vj[i] = data[ j*size + i]
With rowwise storage, vj[i] = data[ j + num_vectors*i] */
HYPRE_Int vecstride, idxstride;
/* ... so vj[i] = data[ j*vecstride + i*idxstride ] regardless of row_storage.*/
} hypre_Vector;
/*--------------------------------------------------------------------------
* Accessor functions for the Vector structure
*--------------------------------------------------------------------------*/
/* Field accessors: code elsewhere uses these macros rather than touching
 * hypre_Vector members directly, so the struct layout can change safely. */
#define hypre_VectorData(vector) ((vector) -> data)
#define hypre_VectorSize(vector) ((vector) -> size)
#define hypre_VectorOwnsData(vector) ((vector) -> owns_data)
#define hypre_VectorNumVectors(vector) ((vector) -> num_vectors)
#define hypre_VectorMultiVecStorageMethod(vector) ((vector) -> multivec_storage_method)
#define hypre_VectorVectorStride(vector) ((vector) -> vecstride )
#define hypre_VectorIndexStride(vector) ((vector) -> idxstride )
#endif
| 2,725 | 41.59375 | 87 | h |
AMG | AMG-master/utilities/HYPRE_error_f.h | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang ([email protected]) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
c Copyright (c) 2008, Lawrence Livermore National Security, LLC.
c Produced at the Lawrence Livermore National Laboratory.
c This file is part of HYPRE. See file COPYRIGHT for details.
c
c HYPRE is free software; you can redistribute it and/or modify it under the
c terms of the GNU Lesser General Public License (as published by the Free
c Software Foundation) version 2.1 dated February 1999.
c
c $Revision$
c     Fortran mirror of the error codes in HYPRE_utilities.h;
c     keep the two files in sync.
integer HYPRE_ERROR_GENERIC
integer HYPRE_ERROR_MEMORY
integer HYPRE_ERROR_ARG
integer HYPRE_ERROR_CONV
parameter (HYPRE_ERROR_GENERIC = 1)
parameter (HYPRE_ERROR_MEMORY = 2)
parameter (HYPRE_ERROR_ARG = 4)
parameter (HYPRE_ERROR_CONV = 256)
| 1,635 | 45.742857 | 81 | h |
AMG | AMG-master/utilities/HYPRE_utilities.h | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang ([email protected]) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* Header file for HYPRE_utilities library
*
*****************************************************************************/
#include "HYPRE.h"
#ifndef HYPRE_UTILITIES_HEADER
#define HYPRE_UTILITIES_HEADER
#ifndef HYPRE_SEQUENTIAL
#include "mpi.h"
#endif
#ifdef HYPRE_USING_OPENMP
#include <omp.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
/*
* Before a version of HYPRE goes out the door, increment the version
* number and check in this file (for CVS to substitute the Date).
*/
#define HYPRE_Version() "HYPRE_RELEASE_NAME Date Compiled: " __DATE__ " " __TIME__
/*--------------------------------------------------------------------------
* Real and Complex types
*--------------------------------------------------------------------------*/
#include <float.h>
/* Select the floating-point type behind HYPRE_Real together with the
 * matching <float.h> limit macros and MPI datatype.  Exactly one branch
 * is active, chosen by the HYPRE_SINGLE / HYPRE_LONG_DOUBLE configure
 * macros; the default is double. */
#if defined(HYPRE_SINGLE)
typedef float HYPRE_Real;
#define HYPRE_REAL_MAX FLT_MAX
#define HYPRE_REAL_MIN FLT_MIN
#define HYPRE_REAL_EPSILON FLT_EPSILON
#define HYPRE_REAL_MIN_EXP FLT_MIN_EXP
#define HYPRE_MPI_REAL MPI_FLOAT
#elif defined(HYPRE_LONG_DOUBLE)
typedef long double HYPRE_Real;
#define HYPRE_REAL_MAX LDBL_MAX
#define HYPRE_REAL_MIN LDBL_MIN
#define HYPRE_REAL_EPSILON LDBL_EPSILON
/* bug fix: was DBL_MIN_EXP — the long-double branch must use the
 * long-double limit, matching the other macros in this branch */
#define HYPRE_REAL_MIN_EXP LDBL_MIN_EXP
#define HYPRE_MPI_REAL MPI_LONG_DOUBLE
#else /* default */
typedef double HYPRE_Real;
#define HYPRE_REAL_MAX DBL_MAX
#define HYPRE_REAL_MIN DBL_MIN
#define HYPRE_REAL_EPSILON DBL_EPSILON
#define HYPRE_REAL_MIN_EXP DBL_MIN_EXP
#define HYPRE_MPI_REAL MPI_DOUBLE
#endif
/* HYPRE_Complex is a C99 double _Complex only when HYPRE_COMPLEX is
 * configured; otherwise it is simply an alias for HYPRE_Real. */
#if defined(HYPRE_COMPLEX)
typedef double _Complex HYPRE_Complex;
#define HYPRE_MPI_COMPLEX MPI_C_DOUBLE_COMPLEX /* or MPI_LONG_DOUBLE ? */
#else /* default */
typedef HYPRE_Real HYPRE_Complex;
#define HYPRE_MPI_COMPLEX HYPRE_MPI_REAL
#endif
/*--------------------------------------------------------------------------
* Sequential MPI stuff
*--------------------------------------------------------------------------*/
#ifdef HYPRE_SEQUENTIAL
typedef HYPRE_Int MPI_Comm;
#endif
/*--------------------------------------------------------------------------
* HYPRE error codes
*--------------------------------------------------------------------------*/
#define HYPRE_ERROR_GENERIC 1 /* generic error */
#define HYPRE_ERROR_MEMORY 2 /* unable to allocate memory */
#define HYPRE_ERROR_ARG 4 /* argument error */
/* bits 4-8 are reserved for the index of the argument error */
#define HYPRE_ERROR_CONV 256 /* method did not converge as expected */
/*--------------------------------------------------------------------------
* HYPRE error user functions
*--------------------------------------------------------------------------*/
/* Return the current hypre error flag */
HYPRE_Int HYPRE_GetError();
/* Check if the given error flag contains the given error code */
HYPRE_Int HYPRE_CheckError(HYPRE_Int hypre_ierr, HYPRE_Int hypre_error_code);
/* Return the index of the argument (counting from 1) where
argument error (HYPRE_ERROR_ARG) has occured */
HYPRE_Int HYPRE_GetErrorArg();
/* Describe the given error flag in the given string */
void HYPRE_DescribeError(HYPRE_Int hypre_ierr, char *descr);
/* Clears the hypre error flag */
HYPRE_Int HYPRE_ClearAllErrors();
/* Clears the given error code from the hypre error flag */
HYPRE_Int HYPRE_ClearError(HYPRE_Int hypre_error_code);
/*--------------------------------------------------------------------------
* HYPRE AP user functions
*--------------------------------------------------------------------------*/
/*Checks whether the AP is on */
HYPRE_Int HYPRE_AssumedPartitionCheck();
#ifdef __cplusplus
}
#endif
#endif
| 4,644 | 32.178571 | 82 | h |
AMG | AMG-master/utilities/amg_linklist.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang ([email protected]) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/***************************************************************************
*
* Routines for linked list for boomerAMG
*
****************************************************************************/
#include "_hypre_utilities.h"
#define hypre_LIST_HEAD -1
#define hypre_LIST_TAIL -2
/**************************************************************
*
* dispose_elt(): dispose of memory space used by the element
* pointed to by element_ptr. Use the 'free()'
* system call to return it to the free memory
* pool.
*
**************************************************************/
/* Free one list element previously obtained from hypre_create_elt()
 * (which allocates with plain malloc, hence the plain free here). */
void hypre_dispose_elt ( hypre_LinkList element_ptr )
{
free( element_ptr );
}
/*****************************************************************
*
* remove_point: removes a point from the lists
*
****************************************************************/
/* Remove point `index' (whose key is `measure') from the list-of-lists.
 *
 * LoL_head_ptr / LoL_tail_ptr : head/tail of the doubly linked chain of
 *     sub-lists, one sub-list per distinct measure value.
 * lists / where : intrusive successor/predecessor arrays threading the
 *     points of each sub-list; the sentinels hypre_LIST_HEAD and
 *     hypre_LIST_TAIL mark the ends.
 *
 * If removing `index' empties its sub-list, the sub-list node itself is
 * unlinked from the chain and freed, and *LoL_head_ptr/*LoL_tail_ptr are
 * updated.  If no sub-list carries `measure', a generic hypre error is
 * raised. */
void
hypre_remove_point(hypre_LinkList *LoL_head_ptr,
hypre_LinkList *LoL_tail_ptr,
HYPRE_Int measure,
HYPRE_Int index,
HYPRE_Int *lists,
HYPRE_Int *where)
{
hypre_LinkList LoL_head = *LoL_head_ptr;
hypre_LinkList LoL_tail = *LoL_tail_ptr;
hypre_LinkList list_ptr;
list_ptr = LoL_head;
/* walk the chain of sub-lists looking for the one keyed by `measure' */
do
{
if (measure == list_ptr->data)
{
/* point to be removed is only point on list,
which must be destroyed */
if (list_ptr->head == index && list_ptr->tail == index)
{
/* removing only list, so num_left better be 0! */
if (list_ptr == LoL_head && list_ptr == LoL_tail)
{
/* chain becomes empty */
LoL_head = NULL;
LoL_tail = NULL;
hypre_dispose_elt(list_ptr);
*LoL_head_ptr = LoL_head;
*LoL_tail_ptr = LoL_tail;
return;
}
else if (LoL_head == list_ptr) /*removing 1st (max_measure) list */
{
list_ptr -> next_elt -> prev_elt = NULL;
LoL_head = list_ptr->next_elt;
hypre_dispose_elt(list_ptr);
*LoL_head_ptr = LoL_head;
*LoL_tail_ptr = LoL_tail;
return;
}
else if (LoL_tail == list_ptr) /* removing last list */
{
list_ptr -> prev_elt -> next_elt = NULL;
LoL_tail = list_ptr->prev_elt;
hypre_dispose_elt(list_ptr);
*LoL_head_ptr = LoL_head;
*LoL_tail_ptr = LoL_tail;
return;
}
else
{
/* interior sub-list: splice neighbours together */
list_ptr -> next_elt -> prev_elt = list_ptr -> prev_elt;
list_ptr -> prev_elt -> next_elt = list_ptr -> next_elt;
hypre_dispose_elt(list_ptr);
*LoL_head_ptr = LoL_head;
*LoL_tail_ptr = LoL_tail;
return;
}
}
else if (list_ptr->head == index) /* index is head of list */
{
/* successor of `index' becomes the new head */
list_ptr->head = lists[index];
where[lists[index]] = hypre_LIST_HEAD;
return;
}
else if (list_ptr->tail == index) /* index is tail of list */
{
/* predecessor of `index' becomes the new tail */
list_ptr->tail = where[index];
lists[where[index]] = hypre_LIST_TAIL;
return;
}
else /* index is in middle of list */
{
/* bypass `index' in both intrusive link arrays */
lists[where[index]] = lists[index];
where[lists[index]] = where[index];
return;
}
}
list_ptr = list_ptr -> next_elt;
} while (list_ptr != NULL);
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"No such list!\n");
return ;
}
/*****************************************************************
*
* hypre_create_elt() : Create an element using Item for its data field
*
*****************************************************************/
/* Allocate and initialize one list element whose data field is Item.
 * Returns NULL (after raising a generic hypre error) when malloc fails;
 * otherwise the links are NULL and head/tail carry the end sentinels. */
hypre_LinkList hypre_create_elt( HYPRE_Int Item )
{
   hypre_LinkList elt = (hypre_LinkList) malloc(sizeof(hypre_ListElement));

   if (elt == NULL)
   {
      /* out of memory: report, caller receives NULL */
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,"\n create_elt: malloc failed \n\n");
   }
   else
   {
      elt -> data     = Item;
      elt -> next_elt = NULL;
      elt -> prev_elt = NULL;
      elt -> head     = hypre_LIST_TAIL;
      elt -> tail     = hypre_LIST_HEAD;
   }

   return elt;
}
/*****************************************************************
*
* enter_on_lists places point in new list
*
****************************************************************/
/* Insert point `index' (key `measure') into the list-of-lists.
 *
 * The chain of sub-lists is kept sorted by decreasing measure.  Three
 * cases arise: the chain is empty (create the first sub-list); a
 * sub-list with the same measure exists (append `index' at its tail);
 * otherwise a new sub-list is spliced in at the sorted position, or
 * appended at the chain's tail if `measure' is the smallest seen.
 * lists/where are the intrusive successor/predecessor arrays shared
 * with hypre_remove_point. */
void
hypre_enter_on_lists(hypre_LinkList *LoL_head_ptr,
hypre_LinkList *LoL_tail_ptr,
HYPRE_Int measure,
HYPRE_Int index,
HYPRE_Int *lists,
HYPRE_Int *where)
{
hypre_LinkList LoL_head = *LoL_head_ptr;
hypre_LinkList LoL_tail = *LoL_tail_ptr;
hypre_LinkList list_ptr;
hypre_LinkList new_ptr;
HYPRE_Int old_tail;
list_ptr = LoL_head;
if (LoL_head == NULL) /* no lists exist yet */
{
/* first sub-list: `index' is both head and tail of it */
new_ptr = hypre_create_elt(measure);
new_ptr->head = index;
new_ptr->tail = index;
lists[index] = hypre_LIST_TAIL;
where[index] = hypre_LIST_HEAD;
LoL_head = new_ptr;
LoL_tail = new_ptr;
*LoL_head_ptr = LoL_head;
*LoL_tail_ptr = LoL_tail;
return;
}
else
{
do
{
if (measure > list_ptr->data)
{
/* passed the sorted position: splice a new sub-list in
   front of list_ptr */
new_ptr = hypre_create_elt(measure);
new_ptr->head = index;
new_ptr->tail = index;
lists[index] = hypre_LIST_TAIL;
where[index] = hypre_LIST_HEAD;
if ( list_ptr->prev_elt != NULL)
{
new_ptr->prev_elt = list_ptr->prev_elt;
list_ptr->prev_elt->next_elt = new_ptr;
list_ptr->prev_elt = new_ptr;
new_ptr->next_elt = list_ptr;
}
else
{
/* inserting before the current head */
new_ptr->next_elt = list_ptr;
list_ptr->prev_elt = new_ptr;
new_ptr->prev_elt = NULL;
LoL_head = new_ptr;
}
*LoL_head_ptr = LoL_head;
*LoL_tail_ptr = LoL_tail;
return;
}
else if (measure == list_ptr->data)
{
/* sub-list for this measure exists: append at its tail */
old_tail = list_ptr->tail;
lists[old_tail] = index;
where[index] = old_tail;
lists[index] = hypre_LIST_TAIL;
list_ptr->tail = index;
return;
}
list_ptr = list_ptr->next_elt;
} while (list_ptr != NULL);
/* smallest measure so far: append a new sub-list at the chain tail */
new_ptr = hypre_create_elt(measure);
new_ptr->head = index;
new_ptr->tail = index;
lists[index] = hypre_LIST_TAIL;
where[index] = hypre_LIST_HEAD;
LoL_tail->next_elt = new_ptr;
new_ptr->prev_elt = LoL_tail;
new_ptr->next_elt = NULL;
LoL_tail = new_ptr;
*LoL_head_ptr = LoL_head;
*LoL_tail_ptr = LoL_tail;
return;
}
}
| 8,328 | 29.39781 | 85 | c |
AMG | AMG-master/utilities/amg_linklist.h | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang ([email protected]) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* Header file link lists
*
*****************************************************************************/
#ifndef HYPRE_LINKLIST_HEADER
#define HYPRE_LINKLIST_HEADER
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#ifdef __cplusplus
extern "C" {
#endif
/* One sub-list node of the list-of-lists used by the BoomerAMG
 * coarsening routines (see amg_linklist.c). */
struct double_linked_list
{
/* the measure value shared by every point on this sub-list */
HYPRE_Int data;
struct double_linked_list *next_elt;
struct double_linked_list *prev_elt;
/* indices of the first and last points on this sub-list */
HYPRE_Int head;
HYPRE_Int tail;
};
typedef struct double_linked_list hypre_ListElement;
/* a linked list is just a pointer to its first element */
typedef hypre_ListElement *hypre_LinkList;
#ifdef __cplusplus
}
#endif
#endif
| 1,674 | 30.603774 | 81 | h |
AMG | AMG-master/utilities/binsearch.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang ([email protected]) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#include "_hypre_utilities.h"
/*--------------------------------------------------------------------------
* hypre_BinarySearch
* to contain ordered nonnegative numbers
* the routine returns the location of the value or -1
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * hypre_BinarySearch
 * `list' must contain ordered nonnegative numbers.
 * Returns the location of `value' in `list', or -1 if absent.
 *
 * Fixes: midpoint computed as low + (high-low)/2 so (low+high) cannot
 * overflow (the issue hypre_BinarySearch2's comment warns about), and
 * the redundant `not_found' flag is dropped — the loop exits by
 * returning or by low > high.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_BinarySearch(HYPRE_Int *list, HYPRE_Int value, HYPRE_Int list_length)
{
   HYPRE_Int low = 0;
   HYPRE_Int high = list_length - 1;

   while (low <= high)
   {
      HYPRE_Int m = low + (high - low) / 2;   /* overflow-safe midpoint */

      if (value < list[m])
      {
         high = m - 1;
      }
      else if (value > list[m])
      {
         low = m + 1;
      }
      else
      {
         return m;
      }
   }
   return -1;   /* not present */
}
/*--------------------------------------------------------------------------
* hypre_BinarySearch2
* this one is a bit more robust:
* avoids overflow of m as can happen above when (low+high) overflows
* lets user specifiy high and low bounds for array (so a subset
of array can be used)
* if not found, then spot returns where is should be inserted
*--------------------------------------------------------------------------*/
/* Robust binary search over list[low..high] (inclusive).
 * Returns the position of `value' and stores it in *spot; when `value'
 * is absent, returns -1 and *spot is the index where it should be
 * inserted.  The midpoint is formed as low + (high-low)/2 to avoid
 * overflow of low+high. */
HYPRE_Int hypre_BinarySearch2(HYPRE_Int *list, HYPRE_Int value, HYPRE_Int low, HYPRE_Int high, HYPRE_Int *spot)
{
   while (low <= high)
   {
      HYPRE_Int mid = low + (high - low) / 2;

      if (list[mid] == value)
      {
         *spot = mid;
         return mid;
      }

      if (list[mid] < value)
      {
         low = mid + 1;
      }
      else
      {
         high = mid - 1;
      }
   }

   /* not found (high = low-1) - so insert at low */
   *spot = low;
   return -1;
}
/*--------------------------------------------------------------------------
* Equivalent to C++ std::lower_bound
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * Equivalent to C++ std::lower_bound: returns a pointer to the first
 * element in [first, last) that is not less than `value' (or `last'
 * when every element is smaller).  The range must be sorted ascending.
 *--------------------------------------------------------------------------*/
HYPRE_Int *hypre_LowerBound( HYPRE_Int *first, HYPRE_Int *last, HYPRE_Int value )
{
   size_t remaining = (size_t)(last - first);

   while (remaining > 0)
   {
      size_t half = remaining / 2;
      HYPRE_Int *probe = first + half;

      if (*probe < value)
      {
         /* answer lies strictly after `probe' */
         first = probe + 1;
         remaining -= half + 1;
      }
      else
      {
         /* `probe' may itself be the answer: keep the left half */
         remaining = half;
      }
   }

   return first;
}
| 3,166 | 28.055046 | 112 | c |
AMG | AMG-master/utilities/caliper_instrumentation.h | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang ([email protected]) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* Header file for Caliper instrumentation macros
*
*****************************************************************************/
#ifndef CALIPER_INSTRUMENTATION_HEADER
#define CALIPER_INSTRUMENTATION_HEADER
#include "HYPRE_config.h"
#ifdef HYPRE_USING_CALIPER
#include <caliper/cali.h>
#define HYPRE_ANNOTATION_BEGIN( str ) cali_begin_string_byname("hypre.kernel", str)
#define HYPRE_ANNOTATION_END( str ) cali_end_byname("hypre.kernel")
#else
#define HYPRE_ANNOTATION_BEGIN( str )
#define HYPRE_ANNOTATION_END( str )
#endif
#endif /* CALIPER_INSTRUMENTATION_HEADER */
| 1,593 | 34.422222 | 83 | h |
AMG | AMG-master/utilities/exchange_data.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang ([email protected]) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/* see exchange_data.README for additional information */
/* AHB 6/04 */
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "_hypre_utilities.h"
/*---------------------------------------------------
* hypre_CreateBinaryTree()
* its children and parent processor ids)
*----------------------------------------------------*/
/* Build this rank's view of a binomial/binary communication tree over
 * num_procs ranks: fills in the parent id, the number of children, and
 * a freshly allocated array of child ids (released later by
 * hypre_DestroyBinaryTree).  Rank 0 keeps parent = 0. */
HYPRE_Int hypre_CreateBinaryTree(HYPRE_Int myid, HYPRE_Int num_procs,
                                 hypre_BinaryTree *tree)
{
   HYPRE_Int  depth = 0;            /* max possible children = ceil(log2 P) */
   HYPRE_Int  num_children = 0;
   HYPRE_Int  parent = 0;
   HYPRE_Int  remaining = myid;     /* bits of myid still to examine */
   HYPRE_Int  stride;
   HYPRE_Int *child_ids;

   /* count the doublings needed to cover num_procs ranks */
   for (stride = 1; stride < num_procs; stride *= 2)
   {
      depth++;
   }

   /* worst-case space for the child list */
   child_ids = hypre_TAlloc(HYPRE_Int, depth);

   /* scan myid's bits from the bottom: while they are zero we adopt
      children at myid+stride; the first set bit identifies the parent */
   for (stride = 1; stride < num_procs; stride *= 2)
   {
      if (remaining % 2 != 0)
      {
         parent = myid - stride;
         break;
      }
      if (myid + stride < num_procs)
      {
         child_ids[num_children++] = myid + stride;
      }
      remaining /= 2;
   }

   hypre_BinaryTreeParentId(tree) = parent;
   hypre_BinaryTreeNumChild(tree) = num_children;
   hypre_BinaryTreeChildIds(tree) = child_ids;

   return hypre_error_flag;
}
/*---------------------------------------------------
* hypre_DestroyBinaryTree()
* Destroy storage created by createBinaryTree
*----------------------------------------------------*/
/* Release the storage created by hypre_CreateBinaryTree.  Only the
 * child-id array is heap-allocated; the tree struct is caller-owned. */
HYPRE_Int hypre_DestroyBinaryTree(hypre_BinaryTree *tree)
{
hypre_TFree(hypre_BinaryTreeChildIds(tree));
return hypre_error_flag;
}
/*---------------------------------------------------
* hypre_DataExchangeList()
* This function is for sending a list of messages ("contacts" to
* a list of processors. The receiving processors
* do not know how many messages they are getting. The
* sending process expects a "response" (either a confirmation or
* some sort of data back from the receiving processor).
*----------------------------------------------------*/
/* should change to where the buffers for sending and receiving are voids
instead of ints - then cast accordingly */
HYPRE_Int hypre_DataExchangeList(HYPRE_Int num_contacts,
HYPRE_Int *contact_proc_list,
void *contact_send_buf,
HYPRE_Int *contact_send_buf_starts,
HYPRE_Int contact_obj_size,
HYPRE_Int response_obj_size,
hypre_DataExchangeResponse *response_obj,
HYPRE_Int max_response_size,
HYPRE_Int rnum, MPI_Comm comm,
void **p_response_recv_buf,
HYPRE_Int **p_response_recv_buf_starts)
{
/*-------------------------------------------
* parameters:
*
* num_contacts = how many procs to contact
* contact_proc_list = list of processors to contact
* contact_send_buf = array of data to send
* contact_send_buf_starts = index for contact_send_buf corresponding to
* contact_proc_list
* contact_obj_size = sizeof() one obj in contact list
* response_obj_size = sizeof() one obj in response_recv_buf
* response_obj = this will give us the function we need to
* fill the reponse as well as
* any data we might need to accomplish that
* max_response_size = max size of a single response expected (do NOT
* need to be an absolute upper bound)
* rnum = two consequentive exchanges should have different
* rnums. Alternate rnum = 1
* and rnum=2 - these flags will be even (so odd
* numbered tags could be used in calling code)
* p_response_recv_buf = where to receive the reponses - will be allocated
* in this function
* p_response_recv_buf_starts = index of p_response_buf corresponding to
* contact_buf_list - will be allocated here
*-------------------------------------------*/
HYPRE_Int num_procs, myid;
HYPRE_Int i;
HYPRE_Int terminate, responses_complete;
HYPRE_Int children_complete;
HYPRE_Int contact_flag;
HYPRE_Int proc;
HYPRE_Int contact_size;
HYPRE_Int size, post_size, copy_size;
HYPRE_Int total_size, count;
void *start_ptr = NULL, *index_ptr=NULL;
HYPRE_Int *int_ptr=NULL;
void *response_recv_buf = NULL;
void *send_response_buf = NULL;
HYPRE_Int *response_recv_buf_starts = NULL;
void *initial_recv_buf = NULL;
void *recv_contact_buf = NULL;
HYPRE_Int recv_contact_buf_size = 0;
HYPRE_Int response_message_size = 0;
HYPRE_Int overhead;
HYPRE_Int max_response_size_bytes;
HYPRE_Int max_response_total_bytes;
void **post_array = NULL; /*this must be set to null or realloc will crash */
HYPRE_Int post_array_storage = 0;
HYPRE_Int post_array_size = 0;
HYPRE_Int num_post_recvs =0;
void **contact_ptrs = NULL, **response_ptrs=NULL, **post_ptrs=NULL;
hypre_BinaryTree tree;
hypre_MPI_Request *response_requests, *contact_requests;
hypre_MPI_Status *response_statuses, *contact_statuses;
hypre_MPI_Request *post_send_requests = NULL, *post_recv_requests = NULL;
hypre_MPI_Status *post_send_statuses = NULL, *post_recv_statuses = NULL;
hypre_MPI_Request *term_requests, term_request1, request_parent;
hypre_MPI_Status *term_statuses, term_status1, status_parent;
hypre_MPI_Status status, fill_status;
const HYPRE_Int contact_tag = 1000*rnum;
const HYPRE_Int response_tag = 1002*rnum;
const HYPRE_Int term_tag = 1004*rnum;
const HYPRE_Int post_tag = 1006*rnum;
hypre_MPI_Comm_size(comm, &num_procs );
hypre_MPI_Comm_rank(comm, &myid );
/* ---------initializations ----------------*/
/* if the response_obj_size or contact_obj_size is 0, set to sizeof(HYPRE_Int) */
if (!response_obj_size) response_obj_size = sizeof(HYPRE_Int);
if (!contact_obj_size) contact_obj_size = sizeof(HYPRE_Int);
max_response_size_bytes = max_response_size*response_obj_size;
/* pre-allocate the max space for responding to contacts */
overhead = ceil((HYPRE_Real) sizeof(HYPRE_Int)/response_obj_size); /*for appending an integer*/
max_response_total_bytes = (max_response_size+overhead)*response_obj_size;
response_obj->send_response_overhead = overhead;
response_obj->send_response_storage = max_response_size;
/*send_response_buf = hypre_MAlloc(max_response_total_bytes);*/
send_response_buf = hypre_CAlloc(max_response_size+overhead, response_obj_size);
/*allocate space for inital recv array for the responses - give each processor
size max_response_size */
initial_recv_buf = hypre_MAlloc(max_response_total_bytes*num_contacts);
response_recv_buf_starts = hypre_CTAlloc(HYPRE_Int, num_contacts+1);
contact_ptrs = hypre_TAlloc( void *, num_contacts);
response_ptrs = hypre_TAlloc(void *, num_contacts);
/*-------------SEND CONTACTS AND POST RECVS FOR RESPONSES---*/
for (i=0; i<= num_contacts; i++)
{
response_recv_buf_starts[i] = i*(max_response_size+overhead);
}
/* Send "contact" messages to the list of processors and
pre-post receives to wait for their response*/
responses_complete = 1;
if (num_contacts > 0 )
{
responses_complete = 0;
response_requests = hypre_CTAlloc(hypre_MPI_Request, num_contacts);
response_statuses = hypre_CTAlloc(hypre_MPI_Status, num_contacts);
contact_requests = hypre_CTAlloc(hypre_MPI_Request, num_contacts);
contact_statuses = hypre_CTAlloc(hypre_MPI_Status, num_contacts);
/* post receives - could be confirmation or data*/
/* the size to post is max_response_total_bytes*/
for (i=0; i< num_contacts; i++)
{
/* response_ptrs[i] = initial_recv_buf + i*max_response_total_bytes ; */
response_ptrs[i] = (void *)((char *) initial_recv_buf +
i*max_response_total_bytes) ;
hypre_MPI_Irecv(response_ptrs[i], max_response_total_bytes,
hypre_MPI_BYTE, contact_proc_list[i],
response_tag, comm, &response_requests[i]);
}
/* send out contact messages */
start_ptr = contact_send_buf;
for (i=0; i< num_contacts; i++)
{
contact_ptrs[i] = start_ptr;
size = contact_send_buf_starts[i+1] - contact_send_buf_starts[i] ;
hypre_MPI_Isend(contact_ptrs[i], size*contact_obj_size,
hypre_MPI_BYTE, contact_proc_list[i],
contact_tag, comm, &contact_requests[i]);
/* start_ptr += (size*contact_obj_size); */
start_ptr = (void *) ((char *) start_ptr + (size*contact_obj_size));
}
}
/*------------BINARY TREE-----------------------*/
/*Now let's find out our binary tree information and
initialize for the termination check sweep */
children_complete = 1;/*indicates whether we have recv. term messages
from our children*/
if (num_procs > 1)
{
hypre_CreateBinaryTree(myid, num_procs, &tree);
/* we will get a message from all of our children when they
have received responses for all of their contacts.
So post receives now */
term_requests = hypre_CTAlloc(hypre_MPI_Request, tree.num_child);
term_statuses = hypre_CTAlloc(hypre_MPI_Status, tree.num_child);
for (i=0; i< tree.num_child; i++)
{
hypre_MPI_Irecv(NULL, 0, HYPRE_MPI_INT, tree.child_id[i], term_tag, comm,
&term_requests[i]);
}
terminate = 0;
children_complete = 0;
}
else if (num_procs ==1 && num_contacts > 0 ) /* added 11/08 */
{
terminate = 0;
}
/*---------PROBE LOOP-----------------------------------------*/
/*Look for incoming contact messages - don't know how many I will get!*/
while (!terminate)
{
/* did I receive any contact messages? */
hypre_MPI_Iprobe(hypre_MPI_ANY_SOURCE, contact_tag, comm,
&contact_flag, &status);
while (contact_flag)
{
/* received contacts - from who and what do we do ?*/
proc = status.hypre_MPI_SOURCE;
hypre_MPI_Get_count(&status, hypre_MPI_BYTE, &contact_size);
contact_size = contact_size/contact_obj_size;
/*---------------FILL RESPONSE ------------------------*/
/*first receive the contact buffer - then call a function
         to determine how to populate the send buffer for the response*/
/* do we have enough space to recv it? */
if(contact_size > recv_contact_buf_size)
{
recv_contact_buf = hypre_ReAlloc((char*)recv_contact_buf,
contact_obj_size*contact_size);
recv_contact_buf_size = contact_size;
}
/* this must be blocking - can't fill recv without the buffer*/
hypre_MPI_Recv(recv_contact_buf, contact_size*contact_obj_size,
hypre_MPI_BYTE, proc, contact_tag, comm, &fill_status);
response_obj->fill_response(recv_contact_buf, contact_size, proc,
response_obj, comm, &send_response_buf,
&response_message_size );
/* we need to append the size of the send obj */
/* first we copy out any part that may be needed to send later so we don't overwrite */
post_size = response_message_size - max_response_size;
if (post_size > 0) /*we will need to send the extra information later */
{
/*hypre_printf("myid = %d, post_size = %d\n", myid, post_size);*/
if (post_array_size == post_array_storage)
{
/* allocate room for more posts - add 20*/
post_array_storage += 20;
post_array = hypre_TReAlloc(post_array, void *, post_array_storage);
post_send_requests =
hypre_TReAlloc(post_send_requests, hypre_MPI_Request,
post_array_storage);
}
/* allocate space for the data this post only*/
/* this should not happen often (unless a poor max_size has been chosen)
- so we will allocate space for the data as needed */
size = post_size*response_obj_size;
post_array[post_array_size] = hypre_MAlloc(size);
/* index_ptr = send_response_buf + max_response_size_bytes */;
index_ptr = (void *) ((char *) send_response_buf +
max_response_size_bytes);
memcpy(post_array[post_array_size], index_ptr, size);
/*now post any part of the message that is too long with a non-blocking
send and a different tag */
hypre_MPI_Isend(post_array[post_array_size], size,
hypre_MPI_BYTE, proc, post_tag,
/*hypre_MPI_COMM_WORLD, */
comm,
&post_send_requests[post_array_size]);
post_array_size++;
}
/*now append the size information into the overhead storage */
/* index_ptr = send_response_buf + max_response_size_bytes; */
index_ptr = (void *) ((char *) send_response_buf +
max_response_size_bytes);
memcpy(index_ptr, &response_message_size, sizeof(HYPRE_Int));
/*send the block of data that includes the overhead */
/* this is a blocking send - the recv has already been posted */
hypre_MPI_Send(send_response_buf, max_response_total_bytes,
hypre_MPI_BYTE, proc, response_tag, comm);
/*--------------------------------------------------------------*/
/* look for any more contact messages*/
hypre_MPI_Iprobe(hypre_MPI_ANY_SOURCE, contact_tag, comm,
&contact_flag, &status);
}
/* no more contact messages waiting - either
(1) check to see if we have received all of our response messages
(2) participate in termination (check for messages from children)
(3) participate in termination sweep (check for message from parent) */
if (!responses_complete)
{
hypre_MPI_Testall(num_contacts, response_requests, &responses_complete,
response_statuses);
if (responses_complete && num_procs == 1) terminate = 1; /*added 11/08 */
}
else if(!children_complete) /* have all of our children received all of their
response messages?*/
{
hypre_MPI_Testall(tree.num_child, term_requests, &children_complete,
term_statuses);
/* if we have gotten term messages from all of our children, send a term
message to our parent. Then post a receive to hear back from parent */
if (children_complete & (myid > 0)) /*root does not have a parent*/
{
hypre_MPI_Isend(NULL, 0, HYPRE_MPI_INT, tree.parent_id, term_tag,
comm, &request_parent);
hypre_MPI_Irecv(NULL, 0, HYPRE_MPI_INT, tree.parent_id, term_tag,
comm, &term_request1);
}
}
else /*have we gotten a term message from our parent? */
{
if (myid == 0) /* root doesn't have a parent */
{
terminate = 1;
}
else
{
hypre_MPI_Test(&term_request1, &terminate, &term_status1);
}
if (terminate) /*tell children to terminate */
{
if (myid > 0 ) hypre_MPI_Wait(&request_parent, &status_parent);
for (i=0; i< tree.num_child; i++)
{ /*a blocking send - recv has been posted already*/
hypre_MPI_Send(NULL, 0, HYPRE_MPI_INT, tree.child_id[i],
term_tag, comm);
}
}
}
}
/* end of (!terminate) loop */
/* ----some clean up before post-processing ----*/
if (recv_contact_buf_size > 0)
{
hypre_TFree(recv_contact_buf);
}
hypre_Free((char*)send_response_buf);
hypre_TFree(contact_ptrs);
hypre_TFree(response_ptrs);
/*-----------------POST PROCESSING------------------------------*/
/* more data to receive? */
/* move to recv buffer and update response_recv_buf_starts */
total_size = 0; /*total number of items in response buffer */
num_post_recvs = 0; /*num of post processing recvs to post */
start_ptr = initial_recv_buf;
response_recv_buf_starts[0] = 0; /*already allocated above */
/*an extra loop to determine sizes. This is better than reallocating
the array that will be used in posting the irecvs */
for (i=0; i< num_contacts; i++)
{
int_ptr = (HYPRE_Int *) ((char *) start_ptr + max_response_size_bytes); /*the overhead HYPRE_Int*/
response_message_size = *int_ptr;
response_recv_buf_starts[i+1] =
response_recv_buf_starts[i] + response_message_size;
total_size += response_message_size;
if (max_response_size < response_message_size) num_post_recvs++;
/* start_ptr += max_response_total_bytes; */
start_ptr = (void *) ((char *) start_ptr + max_response_total_bytes);
}
post_recv_requests = hypre_TAlloc(hypre_MPI_Request, num_post_recvs);
post_recv_statuses = hypre_TAlloc(hypre_MPI_Status, num_post_recvs);
post_ptrs = hypre_TAlloc(void *, num_post_recvs);
/*second loop to post any recvs and set up recv_response_buf */
response_recv_buf = hypre_MAlloc(total_size*response_obj_size);
index_ptr = response_recv_buf;
start_ptr = initial_recv_buf;
count = 0;
for (i=0; i< num_contacts; i++)
{
response_message_size =
response_recv_buf_starts[i+1] - response_recv_buf_starts[i];
copy_size = hypre_min(response_message_size, max_response_size);
memcpy(index_ptr, start_ptr, copy_size*response_obj_size);
/* index_ptr += copy_size*response_obj_size; */
index_ptr = (void *) ((char *) index_ptr + copy_size*response_obj_size);
if (max_response_size < response_message_size)
{
size = (response_message_size - max_response_size)*response_obj_size;
post_ptrs[count] = index_ptr;
hypre_MPI_Irecv(post_ptrs[count], size, hypre_MPI_BYTE,
contact_proc_list[i], post_tag,
comm, &post_recv_requests[count]);
count++;
/* index_ptr+=size;*/
index_ptr= (void *) ((char *) index_ptr + size);
}
/* start_ptr += max_response_total_bytes; */
start_ptr = (void *) ((char *) start_ptr + max_response_total_bytes);
}
post_send_statuses = hypre_TAlloc(hypre_MPI_Status, post_array_size);
/*--------------CLEAN UP------------------- */
hypre_Free((char*)initial_recv_buf);
if (num_contacts > 0 )
{
/*these should be done */
hypre_MPI_Waitall(num_contacts, contact_requests, contact_statuses);
hypre_TFree(response_requests);
hypre_TFree(response_statuses);
hypre_TFree(contact_requests);
hypre_TFree(contact_statuses);
}
/* clean up from the post processing - the arrays, requests, etc. */
if (num_post_recvs)
{
hypre_MPI_Waitall(num_post_recvs, post_recv_requests, post_recv_statuses);
hypre_TFree(post_recv_requests);
hypre_TFree(post_recv_statuses);
hypre_TFree(post_ptrs);
}
if (post_array_size)
{
hypre_MPI_Waitall(post_array_size, post_send_requests, post_send_statuses);
hypre_TFree(post_send_requests);
hypre_TFree(post_send_statuses);
for (i=0; i< post_array_size; i++)
{
hypre_Free((char*)post_array[i]);
}
hypre_TFree(post_array);
}
if (num_procs > 1)
{
hypre_TFree(term_requests);
hypre_TFree(term_statuses);
hypre_DestroyBinaryTree(&tree);
}
/* output */
*p_response_recv_buf = response_recv_buf;
*p_response_recv_buf_starts = response_recv_buf_starts;
return hypre_error_flag;
}
| 21,979 | 36.508532 | 104 | c |
AMG | AMG-master/utilities/exchange_data.h | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang ([email protected]) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#ifndef hypre_EXCHANGE_DATA_HEADER
#define hypre_EXCHANGE_DATA_HEADER
#define hypre_BinaryTreeParentId(tree) (tree->parent_id)
#define hypre_BinaryTreeNumChild(tree) (tree->num_child)
#define hypre_BinaryTreeChildIds(tree) (tree->child_id)
#define hypre_BinaryTreeChildId(tree, i) (tree->child_id[i])
/* Binary-tree view of the MPI communicator, used for the termination
   sweep in hypre_DataExchangeList(). */
typedef struct
{
  HYPRE_Int parent_id; /* MPI rank of this process's parent in the tree */
  HYPRE_Int num_child; /* number of children this process has */
  HYPRE_Int *child_id; /* MPI ranks of the children (length num_child) */
} hypre_BinaryTree;
/* In the fill_response() function the user needs to set the recv__buf
and the response_message_size. Memory of size send_response_storage has been
   allocated for the send_buf (in exchange_data) - if more is needed, then
realloc and adjust
the send_response_storage. The realloc amount should be storage+overhead.
If the response is an empty "confirmation" message, then set
response_message_size =0 (and do not modify the send_buf) */
typedef struct
{
   /* user callback that builds the response to one contact message;
      called once per received contact in hypre_DataExchangeList() */
   HYPRE_Int (*fill_response)(void* recv_buf, HYPRE_Int contact_size,
                              HYPRE_Int contact_proc, void* response_obj,
                              MPI_Comm comm, void** response_buf,
                              HYPRE_Int* response_message_size);
   HYPRE_Int send_response_overhead; /*set by exchange data */
   HYPRE_Int send_response_storage; /*storage allocated for send_response_buf*/
   void *data1; /*data fields user may want to access in fill_response */
   void *data2; /* second user data field, same purpose as data1 */
} hypre_DataExchangeResponse;
HYPRE_Int hypre_CreateBinaryTree(HYPRE_Int, HYPRE_Int, hypre_BinaryTree*);
HYPRE_Int hypre_DestroyBinaryTree(hypre_BinaryTree*);
HYPRE_Int hypre_DataExchangeList(HYPRE_Int num_contacts,
HYPRE_Int *contact_proc_list, void *contact_send_buf,
HYPRE_Int *contact_send_buf_starts, HYPRE_Int contact_obj_size,
HYPRE_Int response_obj_size,
hypre_DataExchangeResponse *response_obj, HYPRE_Int max_response_size,
HYPRE_Int rnum, MPI_Comm comm, void **p_response_recv_buf,
HYPRE_Int **p_response_recv_buf_starts);
#endif /* end of header */
| 3,071 | 44.850746 | 92 | h |
AMG | AMG-master/utilities/general.h | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang ([email protected]) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* General structures and values
*
*****************************************************************************/
#ifndef hypre_GENERAL_HEADER
#define hypre_GENERAL_HEADER
/* This allows us to consistently avoid 'int' throughout hypre */
typedef int hypre_int;
typedef long int hypre_longint;
typedef unsigned int hypre_uint;
typedef unsigned long int hypre_ulongint;
/* This allows us to consistently avoid 'double' throughout hypre */
typedef double hypre_double;
/*--------------------------------------------------------------------------
* Define various functions
*--------------------------------------------------------------------------*/
#ifndef hypre_max
#define hypre_max(a,b) (((a)<(b)) ? (b) : (a))
#endif
#ifndef hypre_min
#define hypre_min(a,b) (((a)<(b)) ? (a) : (b))
#endif
#ifndef hypre_abs
#define hypre_abs(a) (((a)>0) ? (a) : -(a))
#endif
#ifndef hypre_round
#define hypre_round(x) ( ((x) < 0.0) ? ((HYPRE_Int)(x - 0.5)) : ((HYPRE_Int)(x + 0.5)) )
#endif
#ifndef hypre_pow2
#define hypre_pow2(i) ( 1 << (i) )
#endif
#endif
| 2,111 | 33.622951 | 89 | h |
AMG | AMG-master/utilities/hypre_complex.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang ([email protected]) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#include "_hypre_utilities.h"
#ifdef HYPRE_COMPLEX
#include <complex.h>
/* Complex conjugate of value - thin wrapper over C99 conj(). */
HYPRE_Complex
hypre_conj( HYPRE_Complex value )
{
   return conj(value);
}
/* Complex magnitude (modulus) of value - thin wrapper over C99 cabs(). */
HYPRE_Real
hypre_cabs( HYPRE_Complex value )
{
   return cabs(value);
}
/* Real part of value - thin wrapper over C99 creal(). */
HYPRE_Real
hypre_creal( HYPRE_Complex value )
{
   return creal(value);
}
/* Imaginary part of value - thin wrapper over C99 cimag(). */
HYPRE_Real
hypre_cimag( HYPRE_Complex value )
{
   return cimag(value);
}
#endif
| 1,306 | 25.673469 | 81 | c |
AMG | AMG-master/utilities/hypre_error.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang ([email protected]) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#include "_hypre_utilities.h"
HYPRE_Int hypre__global_error = 0;
/* Process the error with code ierr raised in the given line of the
given source file. */
void hypre_error_handler(const char *filename, HYPRE_Int line, HYPRE_Int ierr, const char *msg)
{
   /* Accumulate the error bits into the global flag (bitwise OR, so
      earlier errors are never lost). */
   hypre_error_flag |= ierr;

#ifdef HYPRE_PRINT_ERRORS
   /* Optionally report on stderr; the format differs only in whether a
      caller-supplied message string is appended. */
   if (msg)
   {
      hypre_fprintf(
         stderr, "hypre error in file \"%s\", line %d, error code = %d - %s\n",
         filename, line, ierr, msg);
   }
   else
   {
      hypre_fprintf(
         stderr, "hypre error in file \"%s\", line %d, error code = %d\n",
         filename, line, ierr);
   }
#endif
}
/* Return the global error flag: the bitwise OR of all error codes
   recorded so far by hypre_error_handler(). */
HYPRE_Int HYPRE_GetError()
{
   return hypre_error_flag;
}
/* Return nonzero iff any of the bits in hypre_error_code are set in
   the given error value ierr. */
HYPRE_Int HYPRE_CheckError(HYPRE_Int ierr, HYPRE_Int hypre_error_code)
{
   return ierr & hypre_error_code;
}
/* Write a human-readable description of the error code `ierr` into the
 * caller-supplied buffer `msg` (which must be large enough to hold all
 * matching descriptions).
 *
 * Fix: the previous version wrote every matching description to the
 * start of `msg`, so when several error bits were set only the last
 * description survived, and `msg` was left unwritten for unrecognized
 * nonzero codes.  Descriptions are now appended in order, and `msg` is
 * always left as a valid (possibly empty) C string. */
void HYPRE_DescribeError(HYPRE_Int ierr, char *msg)
{
   char *p = msg;

   *p = '\0'; /* valid string even if no known bit is set in ierr */

   if (ierr == 0)
      hypre_sprintf(p,"[No error] ");
   while (*p) p++; /* advance to the current terminator before appending */

   if (ierr & HYPRE_ERROR_GENERIC)
      hypre_sprintf(p,"[Generic error] ");
   while (*p) p++;

   if (ierr & HYPRE_ERROR_MEMORY)
      hypre_sprintf(p,"[Memory error] ");
   while (*p) p++;

   if (ierr & HYPRE_ERROR_ARG)
      hypre_sprintf(p,"[Error in argument %d] ", HYPRE_GetErrorArg());
   while (*p) p++;

   if (ierr & HYPRE_ERROR_CONV)
      hypre_sprintf(p,"[Method did not converge] ");
}
/* Decode the argument index recorded by hypre_error_in_arg(): the index
   is stored in bits 3..7 of the global error flag (see the IARG<<3
   encoding in hypre_error.h), hence the shift by 3 and 5-bit mask. */
HYPRE_Int HYPRE_GetErrorArg()
{
   return (hypre_error_flag>>3 & 31);
}
/* Discard every recorded error by resetting the global error flag.
   Returns whether any error bits remain set afterwards - by
   construction this is always 0. */
HYPRE_Int HYPRE_ClearAllErrors()
{
   HYPRE_Int still_set;

   hypre_error_flag = 0;
   still_set = (hypre_error_flag != 0);

   return still_set;
}
/* Clear only the bits given in hypre_error_code from the global error
   flag, leaving other recorded errors intact.  Returns the requested
   bits still set afterwards - always 0, since they were just cleared. */
HYPRE_Int HYPRE_ClearError(HYPRE_Int hypre_error_code)
{
   HYPRE_Int remaining;

   hypre_error_flag &= ~hypre_error_code;
   remaining = (hypre_error_flag & hypre_error_code);

   return remaining;
}
| 2,517 | 26.977778 | 95 | c |
AMG | AMG-master/utilities/hypre_error.h | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang ([email protected]) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#ifndef hypre_ERROR_HEADER
#define hypre_ERROR_HEADER
/*--------------------------------------------------------------------------
* Global variable used in hypre error checking
*--------------------------------------------------------------------------*/
extern HYPRE_Int hypre__global_error;
#define hypre_error_flag hypre__global_error
/*--------------------------------------------------------------------------
* HYPRE error macros
*--------------------------------------------------------------------------*/
void hypre_error_handler(const char *filename, HYPRE_Int line, HYPRE_Int ierr, const char *msg);
#define hypre_error(IERR) hypre_error_handler(__FILE__, __LINE__, IERR, NULL)
#define hypre_error_w_msg(IERR, msg) hypre_error_handler(__FILE__, __LINE__, IERR, msg)
#define hypre_error_in_arg(IARG) hypre_error(HYPRE_ERROR_ARG | IARG<<3)
#ifdef NDEBUG
#define hypre_assert(EX)
#else
#define hypre_assert(EX) if (!(EX)) {hypre_fprintf(stderr,"hypre_assert failed: %s\n", #EX); hypre_error(1);}
#endif
#endif
| 1,958 | 43.522727 | 109 | h |
AMG | AMG-master/utilities/hypre_hopscotch_hash.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Jongsoo Park et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
************************************************************************EHEADER*/
#include "hypre_hopscotch_hash.h"
/* Return the smallest power of two that is >= value (1 for value <= 1). */
static HYPRE_Int NearestPowerOfTwo( HYPRE_Int value )
{
  HYPRE_Int pow2;

  /* Keep doubling until we reach or pass the requested value. */
  for (pow2 = 1; pow2 < value; pow2 <<= 1)
  {
  }

  return pow2;
}
/* Reset a hopscotch bucket to the empty state: no neighborhood bits
   set and the hash slot marked empty. */
static void InitBucket(hypre_HopscotchBucket *b)
{
  b->hopInfo = 0;
  b->hash = HYPRE_HOPSCOTCH_HASH_EMPTY;
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
/* Initialize one lock segment: zero its timestamp and create its
   OpenMP lock (concurrent build only). */
static void InitSegment(hypre_HopscotchSegment *s)
{
  s->timestamp = 0;
  omp_init_lock(&s->lock);
}
/* Release the OpenMP lock owned by a segment (counterpart of
   InitSegment). */
static void DestroySegment(hypre_HopscotchSegment *s)
{
  omp_destroy_lock(&s->lock);
}
#endif
/* Initialize the unordered integer set `s`.
 *
 * inCapacity       - requested minimum capacity (rounded up internally)
 * concurrencyLevel - desired number of lock segments; rounded up to a
 *                    power of two (segments only exist in the
 *                    HYPRE_CONCURRENT_HOPSCOTCH build)
 *
 * The bucket count is the next power of two >= inCapacity+4096, plus
 * HYPRE_HOPSCOTCH_HASH_INSERT_RANGE + 1 extra buckets so insertions
 * near the end of the array have a full neighborhood to probe. */
void hypre_UnorderedIntSetCreate( hypre_UnorderedIntSet *s,
                                  HYPRE_Int inCapacity,
                                  HYPRE_Int concurrencyLevel)
{
  /* segmentMask is (power-of-two segment count) - 1, usable as a mask */
  s->segmentMask = NearestPowerOfTwo(concurrencyLevel) - 1;
  if (inCapacity < s->segmentMask + 1)
  {
    inCapacity = s->segmentMask + 1;
  }
  //ADJUST INPUT ............................
  HYPRE_Int adjInitCap = NearestPowerOfTwo(inCapacity+4096);
  HYPRE_Int num_buckets = adjInitCap + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE + 1;
  s->bucketMask = adjInitCap - 1;
  HYPRE_Int i;
  //ALLOCATE THE SEGMENTS ...................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
  s->segments = hypre_TAlloc(hypre_HopscotchSegment, s->segmentMask + 1);
  for (i = 0; i <= s->segmentMask; ++i)
  {
    InitSegment(&s->segments[i]);
  }
#endif
  /* The set stores hop info, keys, and hashes in three parallel arrays. */
  s->hopInfo = hypre_TAlloc(hypre_uint, num_buckets);
  s->key = hypre_TAlloc(HYPRE_Int, num_buckets);
  s->hash = hypre_TAlloc(HYPRE_Int, num_buckets);
  /* Mark every bucket empty; parallelized in the concurrent build. */
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
#pragma omp parallel for
#endif
  for (i = 0; i < num_buckets; ++i)
  {
    s->hopInfo[i] = 0;
    s->hash[i] = HYPRE_HOPSCOTCH_HASH_EMPTY;
  }
}
/* Initialize the unordered integer map `m`.  Sizing logic is identical
 * to hypre_UnorderedIntSetCreate(): the bucket count is the next power
 * of two >= inCapacity+4096 plus HYPRE_HOPSCOTCH_HASH_INSERT_RANGE + 1
 * overflow buckets; concurrencyLevel (rounded up to a power of two)
 * sets the lock-segment count in the concurrent build.  Unlike the
 * set, the map stores its buckets in a single array of
 * hypre_HopscotchBucket structs. */
void hypre_UnorderedIntMapCreate( hypre_UnorderedIntMap *m,
                                  HYPRE_Int inCapacity,
                                  HYPRE_Int concurrencyLevel)
{
  m->segmentMask = NearestPowerOfTwo(concurrencyLevel) - 1;
  if (inCapacity < m->segmentMask + 1)
  {
    inCapacity = m->segmentMask + 1;
  }
  //ADJUST INPUT ............................
  HYPRE_Int adjInitCap = NearestPowerOfTwo(inCapacity+4096);
  HYPRE_Int num_buckets = adjInitCap + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE + 1;
  m->bucketMask = adjInitCap - 1;
  HYPRE_Int i;
  //ALLOCATE THE SEGMENTS ...................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
  m->segments = hypre_TAlloc(hypre_HopscotchSegment, m->segmentMask + 1);
  for (i = 0; i <= m->segmentMask; i++)
  {
    InitSegment(&m->segments[i]);
  }
#endif
  m->table = hypre_TAlloc(hypre_HopscotchBucket, num_buckets);
  /* Mark every bucket empty; parallelized in the concurrent build. */
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
#pragma omp parallel for
#endif
  for (i = 0; i < num_buckets; i++)
  {
    InitBucket(&m->table[i]);
  }
}
/* Free all storage owned by the set: the three parallel bucket arrays
   and, in the concurrent build, the lock segments (destroying each
   segment's OpenMP lock first). */
void hypre_UnorderedIntSetDestroy( hypre_UnorderedIntSet *s )
{
  hypre_TFree(s->hopInfo);
  hypre_TFree(s->key);
  hypre_TFree(s->hash);
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
  HYPRE_Int i;
  for (i = 0; i <= s->segmentMask; i++)
  {
    DestroySegment(&s->segments[i]);
  }
  hypre_TFree(s->segments);
#endif
}
/* Free all storage owned by the map: the bucket table and, in the
   concurrent build, the lock segments (destroying each segment's
   OpenMP lock first). */
void hypre_UnorderedIntMapDestroy( hypre_UnorderedIntMap *m)
{
  hypre_TFree(m->table);
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
  HYPRE_Int i;
  for (i = 0; i <= m->segmentMask; i++)
  {
    DestroySegment(&m->segments[i]);
  }
  hypre_TFree(m->segments);
#endif
}
/* Copy every key currently stored in the set into a newly allocated
 * array (caller frees).  On return *len holds the number of keys.
 *
 * Two-pass scheme, parallelized with OpenMP in the concurrent build:
 *   pass 1: each thread counts the non-empty buckets in its range;
 *           hypre_prefix_sum then turns the per-thread counts into
 *           write offsets and the total (presumably an exclusive
 *           prefix sum leaving cnt = this thread's start offset and
 *           *len = grand total - TODO confirm against its definition)
 *   pass 2: each thread scatters its keys at its offset.
 * The master thread allocates the output between the two barriers. */
HYPRE_Int *hypre_UnorderedIntSetCopyToArray( hypre_UnorderedIntSet *s, HYPRE_Int *len )
{
  /*HYPRE_Int prefix_sum_workspace[hypre_NumThreads() + 1];*/
  HYPRE_Int *prefix_sum_workspace;
  HYPRE_Int *ret_array = NULL;
  prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, hypre_NumThreads() + 1);
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
#pragma omp parallel
#endif
  {
    /* scan the whole table including the overflow buckets */
    HYPRE_Int n = s->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE;
    HYPRE_Int i_begin, i_end;
    hypre_GetSimpleThreadPartition(&i_begin, &i_end, n);
    HYPRE_Int cnt = 0;
    HYPRE_Int i;
    /* pass 1: count occupied buckets in this thread's range */
    for (i = i_begin; i < i_end; i++)
    {
      if (HYPRE_HOPSCOTCH_HASH_EMPTY != s->hash[i]) cnt++;
    }
    hypre_prefix_sum(&cnt, len, prefix_sum_workspace);
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
#pragma omp barrier
#pragma omp master
#endif
    {
      /* single allocation of the full result, sized by the total */
      ret_array = hypre_TAlloc(HYPRE_Int, *len);
    }
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
#pragma omp barrier
#endif
    /* pass 2: scatter this thread's keys starting at its offset */
    for (i = i_begin; i < i_end; i++)
    {
      if (HYPRE_HOPSCOTCH_HASH_EMPTY != s->hash[i]) ret_array[cnt++] = s->key[i];
    }
  }
  hypre_TFree(prefix_sum_workspace);
  return ret_array;
}
| 5,274 | 25.243781 | 87 | c |
AMG | AMG-master/utilities/hypre_memory.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang ([email protected]) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* Memory management utilities
*
*****************************************************************************/
#include "_hypre_utilities.h"
#ifdef HYPRE_USE_UMALLOC
#undef HYPRE_USE_UMALLOC
#endif
/******************************************************************************
*
* Standard routines
*
*****************************************************************************/
/*--------------------------------------------------------------------------
* hypre_OutOfMemory
*--------------------------------------------------------------------------*/
/* Report a failed allocation of `size` bytes on stdout and record a
   hypre memory error; execution continues (callers typically return
   the NULL pointer they got). */
HYPRE_Int
hypre_OutOfMemory( size_t size )
{
   hypre_printf("Out of memory trying to allocate %d bytes\n", (HYPRE_Int) size);
   fflush(stdout);

   hypre_error(HYPRE_ERROR_MEMORY);
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_MAlloc
*--------------------------------------------------------------------------*/
char *
hypre_MAlloc( size_t size )
{
   /* malloc wrapper: returns NULL for size == 0, and reports (but does
      not abort on) allocation failure via hypre_OutOfMemory(). */
   void *ptr;

   if (size > 0)
   {
#ifdef HYPRE_USE_UMALLOC
      /* "user malloc" path - note HYPRE_USE_UMALLOC is #undef'd at the
         top of this file, so this branch is currently compiled out */
      HYPRE_Int threadid = hypre_GetThreadID();

      ptr = _umalloc_(size);
#else
      ptr = malloc(size);
#endif

#if 1
      if (ptr == NULL)
      {
         hypre_OutOfMemory(size);
      }
#endif
   }
   else
   {
      ptr = NULL;
   }

   return (char*)ptr;
}
/*--------------------------------------------------------------------------
* hypre_CAlloc
*--------------------------------------------------------------------------*/
char *
hypre_CAlloc( size_t count,
              size_t elt_size )
{
   /* calloc wrapper: returns zero-initialized storage for count
      elements of elt_size bytes, NULL when the product is 0, and
      reports allocation failure via hypre_OutOfMemory(). */
   void *ptr;
   /* size is used only for the zero test and the error report;
      calloc itself performs the (overflow-checked) multiply */
   size_t size = count*elt_size;

   if (size > 0)
   {
#ifdef HYPRE_USE_UMALLOC
      /* compiled out: HYPRE_USE_UMALLOC is #undef'd at the top of file */
      HYPRE_Int threadid = hypre_GetThreadID();

      ptr = _ucalloc_(count, elt_size);
#else
      ptr = calloc(count, elt_size);
#endif

#if 1
      if (ptr == NULL)
      {
         hypre_OutOfMemory(size);
      }
#endif
   }
   else
   {
      ptr = NULL;
   }

   return(char*) ptr;
}
/*--------------------------------------------------------------------------
* hypre_ReAlloc
*--------------------------------------------------------------------------*/
char *
hypre_ReAlloc( char *ptr,
               size_t size )
{
   /* realloc wrapper: NULL ptr behaves like malloc; allocation failure
      with size > 0 is reported via hypre_OutOfMemory(). */
#ifdef HYPRE_USE_UMALLOC
   /* compiled out: HYPRE_USE_UMALLOC is #undef'd at the top of file */
   if (ptr == NULL)
   {
      ptr = hypre_MAlloc(size);
   }
   else if (size == 0)
   {
      /* NOTE(review): the freed pointer is still returned to the
         caller here - looks like callers are expected not to use the
         result when size == 0; confirm before relying on it */
      hypre_Free(ptr);
   }
   else
   {
      HYPRE_Int threadid = hypre_GetThreadID();
      ptr = (char*)_urealloc_(ptr, size);
   }
#else
   if (ptr == NULL)
   {
      ptr = (char*)malloc(size);
   }
   else
   {
      /* realloc(ptr, 0) is implementation-defined; callers pass
         size > 0 in practice */
      ptr = (char*)realloc(ptr, size);
   }
#endif

#if 1
   if ((ptr == NULL) && (size > 0))
   {
      hypre_OutOfMemory(size);
   }
#endif

   return ptr;
}
/*--------------------------------------------------------------------------
* hypre_Free
*--------------------------------------------------------------------------*/
void
hypre_Free( char *ptr )
{
   /* free wrapper: safe to call with NULL (no-op). */
   if (ptr)
   {
#ifdef HYPRE_USE_UMALLOC
      /* compiled out: HYPRE_USE_UMALLOC is #undef'd at the top of file */
      HYPRE_Int threadid = hypre_GetThreadID();

      _ufree_(ptr);
#else
      free(ptr);
#endif
   }
}
| 4,033 | 21.164835 | 81 | c |
AMG | AMG-master/utilities/hypre_memory.h | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang ([email protected]) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* Header file for memory management utilities
*
*****************************************************************************/
#ifndef hypre_MEMORY_HEADER
#define hypre_MEMORY_HEADER
#include <stdio.h>
#include <stdlib.h>
#ifdef __cplusplus
extern "C" {
#endif
/*--------------------------------------------------------------------------
* Use "Debug Malloc Library", dmalloc
*--------------------------------------------------------------------------*/
#ifdef HYPRE_MEMORY_DMALLOC
#define hypre_InitMemoryDebug(id) hypre_InitMemoryDebugDML(id)
#define hypre_FinalizeMemoryDebug() hypre_FinalizeMemoryDebugDML()
#define hypre_TAlloc(type, count) \
( (type *)hypre_MAllocDML((size_t)(sizeof(type) * (count)),\
__FILE__, __LINE__) )
#define hypre_CTAlloc(type, count) \
( (type *)hypre_CAllocDML((size_t)(count), (size_t)sizeof(type),\
__FILE__, __LINE__) )
#define hypre_TReAlloc(ptr, type, count) \
( (type *)hypre_ReAllocDML((char *)ptr,\
(size_t)(sizeof(type) * (count)),\
__FILE__, __LINE__) )
#define hypre_TFree(ptr) \
( hypre_FreeDML((char *)ptr, __FILE__, __LINE__), ptr = NULL )
/*--------------------------------------------------------------------------
* Use standard memory routines
*--------------------------------------------------------------------------*/
#else
#define hypre_InitMemoryDebug(id)
#define hypre_FinalizeMemoryDebug()
#define hypre_TAlloc(type, count) \
( (type *)hypre_MAlloc((size_t)(sizeof(type) * (count))) )
#define hypre_CTAlloc(type, count) \
( (type *)hypre_CAlloc((size_t)(count), (size_t)sizeof(type)) )
#define hypre_TReAlloc(ptr, type, count) \
( (type *)hypre_ReAlloc((char *)ptr, (size_t)(sizeof(type) * (count))) )
#define hypre_TFree(ptr) \
( hypre_Free((char *)ptr), ptr = NULL )
#endif
#define hypre_SharedTAlloc(type, count) hypre_TAlloc(type, (count))
#define hypre_SharedCTAlloc(type, count) hypre_CTAlloc(type, (count))
#define hypre_SharedTReAlloc(type, count) hypre_TReAlloc(type, (count))
#define hypre_SharedTFree(ptr) hypre_TFree(ptr)
/*--------------------------------------------------------------------------
 * Prototypes
 *--------------------------------------------------------------------------*/

/* hypre_memory.c -- plain allocation wrappers used by the macros above */
HYPRE_Int hypre_OutOfMemory ( size_t size );
char *hypre_MAlloc ( size_t size );
char *hypre_CAlloc ( size_t count , size_t elt_size );
char *hypre_ReAlloc ( char *ptr , size_t size );
void hypre_Free ( char *ptr );
char *hypre_SharedMAlloc ( size_t size );
char *hypre_SharedCAlloc ( size_t count , size_t elt_size );
char *hypre_SharedReAlloc ( char *ptr , size_t size );
void hypre_SharedFree ( char *ptr );
HYPRE_Real *hypre_IncrementSharedDataPtr ( HYPRE_Real *ptr , size_t size );

/* memory_dmalloc.c -- Debug Malloc Library variants (HYPRE_MEMORY_DMALLOC) */
HYPRE_Int hypre_InitMemoryDebugDML( HYPRE_Int id );
HYPRE_Int hypre_FinalizeMemoryDebugDML( void );
char *hypre_MAllocDML( HYPRE_Int size , char *file , HYPRE_Int line );
char *hypre_CAllocDML( HYPRE_Int count , HYPRE_Int elt_size , char *file , HYPRE_Int line );
char *hypre_ReAllocDML( char *ptr , HYPRE_Int size , char *file , HYPRE_Int line );
void hypre_FreeDML( char *ptr , char *file , HYPRE_Int line );
#ifdef __cplusplus
}
#endif
#endif
| 4,292 | 35.692308 | 92 | h |
AMG | AMG-master/utilities/hypre_merge_sort.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Jongsoo Park et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#include "_hypre_utilities.h"
#include "hypre_hopscotch_hash.h"
#include "../seq_mv/HYPRE_seq_mv.h"
//#define DBG_MERGE_SORT
#ifdef DBG_MERGE_SORT
#include <assert.h>
#include <algorithm>
#include <unordered_map>
#endif
/* Exchange two lvalues of type T via a temporary. */
#define SWAP(T, a, b) do { T tmp = a; a = b; b = tmp; } while (0)
/*
 * Sequential two-way merge of the sorted ranges [first1, last1) and
 * [first2, last2) into out.  Stable in the sense that on equal keys the
 * element from the first range is emitted first.
 */
static void hypre_merge(HYPRE_Int *first1, HYPRE_Int *last1, HYPRE_Int *first2, HYPRE_Int *last2, HYPRE_Int *out)
{
   /* Interleave while both ranges still have elements. */
   while (first1 != last1 && first2 != last2)
   {
      if (*first2 < *first1)
      {
         *out++ = *first2++;
      }
      else
      {
         *out++ = *first1++;
      }
   }
   /* At most one of the two tails below is non-empty; copy it through. */
   while (first1 != last1)
   {
      *out++ = *first1++;
   }
   while (first2 != last2)
   {
      *out++ = *first2++;
   }
}
/*
 * Binary-search helper for kth_element: given sorted arrays a1 (length n1)
 * and a2 (length n2), find counts (*out1, *out2) with *out1 + *out2 == k
 * such that a1[0:*out1) and a2[0:*out2) hold k smallest elements.
 * The candidate count taken from a1 is searched within [left, right].
 */
static void kth_element_(
   HYPRE_Int *out1, HYPRE_Int *out2,
   HYPRE_Int *a1, HYPRE_Int *a2,
   HYPRE_Int left, HYPRE_Int right, HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k)
{
   while (1)
   {
      HYPRE_Int i = (left + right)/2; // right < k -> i < k
      HYPRE_Int j = k - i - 1;        // complementary index into a2

#ifdef DBG_MERGE_SORT
      assert(left <= right && right <= k);
      assert(i < k); // i == k implies left == right == k that can never happen
      assert(j >= 0 && j < n2);
#endif

      /* a1[i] fits between a2[j] and a2[j+1]: take i from a1, j+1 from a2. */
      if ((j == -1 || a1[i] >= a2[j]) && (j == n2 - 1 || a1[i] <= a2[j + 1]))
      {
         *out1 = i; *out2 = j + 1;
         return;
      }
      /* a2[j] fits between a1[i] and a1[i+1]: take i+1 from a1, j from a2. */
      else if (j >= 0 && a2[j] >= a1[i] && (i == n1 - 1 || a2[j] <= a1[i + 1]))
      {
         *out1 = i + 1; *out2 = j;
         return;
      }
      else if (a1[i] > a2[j] && j != n2 - 1 && a1[i] > a2[j+1])
      {
         // search in left half of a1
         right = i - 1;
      }
      else
      {
         // search in right half of a1
         left = i + 1;
      }
   }
}
/**
 * Partition the input so that
 * a1[0:*out1) and a2[0:*out2) contain the smallest k elements
 * of the two sorted arrays (so *out1 + *out2 == k on return).
 *
 * a1/n1, a2/n2: the two sorted arrays and their lengths
 * k:            number of smallest elements to split off
 */
static void kth_element(
   HYPRE_Int *out1, HYPRE_Int *out2,
   HYPRE_Int *a1, HYPRE_Int *a2, HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k)
{
   // either of the inputs is empty
   if (n1 == 0)
   {
      *out1 = 0; *out2 = k;
      return;
   }
   if (n2 == 0)
   {
      *out1 = k; *out2 = 0;
      return;
   }
   if (k >= n1 + n2)
   {
      *out1 = n1; *out2 = n2;
      return;
   }

   // one is greater than the other
   if (k < n1 && a1[k] <= a2[0])
   {
      *out1 = k; *out2 = 0;
      return;
   }
   if (k - n1 >= 0 && a2[k - n1] >= a1[n1 - 1])
   {
      *out1 = n1; *out2 = k - n1;
      return;
   }
   if (k < n2 && a2[k] <= a1[0])
   {
      *out1 = 0; *out2 = k;
      return;
   }
   if (k - n2 >= 0 && a1[k - n2] >= a2[n2 - 1])
   {
      *out1 = k - n2; *out2 = n2;
      return;
   }
   // now k > 0

   // faster to do binary search on the shorter sequence
   if (n1 > n2)
   {
      SWAP(HYPRE_Int, n1, n2);
      SWAP(HYPRE_Int *, a1, a2);
      SWAP(HYPRE_Int *, out1, out2);
   }

   if (k < (n1 + n2)/2)
   {
      kth_element_(out1, out2, a1, a2, 0, hypre_min(n1 - 1, k), n1, n2, k);
   }
   else
   {
      // when k is big, faster to find (n1 + n2 - k)th biggest element
      HYPRE_Int offset1 = hypre_max(k - n2, 0), offset2 = hypre_max(k - n1, 0);
      HYPRE_Int new_k = k - offset1 - offset2;

      HYPRE_Int new_n1 = hypre_min(n1 - offset1, new_k + 1);
      HYPRE_Int new_n2 = hypre_min(n2 - offset2, new_k + 1);
      kth_element_(out1, out2, a1 + offset1, a2 + offset2, 0, new_n1 - 1, new_n1, new_n2, new_k);

      *out1 += offset1;
      *out2 += offset2;
   }
#ifdef DBG_MERGE_SORT
   assert(*out1 + *out2 == k);
#endif
}
/**
 * Cooperative merge: each participating thread merges its share of the two
 * sorted ranges [first1, last1) and [first2, last2) into out.  Thread t
 * computes output ranks [begin_rank, end_rank) via kth_element and merges
 * only the corresponding slices, so all threads together produce the full
 * merged sequence without overlap.
 *
 * @param num_threads number of threads that participate in this merge
 * @param my_thread_num thread id (zero-based) among the threads that participate in this merge
 */
static void hypre_parallel_merge(
   HYPRE_Int *first1, HYPRE_Int *last1, HYPRE_Int *first2, HYPRE_Int *last2,
   HYPRE_Int *out,
   HYPRE_Int num_threads, HYPRE_Int my_thread_num)
{
   HYPRE_Int n1 = last1 - first1;
   HYPRE_Int n2 = last2 - first2;
   HYPRE_Int n = n1 + n2;
   HYPRE_Int n_per_thread = (n + num_threads - 1)/num_threads;
   HYPRE_Int begin_rank = hypre_min(n_per_thread*my_thread_num, n);
   HYPRE_Int end_rank = hypre_min(begin_rank + n_per_thread, n);

#ifdef DBG_MERGE_SORT
   assert(std::is_sorted(first1, last1));
   assert(std::is_sorted(first2, last2));
#endif

   HYPRE_Int begin1, begin2, end1, end2;
   kth_element(&begin1, &begin2, first1, first2, n1, n2, begin_rank);
   kth_element(&end1, &end2, first1, first2, n1, n2, end_rank);

   /* With duplicate keys, adjacent kth_element splits can disagree on which
    * array supplies the tied elements; nudge the boundaries so this thread's
    * slices stay consistent (begin1 <= end1 and begin2 <= end2). */
   while (begin1 > end1 && begin1 > 0 && begin2 < n2 && first1[begin1 - 1] == first2[begin2])
   {
#ifdef DBG_MERGE_SORT
      printf("%s:%d\n", __FILE__, __LINE__);
#endif
      begin1--; begin2++;
   }
   while (begin2 > end2 && end1 > 0 && end2 < n2 && first1[end1 - 1] == first2[end2])
   {
#ifdef DBG_MERGE_SORT
      printf("%s:%d\n", __FILE__, __LINE__);
#endif
      end1--; end2++;
   }

#ifdef DBG_MERGE_SORT
   assert(begin1 <= end1);
   assert(begin2 <= end2);
#endif

   /* Sequentially merge this thread's two slices into its output window. */
   hypre_merge(
      first1 + begin1, first1 + end1,
      first2 + begin2, first2 + end2,
      out + begin1 + begin2);

#ifdef DBG_MERGE_SORT
   assert(std::is_sorted(out + begin1 + begin2, out + end1 + end2));
#endif
}
/**
 * Thread-parallel merge sort of in[0:len).
 *
 * @param in   the array to sort (used as one of the two ping-pong buffers)
 * @param temp scratch buffer of the same length
 * @param len  number of elements
 * @param out  on return points at whichever of in/temp holds the sorted
 *             data (which one depends on the number of merge rounds)
 */
void hypre_merge_sort(HYPRE_Int *in, HYPRE_Int *temp, HYPRE_Int len, HYPRE_Int **out)
{
   if (0 == len) return;
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime();
#endif

#ifdef DBG_MERGE_SORT
   HYPRE_Int *dbg_buf = new HYPRE_Int[len];
   std::copy(in, in + len, dbg_buf);
   std::sort(dbg_buf, dbg_buf + len);
#endif

   // HYPRE_Int thread_private_len[hypre_NumThreads()];
   // HYPRE_Int out_len = 0;

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
   {
      HYPRE_Int num_threads = hypre_NumActiveThreads();
      HYPRE_Int my_thread_num = hypre_GetThreadNum();

      // thread-private sort: each thread qsorts its contiguous chunk
      HYPRE_Int i_per_thread = (len + num_threads - 1)/num_threads;
      HYPRE_Int i_begin = hypre_min(i_per_thread*my_thread_num, len);
      HYPRE_Int i_end = hypre_min(i_begin + i_per_thread, len);

      hypre_qsort0(in, i_begin, i_end - 1);

      // merge sorted sequences: pairs of thread groups merge their runs,
      // ping-ponging between in_buf and out_buf each round
      HYPRE_Int in_group_size;
      HYPRE_Int *in_buf = in;
      HYPRE_Int *out_buf = temp;
      for (in_group_size = 1; in_group_size < num_threads; in_group_size *= 2)
      {
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

         // merge 2 in-groups into 1 out-group
         HYPRE_Int out_group_size = in_group_size*2;
         HYPRE_Int group_leader = my_thread_num/out_group_size*out_group_size;
         // HYPRE_Int group_sub_leader = hypre_min(group_leader + in_group_size, num_threads - 1);
         HYPRE_Int id_in_group = my_thread_num%out_group_size;
         HYPRE_Int num_threads_in_group =
            hypre_min(group_leader + out_group_size, num_threads) - group_leader;

         HYPRE_Int in_group1_begin = hypre_min(i_per_thread*group_leader, len);
         HYPRE_Int in_group1_end = hypre_min(in_group1_begin + i_per_thread*in_group_size, len);

         HYPRE_Int in_group2_begin = hypre_min(in_group1_begin + i_per_thread*in_group_size, len);
         HYPRE_Int in_group2_end = hypre_min(in_group2_begin + i_per_thread*in_group_size, len);

         hypre_parallel_merge(
            in_buf + in_group1_begin, in_buf + in_group1_end,
            in_buf + in_group2_begin, in_buf + in_group2_end,
            out_buf + in_group1_begin,
            num_threads_in_group,
            id_in_group);

         // swap the roles of the two buffers for the next round
         HYPRE_Int *temp = in_buf;
         in_buf = out_buf;
         out_buf = temp;
      }

      *out = in_buf;
   } /* omp parallel */

#ifdef DBG_MERGE_SORT
   assert(std::equal(*out, *out + len, dbg_buf));

   delete[] dbg_buf;
#endif
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime();
#endif
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
/**
 * Sort in[0:len) and build a concurrent hash map from each value to its
 * position in the sorted output, i.e. inverse_map[(*out)[i]] == i.
 *
 * Ownership note: a scratch buffer is allocated here; whichever of the two
 * buffers does NOT become *out is freed, so the caller keeps exactly one
 * array (and must not reuse `in` afterwards).
 */
void hypre_sort_and_create_inverse_map(
  HYPRE_Int *in, HYPRE_Int len, HYPRE_Int **out, hypre_UnorderedIntMap *inverse_map)
{
   if (len == 0)
   {
      return;
   }
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime();
#endif

   HYPRE_Int *temp = hypre_TAlloc(HYPRE_Int, len);
   hypre_merge_sort(in, temp, len, out);
   /* 2x capacity to keep the hopscotch table sparse enough for fast puts */
   hypre_UnorderedIntMapCreate(inverse_map, 2*len, 16*hypre_NumThreads());
   HYPRE_Int i;
#pragma omp parallel for HYPRE_SMP_SCHEDULE
   for (i = 0; i < len; i++)
   {
      /* PutIfAbsent returns the previous value; EMPTY means no duplicate */
      HYPRE_Int old = hypre_UnorderedIntMapPutIfAbsent(inverse_map, (*out)[i], i);
      assert(old == HYPRE_HOPSCOTCH_HASH_EMPTY);
#ifdef DBG_MERGE_SORT
      if (hypre_UnorderedIntMapGet(inverse_map, (*out)[i]) != i)
      {
         fprintf(stderr, "%d %d\n", i, (*out)[i]);
         assert(false);
      }
#endif
   }

#ifdef DBG_MERGE_SORT
   std::unordered_map<HYPRE_Int, HYPRE_Int> inverse_map2(len);
   for (HYPRE_Int i = 0; i < len; ++i) {
      inverse_map2[(*out)[i]] = i;
      if (hypre_UnorderedIntMapGet(inverse_map, (*out)[i]) != i)
      {
         fprintf(stderr, "%d %d\n", i, (*out)[i]);
         assert(false);
      }
   }
   assert(hypre_UnorderedIntMapSize(inverse_map) == len);
#endif

   /* free whichever buffer did not become the sorted output */
   if (*out == in)
   {
      hypre_TFree(temp);
   }
   else
   {
      hypre_TFree(in);
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime();
#endif
}
#endif
/* vim: set tabstop=8 softtabstop=3 sw=3 expandtab: */
| 10,278 | 26.706199 | 113 | c |
AMG | AMG-master/utilities/hypre_printf.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang ([email protected]) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#include "_hypre_utilities.h"
#include <stdarg.h>
#include <stdio.h>
// #ifdef HYPRE_BIGINT
/* these prototypes are missing by default for some compilers */
int vscanf( const char *format , va_list arg );
int vfscanf( FILE *stream , const char *format, va_list arg );
int vsscanf( const char *s , const char *format, va_list arg );
/*--------------------------------------------------------------------------
 * new_format
 *
 * Rewrites a printf/scanf format string so the conversion specifiers match
 * hypre's configured types: with HYPRE_BIGINT, %d/%i (and %ld/%lld) become
 * %lld; floating conversions get 'l' (default double) or 'L'
 * (HYPRE_LONG_DOUBLE), or no modifier (HYPRE_SINGLE).  The caller owns the
 * returned string and releases it with free_format().  Always returns 0.
 *--------------------------------------------------------------------------*/
HYPRE_Int
new_format( const char *format,
            char **newformat_ptr )
{
   const char *fp;
   char *newformat, *nfp;
   HYPRE_Int newformatlen;
   HYPRE_Int foundpercent = 0;

   newformatlen = 2*strlen(format)+1; /* worst case is all %d's to %lld's */
   newformat = hypre_TAlloc(char, newformatlen);

   nfp = newformat;
   for (fp = format; *fp != '\0'; fp++)
   {
      /* Test "inside a conversion" BEFORE testing for '%', so that the
       * second '%' of a literal "%%" is consumed by the switch below
       * (case '%') instead of starting a bogus new conversion. */
      if (foundpercent)
      {
         if (*fp == 'l')
         {
            fp++; /* remove 'l' and maybe add it back in switch statement */
            if (*fp == 'l')
            {
               fp++; /* remove second 'l' if present */
            }
            if (*fp == '\0')
            {
               /* Fix: a format ending in "%l" or "%ll" previously walked
                * past the terminating '\0' and read out of bounds. */
               break;
            }
         }
         switch(*fp)
         {
            case 'd':
            case 'i':
#if defined(HYPRE_BIGINT)
               *nfp = 'l'; nfp++;
               *nfp = 'l'; nfp++;
#endif
               foundpercent = 0; break;
            case 'f':
            case 'e':
            case 'E':
            case 'g':
            case 'G':
#if defined(HYPRE_SINGLE)          /* no modifier */
#elif defined(HYPRE_LONG_DOUBLE)   /* modify with 'L' */
               *nfp = 'L'; nfp++;
#else                              /* modify with 'l' (default is _double_) */
               *nfp = 'l'; nfp++;
#endif
               foundpercent = 0; break;
            case 'c':
            case 'n':
            case 'o':
            case 'p':
            case 's':
            case 'u':
            case 'x':
            case 'X':
            case '%':
               foundpercent = 0; break;
         }
      }
      else if (*fp == '%')
      {
         foundpercent = 1;
      }
      *nfp = *fp; nfp++;
   }
   *nfp = *fp; /* copy the terminating '\0' */

   *newformat_ptr = newformat;

/*   printf("\nNEWFORMAT: %s\n", *newformat_ptr);*/

   return 0;
}
/* Release a format string produced by new_format().  Always returns 0. */
HYPRE_Int
free_format( char *newformat )
{
   hypre_TFree(newformat);

   return 0;
}
/* printf functions */
/* printf to stdout with hypre-type-aware conversion specifiers. */
HYPRE_Int
hypre_printf( const char *format, ...)
{
   char      *bigformat;
   va_list    args;
   HYPRE_Int  result;

   va_start(args, format);
   new_format(format, &bigformat);
   result = vprintf(bigformat, args);
   va_end(args);
   free_format(bigformat);

   return result;
}
/* fprintf to an arbitrary stream with hypre-type-aware conversions. */
HYPRE_Int
hypre_fprintf( FILE *stream, const char *format, ...)
{
   char      *bigformat;
   va_list    args;
   HYPRE_Int  result;

   va_start(args, format);
   new_format(format, &bigformat);
   result = vfprintf(stream, bigformat, args);
   va_end(args);
   free_format(bigformat);

   return result;
}
/* sprintf into caller-provided buffer s with hypre-type-aware conversions. */
HYPRE_Int
hypre_sprintf( char *s, const char *format, ...)
{
   char      *bigformat;
   va_list    args;
   HYPRE_Int  result;

   va_start(args, format);
   new_format(format, &bigformat);
   result = vsprintf(s, bigformat, args);
   va_end(args);
   free_format(bigformat);

   return result;
}
/* scanf functions */
/* scanf from stdin with hypre-type-aware conversion specifiers. */
HYPRE_Int
hypre_scanf( const char *format, ...)
{
   char      *bigformat;
   va_list    args;
   HYPRE_Int  result;

   va_start(args, format);
   new_format(format, &bigformat);
   result = vscanf(bigformat, args);
   va_end(args);
   free_format(bigformat);

   return result;
}
/* fscanf from an arbitrary stream with hypre-type-aware conversions. */
HYPRE_Int
hypre_fscanf( FILE *stream, const char *format, ...)
{
   char      *bigformat;
   va_list    args;
   HYPRE_Int  result;

   va_start(args, format);
   new_format(format, &bigformat);
   result = vfscanf(stream, bigformat, args);
   va_end(args);
   free_format(bigformat);

   return result;
}
/* sscanf from string s with hypre-type-aware conversion specifiers. */
HYPRE_Int
hypre_sscanf( char *s, const char *format, ...)
{
   char      *bigformat;
   va_list    args;
   HYPRE_Int  result;

   va_start(args, format);
   new_format(format, &bigformat);
   result = vsscanf(s, bigformat, args);
   va_end(args);
   free_format(bigformat);

   return result;
}
// #else
//
// /* this is used only to eliminate compiler warnings */
// HYPRE_Int hypre_printf_empty;
//
// #endif
| 4,972 | 22.023148 | 81 | c |
AMG | AMG-master/utilities/hypre_qsort.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang ([email protected]) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#include <math.h>
#include "_hypre_utilities.h"
/*--------------------------------------------------------------------------
 * hypre_swap: exchange entries i and j of the integer array v.
 *--------------------------------------------------------------------------*/

void hypre_swap( HYPRE_Int *v,
                 HYPRE_Int  i,
                 HYPRE_Int  j )
{
   HYPRE_Int held = v[j];

   v[j] = v[i];
   v[i] = held;
}
/*--------------------------------------------------------------------------
 * hypre_swap2: exchange entries i and j of v (int) and w (real) in lockstep.
 *--------------------------------------------------------------------------*/

void hypre_swap2(HYPRE_Int  *v,
                 HYPRE_Real *w,
                 HYPRE_Int   i,
                 HYPRE_Int   j )
{
   HYPRE_Int  vi = v[i];
   HYPRE_Real wi = w[i];

   v[i] = v[j];
   w[i] = w[j];
   v[j] = vi;
   w[j] = wi;
}
/*--------------------------------------------------------------------------
 * hypre_swap2i: exchange entries i and j of two integer arrays in lockstep.
 *--------------------------------------------------------------------------*/

void hypre_swap2i(HYPRE_Int *v,
                  HYPRE_Int *w,
                  HYPRE_Int  i,
                  HYPRE_Int  j )
{
   HYPRE_Int vi = v[i];
   HYPRE_Int wi = w[i];

   v[i] = v[j];
   w[i] = w[j];
   v[j] = vi;
   w[j] = wi;
}
/*--------------------------------------------------------------------------
 * hypre_swap3i: exchange entries i and j of three integer arrays in lockstep.
 *--------------------------------------------------------------------------*/

/* AB 11/04 */

void hypre_swap3i(HYPRE_Int *v,
                  HYPRE_Int *w,
                  HYPRE_Int *z,
                  HYPRE_Int  i,
                  HYPRE_Int  j )
{
   HYPRE_Int vi = v[i];
   HYPRE_Int wi = w[i];
   HYPRE_Int zi = z[i];

   v[i] = v[j];
   w[i] = w[j];
   z[i] = z[j];
   v[j] = vi;
   w[j] = wi;
   z[j] = zi;
}
/*--------------------------------------------------------------------------
 * hypre_swap3_d: exchange entries i and j of v (real) and w, z (int)
 * in lockstep.
 *--------------------------------------------------------------------------*/

void hypre_swap3_d(HYPRE_Real *v,
                   HYPRE_Int  *w,
                   HYPRE_Int  *z,
                   HYPRE_Int   i,
                   HYPRE_Int   j )
{
   HYPRE_Real vi = v[i];
   HYPRE_Int  wi = w[i];
   HYPRE_Int  zi = z[i];

   v[i] = v[j];
   w[i] = w[j];
   z[i] = z[j];
   v[j] = vi;
   w[j] = wi;
   z[j] = zi;
}
/*--------------------------------------------------------------------------
 * hypre_swap4_d: exchange entries i and j of v (real) and w, z, y (int)
 * in lockstep.
 *--------------------------------------------------------------------------*/

void hypre_swap4_d(HYPRE_Real *v,
                   HYPRE_Int  *w,
                   HYPRE_Int  *z,
                   HYPRE_Int  *y,
                   HYPRE_Int   i,
                   HYPRE_Int   j )
{
   HYPRE_Real vi = v[i];
   HYPRE_Int  wi = w[i];
   HYPRE_Int  zi = z[i];
   HYPRE_Int  yi = y[i];

   v[i] = v[j];
   w[i] = w[j];
   z[i] = z[j];
   y[i] = y[j];
   v[j] = vi;
   w[j] = wi;
   z[j] = zi;
   y[j] = yi;
}
/*--------------------------------------------------------------------------
 * hypre_swap_d: exchange entries i and j of the real array v.
 *--------------------------------------------------------------------------*/

void hypre_swap_d( HYPRE_Real *v,
                   HYPRE_Int   i,
                   HYPRE_Int   j )
{
   HYPRE_Real held = v[j];

   v[j] = v[i];
   v[i] = held;
}
/*--------------------------------------------------------------------------
 * hypre_qsort0: recursive quicksort of v[left..right] (ascending),
 * middle-element pivot.
 *--------------------------------------------------------------------------*/

void hypre_qsort0( HYPRE_Int *v,
                   HYPRE_Int  left,
                   HYPRE_Int  right )
{
   HYPRE_Int idx, store;

   /* ranges of length 0 or 1 are already sorted */
   if (left >= right)
   {
      return;
   }

   /* park the middle element at `left` as the pivot */
   hypre_swap(v, left, (left + right) / 2);
   store = left;

   /* partition: elements smaller than the pivot move left of `store` */
   for (idx = left + 1; idx <= right; idx++)
   {
      if (v[idx] < v[left])
      {
         hypre_swap(v, ++store, idx);
      }
   }
   hypre_swap(v, left, store);

   hypre_qsort0(v, left, store - 1);
   hypre_qsort0(v, store + 1, right);
}
/*--------------------------------------------------------------------------
 * hypre_qsort1: quicksort on the integer keys v, permuting the real array w
 * identically.
 *--------------------------------------------------------------------------*/

void hypre_qsort1( HYPRE_Int *v,
                   HYPRE_Real *w,
                   HYPRE_Int  left,
                   HYPRE_Int  right )
{
   HYPRE_Int idx, store;

   if (left >= right)
   {
      return;
   }

   /* middle-element pivot, parked at `left` */
   hypre_swap2(v, w, left, (left + right) / 2);
   store = left;

   /* partition on v, dragging w along */
   for (idx = left + 1; idx <= right; idx++)
   {
      if (v[idx] < v[left])
      {
         hypre_swap2(v, w, ++store, idx);
      }
   }
   hypre_swap2(v, w, left, store);

   hypre_qsort1(v, w, left, store - 1);
   hypre_qsort1(v, w, store + 1, right);
}
/*--------------------------------------------------------------------------
 * hypre_qsort2i: quicksort on the keys v, permuting the integer array w
 * identically.
 *--------------------------------------------------------------------------*/

void hypre_qsort2i( HYPRE_Int *v,
                    HYPRE_Int *w,
                    HYPRE_Int  left,
                    HYPRE_Int  right )
{
   HYPRE_Int idx, store;

   if (left >= right)
   {
      return;
   }

   /* middle-element pivot, parked at `left` */
   hypre_swap2i(v, w, left, (left + right) / 2);
   store = left;

   /* partition on v, dragging w along */
   for (idx = left + 1; idx <= right; idx++)
   {
      if (v[idx] < v[left])
      {
         hypre_swap2i(v, w, ++store, idx);
      }
   }
   hypre_swap2i(v, w, left, store);

   hypre_qsort2i(v, w, left, store - 1);
   hypre_qsort2i(v, w, store + 1, right);
}
/*--------------------------------------------------------------------------
 * hypre_qsort2: quicksort on the real keys w, permuting the integer array v
 * identically.  (sort on w, move v -- AB 11/04)
 *--------------------------------------------------------------------------*/

void hypre_qsort2( HYPRE_Int *v,
                   HYPRE_Real *w,
                   HYPRE_Int  left,
                   HYPRE_Int  right )
{
   HYPRE_Int idx, store;

   if (left >= right)
   {
      return;
   }

   /* middle-element pivot, parked at `left` */
   hypre_swap2(v, w, left, (left + right) / 2);
   store = left;

   /* partition on w, dragging v along */
   for (idx = left + 1; idx <= right; idx++)
   {
      if (w[idx] < w[left])
      {
         hypre_swap2(v, w, ++store, idx);
      }
   }
   hypre_swap2(v, w, left, store);

   hypre_qsort2(v, w, left, store - 1);
   hypre_qsort2(v, w, store + 1, right);
}
/*--------------------------------------------------------------------------
 * hypre_qsort3i: quicksort on the keys v, permuting w and z identically.
 * (sort on v, move w and z -- AB 11/04)
 *--------------------------------------------------------------------------*/

void hypre_qsort3i( HYPRE_Int *v,
                    HYPRE_Int *w,
                    HYPRE_Int *z,
                    HYPRE_Int  left,
                    HYPRE_Int  right )
{
   HYPRE_Int idx, store;

   if (left >= right)
   {
      return;
   }

   /* middle-element pivot, parked at `left` */
   hypre_swap3i(v, w, z, left, (left + right) / 2);
   store = left;

   /* partition on v, dragging w and z along */
   for (idx = left + 1; idx <= right; idx++)
   {
      if (v[idx] < v[left])
      {
         hypre_swap3i(v, w, z, ++store, idx);
      }
   }
   hypre_swap3i(v, w, z, left, store);

   hypre_qsort3i(v, w, z, left, store - 1);
   hypre_qsort3i(v, w, z, store + 1, right);
}
/*--------------------------------------------------------------------------
 * hypre_qsort3_abs: quicksort of v from smallest to largest |value|,
 * permuting the integer arrays w and z identically.
 *--------------------------------------------------------------------------*/

void hypre_qsort3_abs(HYPRE_Real *v,
                      HYPRE_Int *w,
                      HYPRE_Int *z,
                      HYPRE_Int  left,
                      HYPRE_Int  right )
{
   HYPRE_Int idx, store;

   if (left >= right)
   {
      return;
   }

   /* middle-element pivot, parked at `left` */
   hypre_swap3_d(v, w, z, left, (left + right) / 2);
   store = left;

   /* partition by |v|, dragging w and z along */
   for (idx = left + 1; idx <= right; idx++)
   {
      if (fabs(v[idx]) < fabs(v[left]))
      {
         hypre_swap3_d(v, w, z, ++store, idx);
      }
   }
   hypre_swap3_d(v, w, z, left, store);

   hypre_qsort3_abs(v, w, z, left, store - 1);
   hypre_qsort3_abs(v, w, z, store + 1, right);
}
/*--------------------------------------------------------------------------
 * hypre_qsort4_abs: quicksort of v from smallest to largest |value|,
 * permuting the integer arrays w, z and y identically.
 *--------------------------------------------------------------------------*/

void hypre_qsort4_abs(HYPRE_Real *v,
                      HYPRE_Int *w,
                      HYPRE_Int *z,
                      HYPRE_Int *y,
                      HYPRE_Int  left,
                      HYPRE_Int  right )
{
   HYPRE_Int idx, store;

   if (left >= right)
   {
      return;
   }

   /* middle-element pivot, parked at `left` */
   hypre_swap4_d(v, w, z, y, left, (left + right) / 2);
   store = left;

   /* partition by |v|, dragging w, z, y along */
   for (idx = left + 1; idx <= right; idx++)
   {
      if (fabs(v[idx]) < fabs(v[left]))
      {
         hypre_swap4_d(v, w, z, y, ++store, idx);
      }
   }
   hypre_swap4_d(v, w, z, y, left, store);

   hypre_qsort4_abs(v, w, z, y, left, store - 1);
   hypre_qsort4_abs(v, w, z, y, store + 1, right);
}
/*--------------------------------------------------------------------------
 * hypre_qsort_abs: quicksort of w from smallest to largest |value|.
 *--------------------------------------------------------------------------*/

void hypre_qsort_abs(HYPRE_Real *w,
                     HYPRE_Int  left,
                     HYPRE_Int  right )
{
   HYPRE_Int idx, store;

   if (left >= right)
   {
      return;
   }

   /* middle-element pivot, parked at `left` */
   hypre_swap_d(w, left, (left + right) / 2);
   store = left;

   /* partition by |w| */
   for (idx = left + 1; idx <= right; idx++)
   {
      if (fabs(w[idx]) < fabs(w[left]))
      {
         hypre_swap_d(w, ++store, idx);
      }
   }
   hypre_swap_d(w, left, store);

   hypre_qsort_abs(w, left, store - 1);
   hypre_qsort_abs(w, store + 1, right);
}
| 9,716 | 24.638522 | 81 | c |
AMG | AMG-master/utilities/hypre_smp.h | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang ([email protected]) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#ifndef HYPRE_SMP_HEADER
#define HYPRE_SMP_HEADER
#endif
#define HYPRE_SMP_SCHEDULE schedule(static)
| 1,028 | 41.875 | 81 | h |
AMG | AMG-master/utilities/memory_dmalloc.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang ([email protected]) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* Memory management utilities
*
* Routines to use "Debug Malloc Library", dmalloc
*
*****************************************************************************/
#ifdef HYPRE_MEMORY_DMALLOC
#include "hypre_memory.h"
#include <dmalloc.h>
char dmalloc_logpath_memory[256];
/*--------------------------------------------------------------------------
 * hypre_InitMemoryDebugDML
 *
 * Starts the Debug Malloc Library and directs its log to a per-process
 * file named from `id`.  Always returns 0.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_InitMemoryDebugDML( HYPRE_Int id )
{
   HYPRE_Int *warmup;

   /* allocate and immediately free one int so the Debug Malloc
      Library gets started/initialized */
   warmup = hypre_TAlloc(HYPRE_Int, 1);
   hypre_TFree(warmup);

   /* route the dmalloc log into a file unique to this id */
   dmalloc_logpath = dmalloc_logpath_memory;
   hypre_sprintf(dmalloc_logpath, "dmalloc.log.%04d", id);

   return 0;
}
/*--------------------------------------------------------------------------
 * hypre_FinalizeMemoryDebugDML
 *
 * Runs a dmalloc heap verification (NULL argument => check the whole heap,
 * per dmalloc's API).  Always returns 0.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_FinalizeMemoryDebugDML( )
{
   dmalloc_verify(NULL);

   return 0;
}
/*--------------------------------------------------------------------------
 * hypre_MAllocDML
 *
 * malloc `size` bytes through dmalloc, recording the caller's file/line.
 * Returns NULL (without calling dmalloc) for non-positive sizes.
 *--------------------------------------------------------------------------*/

char *
hypre_MAllocDML( HYPRE_Int size,
                 char     *file,
                 HYPRE_Int line )
{
   if (size <= 0)
   {
      return NULL;
   }

   return _malloc_leap(file, line, size);
}
/*--------------------------------------------------------------------------
 * hypre_CAllocDML
 *
 * calloc `count` elements of `elt_size` bytes through dmalloc, recording
 * the caller's file/line.  Returns NULL for zero-sized requests.
 *--------------------------------------------------------------------------*/

char *
hypre_CAllocDML( HYPRE_Int count,
                 HYPRE_Int elt_size,
                 char     *file,
                 HYPRE_Int line )
{
   HYPRE_Int nbytes = count * elt_size;

   return (nbytes > 0) ? _calloc_leap(file, line, count, elt_size) : NULL;
}
/*--------------------------------------------------------------------------
 * hypre_ReAllocDML
 *
 * realloc `ptr` to `size` bytes through dmalloc, recording file/line.
 *--------------------------------------------------------------------------*/

char *
hypre_ReAllocDML( char     *ptr,
                  HYPRE_Int size,
                  char     *file,
                  HYPRE_Int line )
{
   return _realloc_leap(file, line, ptr, size);
}
/*--------------------------------------------------------------------------
 * hypre_FreeDML
 *
 * Free a dmalloc-tracked pointer, recording file/line.  NULL is a no-op.
 *--------------------------------------------------------------------------*/

void
hypre_FreeDML( char     *ptr,
               char     *file,
               HYPRE_Int line )
{
   if (ptr == NULL)
   {
      return;
   }

   _free_leap(file, line, ptr);
}
#else
/* this is used only to eliminate compiler warnings */
char hypre_memory_dmalloc_empty;
#endif
| 3,817 | 25.150685 | 81 | c |
AMG | AMG-master/utilities/qsplit.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang ([email protected]) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#include "_hypre_utilities.h"
#include <math.h>
/*--------------------------------------------------------------------------
* hypre_DoubleQuickSplit
* C version of the routine "qsplit" from SPARSKIT
* Uses a quicksort-type algorithm to split data into
* highest "NumberCut" values without completely sorting them.
* Data is HYPRE_Real precision data.
*--------------------------------------------------------------------------*/
/*
 * Quickselect-style partial sort: on return the NumberKept entries of
 * `values` with the largest absolute values occupy positions
 * [0, NumberKept), with `indices` permuted identically.  The kept entries
 * are not fully sorted among themselves.  Returns 0; out-of-range
 * NumberKept is a no-op.
 */
HYPRE_Int hypre_DoubleQuickSplit(HYPRE_Real *values, HYPRE_Int *indices,
                                 HYPRE_Int list_length, HYPRE_Int NumberKept )
{
   HYPRE_Int ierr = 0;
   HYPRE_Real interchange_value;
   HYPRE_Real abskey;
   HYPRE_Int interchange_index;
   HYPRE_Int first, last;
   HYPRE_Int mid, j;
   HYPRE_Int done;

   first = 0;
   last = list_length-1;

   if ( (NumberKept < first+1) || (NumberKept > last+1) )
      return( ierr );

   /* Loop until the "midpoint" is NumberKept */
   done = 0;

   for ( ; !done; )
   {
      mid = first;
      abskey = fabs( values[ mid ]);  /* pivot: |first element| */

      /* partition: entries with larger |value| move ahead of `mid` */
      for( j = first+1; j <= last; j ++)
      {
         if( fabs( values[ j ]) > abskey )
         {
            mid ++;
            /* interchange values */
            interchange_value = values[ mid];
            interchange_index = indices[ mid];
            values[ mid] = values[ j];
            indices[ mid] = indices[ j];
            values[ j] = interchange_value;
            indices[ j] = interchange_index;
         }
      }

      /* interchange the first and mid value: pivot into its final slot */
      interchange_value = values[ mid];
      interchange_index = indices[ mid];
      values[ mid] = values[ first];
      indices[ mid] = indices[ first];
      values[ first] = interchange_value;
      indices[ first] = interchange_index;

      if ( mid+1 == NumberKept )
      {
         done = 1;
         break;
      }

      /* recurse (iteratively) into the half that still contains the cut */
      if ( mid+1 > NumberKept )
         last = mid - 1;
      else
         first = mid + 1;
   }

   return ( ierr );
}
| 2,904 | 30.236559 | 81 | c |
AMG | AMG-master/utilities/random.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang ([email protected]) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* This file contains routines that implement a pseudo-random number generator
* detailed in the following paper.
*
* @article{RNG_Park_Miller,
* author = {S. K. Park and K. W. Miller},
* title = {Random number generators: good ones are hard to find},
* journal = {Commun. ACM},
* volume = {31},
* number = {10},
* year = {1988},
* pages = {1192--1201},
* }
*
* This RNG has been shown to appear fairly random, it is a full period
* generating function (the sequence uses all of the values available to it up
* to 2147483647), and can be implemented on any architecture using 32-bit
* integers. The implementation in this file will not overflow for 32-bit
* arithmetic, which all modern computers should support.
*
* @author David Alber
* @date March 2005
*
*****************************************************************************/
#include "_hypre_utilities.h"
/*--------------------------------------------------------------------------
 * Static variables
 *--------------------------------------------------------------------------*/

/* Generator state; every hypre_RandI() call advances it. */
static HYPRE_Int Seed = 13579;

/* Park-Miller minimal-standard constants: a is the multiplier and
 * m = 2^31 - 1 the modulus; q = m / a and r = m % a are the Schrage
 * decomposition used to compute (a * Seed) mod m without 32-bit overflow. */
#define a  16807
#define m  2147483647
#define q  127773
#define r  2836
/*--------------------------------------------------------------------------
* Initializes the pseudo-random number generator to a place in the sequence.
*
* @param seed an HYPRE_Int containing the seed for the RNG.
*--------------------------------------------------------------------------*/
void hypre_SeedRand( HYPRE_Int seed )
{
   /* Reset the generator state so subsequent hypre_RandI()/hypre_Rand()
    * calls continue the Park-Miller sequence from this point.
    *
    * The recurrence Seed = (a*Seed) mod m is only a full-period generator
    * for states in [1, m-1].  A seed of 0 (or any multiple of m) is a
    * fixed point of the update in hypre_RandI(), which would make every
    * subsequent draw return the same value, so degenerate seeds are
    * mapped into the valid range first.
    *
    * @param seed an HYPRE_Int containing the seed for the RNG. */
   seed = seed % m;
   if (seed < 0)
   {
      seed += m;
   }
   if (seed == 0)
   {
      seed = 1;
   }
   Seed = seed;
}
/*--------------------------------------------------------------------------
* Computes the next pseudo-random number in the sequence using the global
* variable Seed.
*
* @return a HYPRE_Int between (0, 2147483647]
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_RandI()
{
   /* Advance the Park-Miller sequence: Seed = (a * Seed) mod m.
    *
    * Uses Schrage's decomposition m = a*q + r (q = m/a, r = m mod a) so
    * the update never overflows 32-bit signed arithmetic.
    *
    * @return an HYPRE_Int in (0, 2147483647]. */
   HYPRE_Int hi, lo, trial;
   hi = Seed / q;
   lo = Seed % q;
   trial = a * lo - r * hi;
   Seed = (trial > 0) ? trial : trial + m;
   return Seed;
}
/*--------------------------------------------------------------------------
* Computes the next pseudo-random number in the sequence using the global
* variable Seed.
*
* @return a HYPRE_Real containing the next number in the sequence divided by
* 2147483647 so that the numbers are in (0, 1].
*--------------------------------------------------------------------------*/
HYPRE_Real hypre_Rand()
{
/*
HYPRE_Int low, high, test;
high = Seed / q;
low = Seed % q;
test = a * low - r * high;
if(test > 0)
{
Seed = test;
}
else
{
Seed = test + m;
}
*/
return ((HYPRE_Real)(hypre_RandI()) / m);
}
| 3,732 | 30.905983 | 81 | c |
AMG | AMG-master/utilities/threading.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang ([email protected]) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#include <stdlib.h>
#include <stdio.h>
#include "_hypre_utilities.h"
#ifdef HYPRE_USING_OPENMP
HYPRE_Int
hypre_NumThreads( )
{
   /* Upper bound on the team size OpenMP may use for the next
      parallel region. */
   return omp_get_max_threads();
}
/* This next function must be called from within a parallel region! */
HYPRE_Int
hypre_NumActiveThreads( )
{
   /* Number of threads in the team executing the current parallel
      region. */
   return omp_get_num_threads();
}
/* This next function must be called from within a parallel region! */
HYPRE_Int
hypre_GetThreadNum( )
{
   /* Id of the calling thread within the current team (0-based). */
   return omp_get_thread_num();
}
#endif
/* This next function must be called from within a parallel region! */
void
hypre_GetSimpleThreadPartition( HYPRE_Int *begin, HYPRE_Int *end, HYPRE_Int n )
{
   /* Hand the calling thread its contiguous share [*begin, *end) of the
      index range [0, n): the range is cut into chunks of
      ceil(n/num_threads) items, and trailing threads may receive empty
      ranges when n is small. */
   HYPRE_Int nthreads = hypre_NumActiveThreads();
   HYPRE_Int tid = hypre_GetThreadNum();
   HYPRE_Int chunk = (n + nthreads - 1) / nthreads;
   HYPRE_Int lo = chunk * tid;
   *begin = hypre_min(lo, n);
   *end = hypre_min(lo + chunk, n);
}
| 1,996 | 26.356164 | 81 | c |
AMG | AMG-master/utilities/threading.h | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang ([email protected]) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#ifndef hypre_THREADING_HEADER
#define hypre_THREADING_HEADER
#ifdef HYPRE_USING_OPENMP
/* Implemented in threading.c on top of the OpenMP runtime. */
HYPRE_Int hypre_NumThreads( void );
/* The next two must be called from within a parallel region
   (see threading.c). */
HYPRE_Int hypre_NumActiveThreads( void );
HYPRE_Int hypre_GetThreadNum( void );
#else
/* Serial (non-OpenMP) build: exactly one thread, with id 0. */
#define hypre_NumThreads() 1
#define hypre_NumActiveThreads() 1
#define hypre_GetThreadNum() 0
#endif
/* Computes this thread's contiguous share [*begin, *end) of the range
   [0, n); must be called from within a parallel region. */
void hypre_GetSimpleThreadPartition( HYPRE_Int *begin, HYPRE_Int *end, HYPRE_Int n );
#endif
| 1,338 | 33.333333 | 85 | h |
AMG | AMG-master/utilities/timer.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang ([email protected]) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/*
* File: timer.c
* Author: Scott Kohn ([email protected])
* Description: somewhat portable timing routines for C++, C, and Fortran
*
* If TIMER_USE_MPI is defined, then the MPI timers are used to get
* wallclock seconds, since we assume that the MPI timers have better
* resolution than the system timers.
*/
#include "_hypre_utilities.h"
#include <time.h>
#ifndef WIN32
#include <unistd.h>
#include <sys/times.h>
#endif
#ifdef TIMER_USE_MPI
#include "mpi.h"
#endif
HYPRE_Real time_getWallclockSeconds(void)
{
#ifdef TIMER_USE_MPI
return(hypre_MPI_Wtime());
#else
#ifdef WIN32
clock_t cl=clock();
return(((HYPRE_Real) cl)/((HYPRE_Real) CLOCKS_PER_SEC));
#else
struct tms usage;
hypre_longint wallclock = times(&usage);
return(((HYPRE_Real) wallclock)/((HYPRE_Real) sysconf(_SC_CLK_TCK)));
#endif
#endif
}
HYPRE_Real time_getCPUSeconds(void)
{
#ifndef TIMER_NO_SYS
clock_t cpuclock = clock();
return(((HYPRE_Real) (cpuclock))/((HYPRE_Real) CLOCKS_PER_SEC));
#else
return(0.0);
#endif
}
HYPRE_Real time_get_wallclock_seconds_(void)
{
   /* Fortran-callable wrapper (trailing-underscore name mangling). */
   return time_getWallclockSeconds();
}
HYPRE_Real time_get_cpu_seconds_(void)
{
   /* Fortran-callable wrapper (trailing-underscore name mangling). */
   return time_getCPUSeconds();
}
| 2,114 | 27.581081 | 81 | c |
AMG | AMG-master/utilities/timing.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang ([email protected]) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* Routines for doing timing.
*
*****************************************************************************/
#define HYPRE_TIMING
#define HYPRE_TIMING_GLOBALS
#include "_hypre_utilities.h"
#include "timing.h"
/*-------------------------------------------------------
* Timing macros
*-------------------------------------------------------*/
#define hypre_StartTiming() \
hypre_TimingWallCount -= time_getWallclockSeconds();\
hypre_TimingCPUCount -= time_getCPUSeconds()
#define hypre_StopTiming() \
hypre_TimingWallCount += time_getWallclockSeconds();\
hypre_TimingCPUCount += time_getCPUSeconds()
#define hypre_global_timing_ref(index,field) hypre_global_timing->field
/*--------------------------------------------------------------------------
* hypre_InitializeTiming
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_InitializeTiming( const char *name )
{
   /* Register a timer called `name` and return its index into the global
    * timing arrays.
    *
    * If `name` is already registered, its registration count is bumped and
    * the existing index is returned.  Otherwise the first free slot is
    * reused, or -- when every slot is taken -- each per-timer array is
    * grown by one entry.  Each call should eventually be matched by a
    * hypre_FinalizeTiming() on the returned index. */
   HYPRE_Int time_index;
   HYPRE_Real *old_wall_time;
   HYPRE_Real *old_cpu_time;
   HYPRE_Real *old_flops;
   char **old_name;
   HYPRE_Int *old_state;
   HYPRE_Int *old_num_regs;
   HYPRE_Int new_name;
   HYPRE_Int i;
   /*-------------------------------------------------------
    * Allocate global TimingType structure if needed
    *-------------------------------------------------------*/
   if (hypre_global_timing == NULL)
   {
      hypre_global_timing = hypre_CTAlloc(hypre_TimingType, 1);
   }
   /*-------------------------------------------------------
    * Check to see if name has already been registered
    *-------------------------------------------------------*/
   new_name = 1;
   for (i = 0; i < (hypre_global_timing_ref(threadid, size)); i++)
   {
      /* Slots with num_regs == 0 are free and may hold a stale name. */
      if (hypre_TimingNumRegs(i) > 0)
      {
         if (strcmp(name, hypre_TimingName(i)) == 0)
         {
            new_name = 0;
            time_index = i;
            hypre_TimingNumRegs(time_index) ++;
            break;
         }
      }
   }
   if (new_name)
   {
      /* Find the first unused slot; if none exists, the loop leaves
         i == size and the arrays are grown below. */
      for (i = 0; i < hypre_global_timing_ref(threadid ,size); i++)
      {
         if (hypre_TimingNumRegs(i) == 0)
         {
            break;
         }
      }
      time_index = i;
   }
   /*-------------------------------------------------------
    * Register the new timing name
    *-------------------------------------------------------*/
   if (new_name)
   {
      if (time_index == (hypre_global_timing_ref(threadid, size)))
      {
         /* Grow every per-timer array by one entry, copying the old
            contents over (realloc by hand). */
         old_wall_time = (hypre_global_timing_ref(threadid, wall_time));
         old_cpu_time = (hypre_global_timing_ref(threadid, cpu_time));
         old_flops = (hypre_global_timing_ref(threadid, flops));
         old_name = (hypre_global_timing_ref(threadid, name));
         old_state = (hypre_global_timing_ref(threadid, state));
         old_num_regs = (hypre_global_timing_ref(threadid, num_regs));
         (hypre_global_timing_ref(threadid, wall_time)) =
            hypre_CTAlloc(HYPRE_Real, (time_index+1));
         (hypre_global_timing_ref(threadid, cpu_time)) =
            hypre_CTAlloc(HYPRE_Real, (time_index+1));
         (hypre_global_timing_ref(threadid, flops)) =
            hypre_CTAlloc(HYPRE_Real, (time_index+1));
         (hypre_global_timing_ref(threadid, name)) =
            hypre_CTAlloc(char *, (time_index+1));
         (hypre_global_timing_ref(threadid, state)) =
            hypre_CTAlloc(HYPRE_Int, (time_index+1));
         (hypre_global_timing_ref(threadid, num_regs)) =
            hypre_CTAlloc(HYPRE_Int, (time_index+1));
         (hypre_global_timing_ref(threadid, size)) ++;
         for (i = 0; i < time_index; i++)
         {
            hypre_TimingWallTime(i) = old_wall_time[i];
            hypre_TimingCPUTime(i) = old_cpu_time[i];
            hypre_TimingFLOPS(i) = old_flops[i];
            hypre_TimingName(i) = old_name[i];
            hypre_TimingState(i) = old_state[i];
            hypre_TimingNumRegs(i) = old_num_regs[i];
         }
         hypre_TFree(old_wall_time);
         hypre_TFree(old_cpu_time);
         hypre_TFree(old_flops);
         hypre_TFree(old_name);
         hypre_TFree(old_state);
         hypre_TFree(old_num_regs);
      }
      /* Assumes hypre_CTAlloc zero-fills (calloc semantics) -- then the
         80-byte buffer stays NUL-terminated even when `name` is truncated
         to 79 characters by strncpy. */
      hypre_TimingName(time_index) = hypre_CTAlloc(char, 80);
      strncpy(hypre_TimingName(time_index), name, 79);
      hypre_TimingState(time_index) = 0;
      hypre_TimingNumRegs(time_index) = 1;
      (hypre_global_timing_ref(threadid, num_names)) ++;
   }
   return time_index;
}
/*--------------------------------------------------------------------------
* hypre_FinalizeTiming
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_FinalizeTiming( HYPRE_Int time_index )
{
   /* Release one registration of timer `time_index` (the inverse of
    * hypre_InitializeTiming).  When the timer's registration count drops
    * to zero its name is freed; when no registered names remain at all,
    * the entire global timing structure is torn down. */
   HYPRE_Int ierr = 0;
   HYPRE_Int i;
   if (hypre_global_timing == NULL)
      return ierr;
   if (time_index < (hypre_global_timing_ref(threadid, size)))
   {
      if (hypre_TimingNumRegs(time_index) > 0)
      {
         hypre_TimingNumRegs(time_index) --;
      }
      if (hypre_TimingNumRegs(time_index) == 0)
      {
         hypre_TFree(hypre_TimingName(time_index));
         (hypre_global_timing_ref(threadid, num_names)) --;
      }
   }
   if ((hypre_global_timing -> num_names) == 0)
   {
      /* NOTE(review): hypre_global_timing_ref ignores its index argument
         (see the macro definition above), so every iteration names the
         same pointers.  Repeated frees are only safe if hypre_TFree NULLs
         its argument -- presumably it does; confirm against the
         hypre_TFree definition. */
      for (i = 0; i < (hypre_global_timing -> size); i++)
      {
         hypre_TFree(hypre_global_timing_ref(i, wall_time));
         hypre_TFree(hypre_global_timing_ref(i, cpu_time));
         hypre_TFree(hypre_global_timing_ref(i, flops));
         hypre_TFree(hypre_global_timing_ref(i, name));
         hypre_TFree(hypre_global_timing_ref(i, state));
         hypre_TFree(hypre_global_timing_ref(i, num_regs));
      }
      hypre_TFree(hypre_global_timing);
      hypre_global_timing = NULL;
   }
   return ierr;
}
/*--------------------------------------------------------------------------
* hypre_IncFLOPCount
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_IncFLOPCount( HYPRE_Int inc )
{
   /* Add `inc` to the global floating-point-operation counter; a no-op
      until timing has been initialized. */
   if (hypre_global_timing == NULL)
   {
      return 0;
   }
   hypre_TimingFLOPCount += (HYPRE_Real) inc;
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_BeginTiming
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BeginTiming( HYPRE_Int time_index )
{
   /* Open a timing interval for `time_index`.  Nested Begin/End pairs on
      the same index are supported via the state counter: only the
      outermost Begin snapshots the clocks. */
   if (hypre_global_timing == NULL)
   {
      return 0;
   }
   if (hypre_TimingState(time_index) == 0)
   {
      /* Subtract the current global counters; the matching End adds them
         back, so the difference accumulates the elapsed time/flops. */
      hypre_StopTiming();
      hypre_TimingWallTime(time_index) -= hypre_TimingWallCount;
      hypre_TimingCPUTime(time_index) -= hypre_TimingCPUCount;
      hypre_TimingFLOPS(time_index) -= hypre_TimingFLOPCount;
      hypre_StartTiming();
   }
   hypre_TimingState(time_index)++;
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_EndTiming
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_EndTiming( HYPRE_Int time_index )
{
   /* Close a timing interval opened by hypre_BeginTiming.  Only the
      outermost End (state back to zero) folds the global counters into
      this timer's accumulators. */
   if (hypre_global_timing == NULL)
   {
      return 0;
   }
   hypre_TimingState(time_index)--;
   if (hypre_TimingState(time_index) == 0)
   {
      hypre_StopTiming();
      hypre_TimingWallTime(time_index) += hypre_TimingWallCount;
      hypre_TimingCPUTime(time_index) += hypre_TimingCPUCount;
      hypre_TimingFLOPS(time_index) += hypre_TimingFLOPCount;
      hypre_StartTiming();
   }
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_ClearTiming
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ClearTiming( )
{
   /* Zero the accumulated wall/CPU/flop totals of every timer slot;
      registration info (names, counts, states) is left intact. */
   HYPRE_Int i;
   if (hypre_global_timing == NULL)
   {
      return 0;
   }
   for (i = 0; i < (hypre_global_timing_ref(threadid,size)); i++)
   {
      hypre_TimingWallTime(i) = 0.0;
      hypre_TimingCPUTime(i) = 0.0;
      hypre_TimingFLOPS(i) = 0.0;
   }
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_PrintTiming
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_PrintTiming( const char *heading,
                   HYPRE_Real *wall_time_ptr,
                   MPI_Comm comm )
{
   /* Print a report of all registered timers to stdout on rank 0 of
    * `comm`, reducing each timer's wall and CPU times to the maximum over
    * all ranks.
    *
    * Collective: every rank of `comm` must call this, otherwise the
    * hypre_MPI_Allreduce calls below will hang.
    *
    * On rank 0, *wall_time_ptr is overwritten for each printed timer in
    * turn, so after return it holds the wall time of the LAST timer; on
    * other ranks it is never written. */
   HYPRE_Int ierr = 0;
   HYPRE_Real local_wall_time;
   HYPRE_Real local_cpu_time;
   HYPRE_Real wall_time;
   HYPRE_Real cpu_time;
   HYPRE_Real wall_mflops;
   HYPRE_Real cpu_mflops;
   HYPRE_Int i;
   HYPRE_Int myrank;
   if (hypre_global_timing == NULL)
      return ierr;
   hypre_MPI_Comm_rank(comm, &myrank );
   /* print heading */
   if (myrank == 0)
   {
      hypre_printf("=============================================\n");
      hypre_printf("%s:\n", heading);
      hypre_printf("=============================================\n");
   }
   for (i = 0; i < (hypre_global_timing -> size); i++)
   {
      /* Skip free slots (no active registrations). */
      if (hypre_TimingNumRegs(i) > 0)
      {
         local_wall_time = hypre_TimingWallTime(i);
         local_cpu_time = hypre_TimingCPUTime(i);
         hypre_MPI_Allreduce(&local_wall_time, &wall_time, 1,
                             hypre_MPI_REAL, hypre_MPI_MAX, comm);
         hypre_MPI_Allreduce(&local_cpu_time, &cpu_time, 1,
                             hypre_MPI_REAL, hypre_MPI_MAX, comm);
         if (myrank == 0)
         {
            hypre_printf("%s:\n", hypre_TimingName(i));
            *wall_time_ptr = wall_time;
            /* print wall clock info */
            hypre_printf("  wall clock time = %f seconds\n", wall_time);
            /* Guard against division by zero for timers that never ran. */
            if (wall_time)
               wall_mflops = hypre_TimingFLOPS(i) / wall_time / 1.0E6;
            else
               wall_mflops = 0.0;
            hypre_printf("  wall MFLOPS     = %f\n", wall_mflops);
            /* print CPU clock info */
            hypre_printf("  cpu clock time  = %f seconds\n", cpu_time);
            if (cpu_time)
               cpu_mflops = hypre_TimingFLOPS(i) / cpu_time / 1.0E6;
            else
               cpu_mflops = 0.0;
            hypre_printf("  cpu MFLOPS      = %f\n\n", cpu_mflops);
         }
      }
   }
   return ierr;
}
| 11,219 | 29.572207 | 81 | c |
AMG | AMG-master/utilities/timing.h | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang ([email protected]) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* Header file for doing timing
*
*****************************************************************************/
#ifndef HYPRE_TIMING_HEADER
#define HYPRE_TIMING_HEADER
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#ifdef __cplusplus
extern "C" {
#endif
/*--------------------------------------------------------------------------
* Prototypes for low-level timing routines
*--------------------------------------------------------------------------*/
/* timer.c */
HYPRE_Real time_getWallclockSeconds( void );
HYPRE_Real time_getCPUSeconds( void );
HYPRE_Real time_get_wallclock_seconds_( void );
HYPRE_Real time_get_cpu_seconds_( void );
/*--------------------------------------------------------------------------
* With timing off
*--------------------------------------------------------------------------*/
#ifndef HYPRE_TIMING
#define hypre_InitializeTiming(name) 0
#define hypre_FinalizeTiming(index)
#define hypre_IncFLOPCount(inc)
#define hypre_BeginTiming(i)
#define hypre_EndTiming(i)
/* Must mirror the real hypre_PrintTiming() in timing.c, which takes THREE
   arguments (heading, wall_time_ptr, comm); the former two-argument form
   broke every caller when timing was compiled out. */
#define hypre_PrintTiming(heading, wall_time_ptr, comm)
#define hypre_ClearTiming()
/*--------------------------------------------------------------------------
* With timing on
*--------------------------------------------------------------------------*/
#else
/*-------------------------------------------------------
* Global timing structure
*-------------------------------------------------------*/
/* Global registry of named timers; one parallel entry per timer in each
   of the arrays below (all of length `size`). */
typedef struct
{
   HYPRE_Real *wall_time;   /* accumulated wall-clock seconds per timer */
   HYPRE_Real *cpu_time;    /* accumulated CPU seconds per timer */
   HYPRE_Real *flops;       /* accumulated flop count per timer */
   char **name;             /* registered timer names (80-byte buffers) */
   HYPRE_Int *state; /* boolean flag to allow for recursive timing */
   HYPRE_Int *num_regs; /* count of how many times a name is registered */
   HYPRE_Int num_names;     /* number of distinct names currently registered */
   HYPRE_Int size;          /* allocated length of the arrays above */
   HYPRE_Real wall_count;   /* running wall-clock counter (see hypre_StartTiming) */
   HYPRE_Real CPU_count;    /* running CPU-time counter */
   HYPRE_Real FLOP_count;   /* running flop counter (see hypre_IncFLOPCount) */
} hypre_TimingType;
#ifdef HYPRE_TIMING_GLOBALS
hypre_TimingType *hypre_global_timing = NULL;
#else
extern hypre_TimingType *hypre_global_timing;
#endif
/*-------------------------------------------------------
* Accessor functions
*-------------------------------------------------------*/
#define hypre_TimingWallTime(i) (hypre_global_timing -> wall_time[(i)])
#define hypre_TimingCPUTime(i) (hypre_global_timing -> cpu_time[(i)])
#define hypre_TimingFLOPS(i) (hypre_global_timing -> flops[(i)])
#define hypre_TimingName(i) (hypre_global_timing -> name[(i)])
#define hypre_TimingState(i) (hypre_global_timing -> state[(i)])
#define hypre_TimingNumRegs(i) (hypre_global_timing -> num_regs[(i)])
#define hypre_TimingWallCount (hypre_global_timing -> wall_count)
#define hypre_TimingCPUCount (hypre_global_timing -> CPU_count)
#define hypre_TimingFLOPCount (hypre_global_timing -> FLOP_count)
/*-------------------------------------------------------
* Prototypes
*-------------------------------------------------------*/
/* timing.c */
HYPRE_Int hypre_InitializeTiming( const char *name );
HYPRE_Int hypre_FinalizeTiming( HYPRE_Int time_index );
HYPRE_Int hypre_IncFLOPCount( HYPRE_Int inc );
HYPRE_Int hypre_BeginTiming( HYPRE_Int time_index );
HYPRE_Int hypre_EndTiming( HYPRE_Int time_index );
HYPRE_Int hypre_ClearTiming( void );
HYPRE_Int hypre_PrintTiming( const char *heading , MPI_Comm comm );
#endif
#ifdef __cplusplus
}
#endif
#endif
| 4,315 | 32.71875 | 81 | h |
AMG | AMG-master/utilities/umalloc_local.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang ([email protected]) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#ifdef HYPRE_USE_UMALLOC
#include "umalloc_local.h"
void *_uget_fn(Heap_t usrheap, size_t *length, HYPRE_Int *clean)
{
   /* umalloc "get more storage" callback: rounds the request up to the
      next multiple of INITIAL_HEAP_SIZE, reports the block as already
      zeroed, and returns freshly calloc'ed memory (or NULL on failure). */
   size_t rounded = ((*length) / INITIAL_HEAP_SIZE) * INITIAL_HEAP_SIZE
                    + INITIAL_HEAP_SIZE;
   *length = rounded;
   *clean = _BLOCK_CLEAN;
   return calloc(rounded, 1);
}
void _urelease_fn(Heap_t usrheap, void *p, size_t size)
{
   /* umalloc "release storage" callback: hand the block back to the C
      runtime; `usrheap` and `size` are not needed by free(). */
   free(p);
}
#else
/* this is used only to eliminate compiler warnings */
char umalloc_empty;
#endif
| 1,416 | 29.804348 | 81 | c |
AMG | AMG-master/utilities/umalloc_local.h | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang ([email protected]) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#ifndef UMALLOC_LOCAL_HEADER
#define UMALLOC_LOCAL_HEADER
#ifdef HYPRE_USE_UMALLOC
#include <umalloc.h>
/* Maximum number of threads that may own a private umalloc heap. */
#define MAX_THREAD_COUNT 10
/* Allocation granularity (bytes) used by _uget_fn when growing a heap. */
#define INITIAL_HEAP_SIZE 500000
#define GET_MISS_COUNTS
/* Per-thread handle for a user-defined umalloc heap. */
struct upc_struct
{
  Heap_t myheap;
};
/* NOTE(review): the four globals below are DEFINED (two with
   initializers) in a header; including this header from more than one
   translation unit risks multiple-definition link errors with compilers
   that default to -fno-common.  Consider extern declarations here plus a
   single definition in umalloc_local.c -- confirm which files include
   this header before changing. */
void *_uinitial_block[MAX_THREAD_COUNT];
struct upc_struct _uparam[MAX_THREAD_COUNT];
HYPRE_Int _uheapReleasesCount=0;
HYPRE_Int _uheapGetsCount=0;
/* Callbacks handed to the umalloc runtime (see umalloc_local.c). */
void *_uget_fn(Heap_t usrheap, size_t *length, HYPRE_Int *clean);
void _urelease_fn(Heap_t usrheap, void *p, size_t size);
#endif
#endif
| 1,449 | 29.851064 | 81 | h |
PB-DFS | PB-DFS-master/PySCIPOpt/VC9-include/stdint.h | // ISO C9x compliant stdint.h for Microsoft Visual Studio
// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
//
// Copyright (c) 2006-2008 Alexander Chemeris
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. The name of the author may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef _MSC_VER // [
#error "Use this header only with Microsoft Visual C++ compilers!"
#endif // _MSC_VER ]
#ifndef _MSC_STDINT_H_ // [
#define _MSC_STDINT_H_
#if _MSC_VER > 1000
#pragma once
#endif
#include <limits.h>
// For Visual Studio 6 in C++ mode and for many Visual Studio versions when
// compiling for ARM we should wrap <wchar.h> include with 'extern "C++" {}'
// or compiler give many errors like this:
// error C2733: second C linkage of overloaded function 'wmemchr' not allowed
#ifdef __cplusplus
extern "C" {
#endif
# include <wchar.h>
#ifdef __cplusplus
}
#endif
// Define _W64 macros to mark types changing their size, like intptr_t.
#ifndef _W64
# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300
# define _W64 __w64
# else
# define _W64
# endif
#endif
// 7.18.1 Integer types
// 7.18.1.1 Exact-width integer types
// Visual Studio 6 and Embedded Visual C++ 4 doesn't
// realize that, e.g. char has the same size as __int8
// so we give up on __intX for them.
#if (_MSC_VER < 1300)
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed int int32_t;
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef unsigned int uint32_t;
#else
typedef signed __int8 int8_t;
typedef signed __int16 int16_t;
typedef signed __int32 int32_t;
typedef unsigned __int8 uint8_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int32 uint32_t;
#endif
typedef signed __int64 int64_t;
typedef unsigned __int64 uint64_t;
// 7.18.1.2 Minimum-width integer types
typedef int8_t int_least8_t;
typedef int16_t int_least16_t;
typedef int32_t int_least32_t;
typedef int64_t int_least64_t;
typedef uint8_t uint_least8_t;
typedef uint16_t uint_least16_t;
typedef uint32_t uint_least32_t;
typedef uint64_t uint_least64_t;
// 7.18.1.3 Fastest minimum-width integer types
typedef int8_t int_fast8_t;
typedef int16_t int_fast16_t;
typedef int32_t int_fast32_t;
typedef int64_t int_fast64_t;
typedef uint8_t uint_fast8_t;
typedef uint16_t uint_fast16_t;
typedef uint32_t uint_fast32_t;
typedef uint64_t uint_fast64_t;
// 7.18.1.4 Integer types capable of holding object pointers
#ifdef _WIN64 // [
typedef signed __int64 intptr_t;
typedef unsigned __int64 uintptr_t;
#else // _WIN64 ][
typedef _W64 signed int intptr_t;
typedef _W64 unsigned int uintptr_t;
#endif // _WIN64 ]
// 7.18.1.5 Greatest-width integer types
typedef int64_t intmax_t;
typedef uint64_t uintmax_t;
// 7.18.2 Limits of specified-width integer types
#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259
// 7.18.2.1 Limits of exact-width integer types
#define INT8_MIN ((int8_t)_I8_MIN)
#define INT8_MAX _I8_MAX
#define INT16_MIN ((int16_t)_I16_MIN)
#define INT16_MAX _I16_MAX
#define INT32_MIN ((int32_t)_I32_MIN)
#define INT32_MAX _I32_MAX
#define INT64_MIN ((int64_t)_I64_MIN)
#define INT64_MAX _I64_MAX
#define UINT8_MAX _UI8_MAX
#define UINT16_MAX _UI16_MAX
#define UINT32_MAX _UI32_MAX
#define UINT64_MAX _UI64_MAX
// 7.18.2.2 Limits of minimum-width integer types
#define INT_LEAST8_MIN INT8_MIN
#define INT_LEAST8_MAX INT8_MAX
#define INT_LEAST16_MIN INT16_MIN
#define INT_LEAST16_MAX INT16_MAX
#define INT_LEAST32_MIN INT32_MIN
#define INT_LEAST32_MAX INT32_MAX
#define INT_LEAST64_MIN INT64_MIN
#define INT_LEAST64_MAX INT64_MAX
#define UINT_LEAST8_MAX UINT8_MAX
#define UINT_LEAST16_MAX UINT16_MAX
#define UINT_LEAST32_MAX UINT32_MAX
#define UINT_LEAST64_MAX UINT64_MAX
// 7.18.2.3 Limits of fastest minimum-width integer types
#define INT_FAST8_MIN INT8_MIN
#define INT_FAST8_MAX INT8_MAX
#define INT_FAST16_MIN INT16_MIN
#define INT_FAST16_MAX INT16_MAX
#define INT_FAST32_MIN INT32_MIN
#define INT_FAST32_MAX INT32_MAX
#define INT_FAST64_MIN INT64_MIN
#define INT_FAST64_MAX INT64_MAX
#define UINT_FAST8_MAX UINT8_MAX
#define UINT_FAST16_MAX UINT16_MAX
#define UINT_FAST32_MAX UINT32_MAX
#define UINT_FAST64_MAX UINT64_MAX
// 7.18.2.4 Limits of integer types capable of holding object pointers
#ifdef _WIN64 // [
# define INTPTR_MIN INT64_MIN
# define INTPTR_MAX INT64_MAX
# define UINTPTR_MAX UINT64_MAX
#else // _WIN64 ][
# define INTPTR_MIN INT32_MIN
# define INTPTR_MAX INT32_MAX
# define UINTPTR_MAX UINT32_MAX
#endif // _WIN64 ]
// 7.18.2.5 Limits of greatest-width integer types
#define INTMAX_MIN INT64_MIN
#define INTMAX_MAX INT64_MAX
#define UINTMAX_MAX UINT64_MAX
// 7.18.3 Limits of other integer types
#ifdef _WIN64 // [
# define PTRDIFF_MIN _I64_MIN
# define PTRDIFF_MAX _I64_MAX
#else // _WIN64 ][
# define PTRDIFF_MIN _I32_MIN
# define PTRDIFF_MAX _I32_MAX
#endif // _WIN64 ]
#define SIG_ATOMIC_MIN INT_MIN
#define SIG_ATOMIC_MAX INT_MAX
#ifndef SIZE_MAX // [
# ifdef _WIN64 // [
# define SIZE_MAX _UI64_MAX
# else // _WIN64 ][
# define SIZE_MAX _UI32_MAX
# endif // _WIN64 ]
#endif // SIZE_MAX ]
// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h>
#ifndef WCHAR_MIN // [
# define WCHAR_MIN 0
#endif // WCHAR_MIN ]
#ifndef WCHAR_MAX // [
# define WCHAR_MAX _UI16_MAX
#endif // WCHAR_MAX ]
#define WINT_MIN 0
#define WINT_MAX _UI16_MAX
#endif // __STDC_LIMIT_MACROS ]
// 7.18.4 Limits of other integer types
#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260
// 7.18.4.1 Macros for minimum-width integer constants
#define INT8_C(val) val##i8
#define INT16_C(val) val##i16
#define INT32_C(val) val##i32
#define INT64_C(val) val##i64
#define UINT8_C(val) val##ui8
#define UINT16_C(val) val##ui16
#define UINT32_C(val) val##ui32
#define UINT64_C(val) val##ui64
// 7.18.4.2 Macros for greatest-width integer constants
#define INTMAX_C INT64_C
#define UINTMAX_C UINT64_C
#endif // __STDC_CONSTANT_MACROS ]
#endif // _MSC_STDINT_H_ ]
| 7,728 | 30.165323 | 122 | h |
EEOver | EEOver-master/config.h | /* config.h. Generated from config.h.in by configure. */
/* config.h.in. Generated from configure.ac by autoheader. */
/* Disable deprecated functions and enums while building */
#define GSL_DISABLE_DEPRECATED 1
/* Define if you have inline with C99 behavior */
/* #undef HAVE_C99_INLINE */
/* Define to 1 if you have the declaration of `acosh', and to 0 if you don't.
*/
#define HAVE_DECL_ACOSH 1
/* Define to 1 if you have the declaration of `asinh', and to 0 if you don't.
*/
#define HAVE_DECL_ASINH 1
/* Define to 1 if you have the declaration of `atanh', and to 0 if you don't.
*/
#define HAVE_DECL_ATANH 1
/* Define to 1 if you have the declaration of `expm1', and to 0 if you don't.
*/
#define HAVE_DECL_EXPM1 1
/* Define to 1 if you have the declaration of `feenableexcept', and to 0 if
you don't. */
#define HAVE_DECL_FEENABLEEXCEPT 1
/* Define to 1 if you have the declaration of `fesettrapenable', and to 0 if
you don't. */
#define HAVE_DECL_FESETTRAPENABLE 0
/* Define to 1 if you have the declaration of `finite', and to 0 if you don't.
*/
#define HAVE_DECL_FINITE 1
/* Define to 1 if you have the declaration of `fprnd_t', and to 0 if you
don't. */
#define HAVE_DECL_FPRND_T 0
/* Define to 1 if you have the declaration of `frexp', and to 0 if you don't.
*/
#define HAVE_DECL_FREXP 1
/* Define to 1 if you have the declaration of `hypot', and to 0 if you don't.
*/
#define HAVE_DECL_HYPOT 1
/* Define to 1 if you have the declaration of `isfinite', and to 0 if you
don't. */
#define HAVE_DECL_ISFINITE 1
/* Define to 1 if you have the declaration of `isinf', and to 0 if you don't.
*/
#define HAVE_DECL_ISINF 1
/* Define to 1 if you have the declaration of `isnan', and to 0 if you don't.
*/
#define HAVE_DECL_ISNAN 1
/* Define to 1 if you have the declaration of `ldexp', and to 0 if you don't.
*/
#define HAVE_DECL_LDEXP 1
/* Define to 1 if you have the declaration of `log1p', and to 0 if you don't.
*/
#define HAVE_DECL_LOG1P 1
/* Define to 1 if you have the <dlfcn.h> header file. */
#define HAVE_DLFCN_H 1
/* Define to 1 if you don't have `vprintf' but do have `_doprnt.' */
/* #undef HAVE_DOPRNT */
/* Defined if you have ansi EXIT_SUCCESS and EXIT_FAILURE in stdlib.h */
#define HAVE_EXIT_SUCCESS_AND_FAILURE 1
/* Defined on architectures with excess floating-point precision */
#define HAVE_EXTENDED_PRECISION_REGISTERS 1
/* Define if x86 processor has sse extensions. */
#define HAVE_FPU_X86_SSE 1
/* Define to 1 if you have the <ieeefp.h> header file. */
/* #undef HAVE_IEEEFP_H */
/* Define this if IEEE comparisons work correctly (e.g. NaN != NaN) */
#define HAVE_IEEE_COMPARISONS 1
/* Define this if IEEE denormalized numbers are available */
#define HAVE_IEEE_DENORMALS 1
/* Define if you have inline */
#define HAVE_INLINE 1
/* Define to 1 if you have the <inttypes.h> header file. */
#define HAVE_INTTYPES_H 1
/* Define to 1 if you have the `m' library (-lm). */
#define HAVE_LIBM 1
/* Define to 1 if you have the `memcpy' function. */
#define HAVE_MEMCPY 1
/* Define to 1 if you have the `memmove' function. */
#define HAVE_MEMMOVE 1
/* Define to 1 if you have the <memory.h> header file. */
#define HAVE_MEMORY_H 1
/* Define this if printf can handle %Lf for long double */
#define HAVE_PRINTF_LONGDOUBLE 1
/* Define to 1 if you have the <stdint.h> header file. */
#define HAVE_STDINT_H 1
/* Define to 1 if you have the <stdlib.h> header file. */
#define HAVE_STDLIB_H 1
/* Define to 1 if you have the `strdup' function. */
#define HAVE_STRDUP 1
/* Define to 1 if you have the <strings.h> header file. */
#define HAVE_STRINGS_H 1
/* Define to 1 if you have the <string.h> header file. */
#define HAVE_STRING_H 1
/* Define to 1 if you have the `strtol' function. */
#define HAVE_STRTOL 1
/* Define to 1 if you have the `strtoul' function. */
#define HAVE_STRTOUL 1
/* Define to 1 if you have the <sys/stat.h> header file. */
#define HAVE_SYS_STAT_H 1
/* Define to 1 if you have the <sys/types.h> header file. */
#define HAVE_SYS_TYPES_H 1
/* Define to 1 if you have the <unistd.h> header file. */
#define HAVE_UNISTD_H 1
/* Define to 1 if you have the `vprintf' function. */
#define HAVE_VPRINTF 1
/* Define if you need to hide the static definitions of inline functions */
/* #undef HIDE_INLINE_STATIC */
/* Define to the sub-directory in which libtool stores uninstalled libraries.
*/
#define LT_OBJDIR ".libs/"
/* Name of package */
#define PACKAGE "gsl"
/* Define to the address where bug reports for this package should be sent. */
#define PACKAGE_BUGREPORT ""
/* Define to the full name of this package. */
#define PACKAGE_NAME "gsl"
/* Define to the full name and version of this package. */
#define PACKAGE_STRING "gsl 1.15"
/* Define to the one symbol short name of this package. */
#define PACKAGE_TARNAME "gsl"
/* Define to the home page for this package. */
#define PACKAGE_URL ""
/* Define to the version of this package. */
#define PACKAGE_VERSION "1.15"
/* Defined if this is an official release */
#define RELEASED /**/
/* Define to 1 if you have the ANSI C header files. */
#define STDC_HEADERS 1
/* Version number of package */
#define VERSION "1.15"
/* Define to 1 if type `char' is unsigned and you are not using gcc. */
#ifndef __CHAR_UNSIGNED__
/* # undef __CHAR_UNSIGNED__ */
#endif
/* Define to `__inline__' or `__inline' if that's what the C compiler
calls it, or to nothing if 'inline' is not supported under any name. */
#ifndef __cplusplus
/* #undef inline */
#endif
/* Define to `unsigned int' if <sys/types.h> does not define. */
/* #undef size_t */
/* Define to empty if the keyword `volatile' does not work. Warning: valid
code using `volatile' can become incorrect without. Disable with care. */
/* #undef volatile */
/* Use 0 and 1 for EXIT_SUCCESS and EXIT_FAILURE if we don't have them */
#if !HAVE_EXIT_SUCCESS_AND_FAILURE
#define EXIT_SUCCESS 0
#define EXIT_FAILURE 1
#endif
/* Define one of these if you have a known IEEE arithmetic interface */
/* #undef HAVE_GNUSPARC_IEEE_INTERFACE */
/* #undef HAVE_GNUM68K_IEEE_INTERFACE */
/* #undef HAVE_GNUPPC_IEEE_INTERFACE */
#define HAVE_GNUX86_IEEE_INTERFACE 1
/* #undef HAVE_SUNOS4_IEEE_INTERFACE */
/* #undef HAVE_SOLARIS_IEEE_INTERFACE */
/* #undef HAVE_HPUX11_IEEE_INTERFACE */
/* #undef HAVE_HPUX_IEEE_INTERFACE */
/* #undef HAVE_TRU64_IEEE_INTERFACE */
/* #undef HAVE_IRIX_IEEE_INTERFACE */
/* #undef HAVE_AIX_IEEE_INTERFACE */
/* #undef HAVE_FREEBSD_IEEE_INTERFACE */
/* #undef HAVE_OS2EMX_IEEE_INTERFACE */
/* #undef HAVE_NETBSD_IEEE_INTERFACE */
/* #undef HAVE_OPENBSD_IEEE_INTERFACE */
/* #undef HAVE_DARWIN_IEEE_INTERFACE */
/* #undef HAVE_DARWIN86_IEEE_INTERFACE */
/* Define a rounding function which moves extended precision values
out of registers and rounds them to double-precision. This should
be used *sparingly*, in places where it is necessary to keep
double-precision rounding for critical expressions while running in
extended precision. For example, the following code should ensure
exact equality, even when extended precision registers are in use,
double q = GSL_COERCE_DBL(3.0/7.0) ;
if (q == GSL_COERCE_DBL(3.0/7.0)) { ... } ;
It carries a penalty even when the program is running in double
precision mode unless you compile a separate version of the
library with HAVE_EXTENDED_PRECISION_REGISTERS turned off. */
#if HAVE_EXTENDED_PRECISION_REGISTERS
#define GSL_COERCE_DBL(x) (gsl_coerce_double(x))
#else
#define GSL_COERCE_DBL(x) (x)
#endif
/* Substitute gsl functions for missing system functions */
#if !HAVE_DECL_HYPOT
#define hypot gsl_hypot
#endif
#if !HAVE_DECL_LOG1P
#define log1p gsl_log1p
#endif
#if !HAVE_DECL_EXPM1
#define expm1 gsl_expm1
#endif
#if !HAVE_DECL_ACOSH
#define acosh gsl_acosh
#endif
#if !HAVE_DECL_ASINH
#define asinh gsl_asinh
#endif
#if !HAVE_DECL_ATANH
#define atanh gsl_atanh
#endif
#if !HAVE_DECL_LDEXP
#define ldexp gsl_ldexp
#endif
#if !HAVE_DECL_FREXP
#define frexp gsl_frexp
#endif
#if !HAVE_DECL_ISINF
#define isinf gsl_isinf
#endif
#if !HAVE_DECL_ISFINITE
#define isfinite gsl_finite
#endif
#if !HAVE_DECL_FINITE
#define finite gsl_finite
#endif
#if !HAVE_DECL_ISNAN
#define isnan gsl_isnan
#endif
#ifdef __GNUC__
#define DISCARD_POINTER(p) do { ; } while(p ? 0 : 0);
#else
#define DISCARD_POINTER(p) /* ignoring discarded pointer */
#endif
#if defined(GSL_RANGE_CHECK_OFF) || !defined(GSL_RANGE_CHECK)
#define GSL_RANGE_CHECK 0 /* turn off range checking by default internally */
#endif
#define RETURN_IF_NULL(x) if (!x) { return ; }
| 8,597 | 26.735484 | 78 | h |
EEOver | EEOver-master/program_constants.h | //===========================================================================
//== INCLUDE ANSI C SYSTEM HEADER FILES =====================================
//===========================================================================
#include <math.h> //-- for calls to trig, sqrt and power functions
// #include <iostream>
//#include <math> //-- for calls to trig, sqrt and power functions
//==========================================================================
//== DEFINE PROGRAM CONSTANTS ==============================================
//==========================================================================
#define NORMAL_TERMINATION 0
#define NO_INTERSECTION_POINTS 100
#define ONE_INTERSECTION_POINT 101
#define LINE_TANGENT_TO_ELLIPSE 102
#define DISJOINT_ELLIPSES 103
#define ELLIPSE2_OUTSIDETANGENT_ELLIPSE1 104
#define ELLIPSE2_INSIDETANGENT_ELLIPSE1 105
#define ELLIPSES_INTERSECT 106
#define TWO_INTERSECTION_POINTS 107
#define THREE_INTERSECTION_POINTS 108
#define FOUR_INTERSECTION_POINTS 109
#define ELLIPSE1_INSIDE_ELLIPSE2 110
#define ELLIPSE2_INSIDE_ELLIPSE1 111
#define ELLIPSES_ARE_IDENTICAL 112
#define INTERSECTION_POINT 113
#define TANGENT_POINT 114
#define ERROR_ELLIPSE_PARAMETERS -100
#define ERROR_DEGENERATE_ELLIPSE -101
#define ERROR_POINTS_NOT_ON_ELLIPSE -102
#define ERROR_INVERSE_TRIG -103
#define ERROR_LINE_POINTS -104
#define ERROR_QUARTIC_CASE -105
#define ERROR_POLYNOMIAL_DEGREE -107
#define ERROR_POLYNOMIAL_ROOTS -108
#define ERROR_INTERSECTION_PTS -109
#define ERROR_CALCULATIONS -112
#define EPS +1.0E-05
#define pi (2.0*asin (1.0)) //-- a maximum-precision value of pi
#define twopi (2.0*pi) //-- a maximum-precision value of 2*pi
enum boolean { GSL = 0, TOMS, GEMS, BOOST }; /* root-solver backends (GSL, Netlib/TOMS, Graphics Gems, Boost) -- presumably the solver 'choice' selector; confirm against callers */
| 1,929 | 32.859649 | 77 | h |
EEOver | EEOver-master/solvers.h | #include <gsl/gsl_math.h>
#include <gsl/gsl_test.h>
#include <gsl/gsl_ieee_utils.h>
#include <gsl/gsl_poly.h>
/* Solve for real or complex roots of the quartic equation
* x^4 + a x^3 + b x^2 + c x + d = 0,
* returning the number of such roots.
*
* Roots are returned ordered.
* Author: Andrew Steiner
*/
int gsl_poly_solve_quartic (double a, double b, double c, double d,
double * x0, double * x1,
double * x2, double * x3);
/* Solve for the complex roots of a general real polynomial */
int gsl_poly_complex_solve_quartic(double a, double b, double c, double d,
gsl_complex * z0, gsl_complex * z1,
gsl_complex * z2, gsl_complex * z3);
//---------------------------------------------------------------------------
int SolveQuadric(double c[ 3 ], double s[ 2 ] );
int SolveCubic( double c[ 4 ], double s[ 3 ] );
int SolveQuartic(double c[ 5 ], double s[ 4 ] );
//---------------------------------------------------------------------------
//-- functions for solving the quartic equation from Netlib/TOMS
void BIQUADROOTS (double p[], double r[][5]);
void CUBICROOTS (double p[], double r[][5]);
void QUADROOTS (double p[], double r[][5]);
//---------------------------------------------------------------------------
double nointpts (double A1, double B1, double A2, double B2, double H1,
double K1, double H2, double K2, double PHI_1, double PHI_2,
double H2_TR, double K2_TR, double AA, double BB,
double CC, double DD, double EE, double FF, int *rtnCode);
double twointpts (double xint[], double yint[], double A1, double B1,
double PHI_1, double A2, double B2, double H2_TR,
double K2_TR, double PHI_2, double AA, double BB,
double CC, double DD, double EE, double FF, int *rtnCode);
double threeintpts (double xint[], double yint[], double A1, double B1,
double PHI_1, double A2, double B2, double H2_TR,
double K2_TR, double PHI_2, double AA, double BB,
double CC, double DD, double EE, double FF,
int *rtnCode);
double fourintpts (double xint[], double yint[], double A1, double B1,
double PHI_1, double A2, double B2, double H2_TR,
double K2_TR, double PHI_2, double AA, double BB,
double CC, double DD, double EE, double FF, int *rtnCode);
int istanpt (double x, double y, double A1, double B1, double AA, double BB,
double CC, double DD, double EE, double FF);
double ellipse2tr (double x, double y, double AA, double BB,
double CC, double DD, double EE, double FF);
//===========================================================================
//== ELLIPSE-ELLIPSE OVERLAP ================================================
//===========================================================================
//choice=1: use gsl_poly_complex_solve()
//choice=2: use Andrew Steiner's gsl_poly_complex_solve_quartic()
double ellipse_ellipse_overlap_gsl(double PHI_1, double A1, double B1,
double H1, double K1, double PHI_2,
double A2, double B2, double H2, double K2,
double X[4], double Y[4], int * NROOTS,
int *rtnCode, int choice);
double ellipse_ellipse_overlap_netlibs(double PHI_1, double A1, double B1,
double H1, double K1, double PHI_2,
double A2, double B2, double H2, double K2,
double X[4], double Y[4], int * NROOTS,
int *rtnCode);
double ellipse_ellipse_overlap_gems(double PHI_1, double A1, double B1,
double H1, double K1, double PHI_2,
double A2, double B2, double H2, double K2,
double X[4], double Y[4], int * NROOTS,
int *rtnCode);
int double_cmp(const void *a, const void *b) ;
| 4,125 | 45.886364 | 77 | h |
EEOver | EEOver-master/zsolve_quartic.c | /* poly/zsolve_quartic.c
*
* Copyright (C) 2003 CERN and K.S. K\"{o}lbig
*
* Converted from CERNLIB to C and implemented into the GSL Library
* by Andrew W. Steiner and Andy Buckley
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* zsolve_quartic.c - finds the complex roots of
* x^4 + a x^3 + b x^2 + c x + d = 0
*/
#include "config.h"
#include <math.h>
#include <gsl/gsl_math.h>
#include <gsl/gsl_complex.h>
#include <gsl/gsl_complex_math.h>
#include <gsl/gsl_poly.h>
#define SWAP(a,b) do { gsl_complex tmp = b ; b = a ; a = tmp ; } while(0)
/* Computes the four complex roots of the quartic equation
 *
 *     x^4 + a x^3 + b x^2 + c x + d = 0
 *
 * and stores them in *z0 .. *z3.  Real roots are sorted by real part;
 * complex roots are kept as conjugate pairs with the negative-imaginary
 * member first within each pair.
 *
 * Always returns 4, the number of (complex) roots of a quartic.
 *
 * Fix vs. the original CERNLIB translation: the argument of acos() in
 * the three-real-root branch of the resolvent cubic is now clamped to
 * [-1, 1].  The original called acos() first and only patched up the
 * ratio >= 1 case afterwards, leaving ratio <= -1 (possible through
 * floating-point rounding) to produce NaN roots.
 */
int
gsl_poly_complex_solve_quartic (double a, double b, double c, double d,
                                gsl_complex * z0, gsl_complex * z1,
                                gsl_complex * z2, gsl_complex * z3)
{
  gsl_complex i, zarr[4], w1, w2, w3;
  double r4 = 1.0 / 4.0;
  double q2 = 1.0 / 2.0, q4 = 1.0 / 4.0, q8 = 1.0 / 8.0;
  double q1 = 3.0 / 8.0, q3 = 3.0 / 16.0;
  double u[3], v[3], v1, v2, disc;
  double aa, pp, qq, rr, rc, sc, tc, h;
  int k1 = 0, k2 = 0, mt;

  GSL_SET_COMPLEX (&i, 0.0, 1.0);
  GSL_SET_COMPLEX (&zarr[0], 0.0, 0.0);
  GSL_SET_COMPLEX (&zarr[1], 0.0, 0.0);
  GSL_SET_COMPLEX (&zarr[2], 0.0, 0.0);
  GSL_SET_COMPLEX (&zarr[3], 0.0, 0.0);
  GSL_SET_COMPLEX (&w1, 0.0, 0.0);
  GSL_SET_COMPLEX (&w2, 0.0, 0.0);
  GSL_SET_COMPLEX (&w3, 0.0, 0.0);

  /* Deal easily with the cases where the quartic is degenerate. The
   * ordering of solutions is done explicitly. */
  if (0 == b && 0 == c)
    {
      if (0 == d)
        {
          /* x^4 + a x^3 = 0: roots are 0 (multiplicity 3) and -a */
          if (a > 0)
            {
              GSL_SET_COMPLEX (z0, -a, 0.0);
              GSL_SET_COMPLEX (z1, 0.0, 0.0);
              GSL_SET_COMPLEX (z2, 0.0, 0.0);
              GSL_SET_COMPLEX (z3, 0.0, 0.0);
            }
          else
            {
              GSL_SET_COMPLEX (z0, 0.0, 0.0);
              GSL_SET_COMPLEX (z1, 0.0, 0.0);
              GSL_SET_COMPLEX (z2, 0.0, 0.0);
              GSL_SET_COMPLEX (z3, -a, 0.0);
            }
          return 4;
        }
      else if (0 == a)
        {
          /* x^4 + d = 0: the roots are the four fourth roots of -d */
          if (d > 0)
            {
              double sqrt_d = sqrt (d);
              gsl_complex i_sqrt_d = gsl_complex_mul_real (i, sqrt_d);
              gsl_complex minus_i = gsl_complex_conjugate (i);
              *z3 = gsl_complex_sqrt (i_sqrt_d);
              *z2 = gsl_complex_mul (minus_i, *z3);
              *z1 = gsl_complex_negative (*z2);
              *z0 = gsl_complex_negative (*z3);
            }
          else
            {
              double sqrt_abs_d = sqrt (-d);
              *z3 = gsl_complex_sqrt_real (sqrt_abs_d);
              *z2 = gsl_complex_mul (i, *z3);
              *z1 = gsl_complex_negative (*z2);
              *z0 = gsl_complex_negative (*z3);
            }
          return 4;
        }
    }

  if (0.0 == c && 0.0 == d)
    {
      /* x^2 (x^2 + a x + b) = 0: two zero roots plus a quadratic */
      disc = (a * a - 4.0 * b);
      if (disc < 0.0)
        {
          /* 2 complex roots and 2 real roots for the quartic */
          mt = 3;
        }
      else
        {
          /* 4 real roots for the quartic */
          mt = 1;
        }
      *z0 = zarr[0];
      *z1 = zarr[0];
      gsl_poly_complex_solve_quadratic (1.0, a, b, z2, z3);
    }
  else
    {
      /* For non-degenerate solutions, proceed by constructing and
       * solving the resolvent cubic */
      aa = a * a;
      pp = b - q1 * aa;
      qq = c - q2 * a * (b - q4 * aa);
      rr = d - q4 * (a * c - q4 * aa * (b - q3 * aa));
      rc = q2 * pp;
      sc = q4 * (q4 * pp * pp - rr);
      tc = -(q8 * qq * q8 * qq);

      /* This code solves the resolvent cubic in a convenient fashion
       * for this implementation of the quartic. If there are three real
       * roots, then they are placed directly into u[]. If two are
       * complex, then the real root is put into u[0] and the real
       * and imaginary part of the complex roots are placed into
       * u[1] and u[2], respectively. Additionally, this
       * calculates the discriminant of the cubic and puts it into the
       * variable disc. */
      {
        double qcub = (rc * rc - 3 * sc);
        double rcub = (2 * rc * rc * rc - 9 * rc * sc + 27 * tc);

        double Q = qcub / 9;
        double R = rcub / 54;

        double Q3 = Q * Q * Q;
        double R2 = R * R;

        double CR2 = 729 * rcub * rcub;
        double CQ3 = 2916 * qcub * qcub * qcub;

        disc = (CR2 - CQ3) / 2125764.0;

        if (0 == R && 0 == Q)
          {
            /* Triple real root of the cubic */
            u[0] = -rc / 3;
            u[1] = -rc / 3;
            u[2] = -rc / 3;
          }
        else if (CR2 == CQ3)
          {
            /* The cubic has a repeated root */
            double sqrtQ = sqrt (Q);
            if (R > 0)
              {
                u[0] = -2 * sqrtQ - rc / 3;
                u[1] = sqrtQ - rc / 3;
                u[2] = sqrtQ - rc / 3;
              }
            else
              {
                u[0] = -sqrtQ - rc / 3;
                u[1] = -sqrtQ - rc / 3;
                u[2] = 2 * sqrtQ - rc / 3;
              }
          }
        else if (CR2 < CQ3)
          {
            /* Three distinct real roots (trigonometric method) */
            double sqrtQ = sqrt (Q);
            double sqrtQ3 = sqrtQ * sqrtQ * sqrtQ;
            double ratio = R / sqrtQ3;
            double theta;

            /* Clamp the acos() argument to [-1, 1]: rounding can push
             * the ratio slightly out of range, and acos() would return
             * NaN.  Clamping to +1 reproduces the original theta = 0
             * special case, since acos(1) == 0. */
            if (ratio >= 1.0)
              ratio = 1.0;
            else if (ratio <= -1.0)
              ratio = -1.0;
            theta = acos (ratio);

            {
              double norm = -2 * sqrtQ;
              u[0] = norm * cos (theta / 3) - rc / 3;
              u[1] = norm * cos ((theta + 2.0 * M_PI) / 3) - rc / 3;
              u[2] = norm * cos ((theta - 2.0 * M_PI) / 3) - rc / 3;
            }
          }
        else
          {
            /* One real root, two complex (Cardano's method) */
            double sgnR = (R >= 0 ? 1 : -1);
            double modR = fabs (R);
            double sqrt_disc = sqrt (R2 - Q3);
            double A = -sgnR * pow (modR + sqrt_disc, 1.0 / 3.0);
            double B = Q / A;
            double mod_diffAB = fabs (A - B);

            u[0] = A + B - rc / 3;
            u[1] = -0.5 * (A + B) - rc / 3;
            u[2] = -(sqrt (3.0) / 2.0) * mod_diffAB;
          }
      }
      /* End of solution to resolvent cubic */

      /* Combine the square roots of the roots of the cubic
       * resolvent appropriately. Also, calculate 'mt' which
       * designates the nature of the roots:
       * mt=1 : 4 real roots
       * mt=2 : 0 real roots
       * mt=3 : 2 real roots
       */

      if (0 == disc)
        u[2] = u[1];

      if (0 >= disc)
        {
          mt = 2;

          /* Pick the two cubic roots of largest magnitude; their
           * square roots give w1 and w2 */
          v[0] = fabs (u[0]);
          v[1] = fabs (u[1]);
          v[2] = fabs (u[2]);

          v1 = GSL_MAX (GSL_MAX (v[0], v[1]), v[2]);
          if (v1 == v[0])
            {
              k1 = 0;
              v2 = GSL_MAX (v[1], v[2]);
            }
          else if (v1 == v[1])
            {
              k1 = 1;
              v2 = GSL_MAX (v[0], v[2]);
            }
          else
            {
              k1 = 2;
              v2 = GSL_MAX (v[0], v[1]);
            }

          if (v2 == v[0])
            {
              k2 = 0;
            }
          else if (v2 == v[1])
            {
              k2 = 1;
            }
          else
            {
              k2 = 2;
            }
          w1 = gsl_complex_sqrt_real (u[k1]);
          w2 = gsl_complex_sqrt_real (u[k2]);
        }
      else
        {
          mt = 3;
          GSL_SET_COMPLEX (&w1, u[1], u[2]);
          GSL_SET_COMPLEX (&w2, u[1], -u[2]);
          w1 = gsl_complex_sqrt (w1);
          w2 = gsl_complex_sqrt (w2);
        }

      /* Solve the quadratic in order to obtain the roots
       * to the quartic */
      if (0.0 != gsl_complex_abs (gsl_complex_mul (w1, w2)))
        {
          w3 = gsl_complex_mul_real (gsl_complex_inverse
                                     (gsl_complex_mul (w1, w2)), -qq / 8.0);
        }

      h = r4 * a;

      zarr[0] = gsl_complex_add_real (gsl_complex_add
                                      (gsl_complex_add (w1, w2), w3), -h);
      zarr[1] = gsl_complex_add_real (gsl_complex_add
                                      (gsl_complex_negative
                                       (gsl_complex_add (w1, w2)), w3), -h);
      zarr[2] = gsl_complex_add_real (gsl_complex_sub
                                      (gsl_complex_sub (w2, w1), w3), -h);
      zarr[3] = gsl_complex_add_real (gsl_complex_sub
                                      (gsl_complex_sub (w1, w2), w3), -h);

      /* Arrange the roots into the variables z0, z1, z2, z3 */
      if (2 == mt)
        {
          if (u[k1] >= 0 && u[k2] >= 0)
            {
              /* Both selected cubic roots non-negative: all real roots */
              mt = 1;
              GSL_SET_COMPLEX (z0, GSL_REAL (zarr[0]), 0.0);
              GSL_SET_COMPLEX (z1, GSL_REAL (zarr[1]), 0.0);
              GSL_SET_COMPLEX (z2, GSL_REAL (zarr[2]), 0.0);
              GSL_SET_COMPLEX (z3, GSL_REAL (zarr[3]), 0.0);
            }
          else if (u[k1] >= 0 && u[k2] < 0)
            {
              *z0 = zarr[0];
              *z1 = zarr[3];
              *z2 = zarr[2];
              *z3 = zarr[1];
            }
          else if (u[k1] < 0 && u[k2] >= 0)
            {
              *z0 = zarr[0];
              *z1 = zarr[2];
              *z2 = zarr[3];
              *z3 = zarr[1];
            }
          else if (u[k1] < 0 && u[k2] < 0)
            {
              *z0 = zarr[0];
              *z1 = zarr[1];
              *z2 = zarr[3];
              *z3 = zarr[2];
            }
        }
      else if (3 == mt)
        {
          GSL_SET_COMPLEX (z0, GSL_REAL (zarr[0]), 0.0);
          GSL_SET_COMPLEX (z1, GSL_REAL (zarr[1]), 0.0);
          *z2 = zarr[3];
          *z3 = zarr[2];
        }
    }

  /*
   * Sort the roots as usual.
   * This code is most likely not optimal.
   */

  if (1 == mt)
    {
      /* Roots are all real, sort them by the real part */
      if (GSL_REAL (*z0) > GSL_REAL (*z1)) SWAP (*z0, *z1);
      if (GSL_REAL (*z0) > GSL_REAL (*z2)) SWAP (*z0, *z2);
      if (GSL_REAL (*z0) > GSL_REAL (*z3)) SWAP (*z0, *z3);

      if (GSL_REAL (*z1) > GSL_REAL (*z2)) SWAP (*z1, *z2);
      if (GSL_REAL (*z2) > GSL_REAL (*z3))
        {
          SWAP (*z2, *z3);
          if (GSL_REAL (*z1) > GSL_REAL (*z2)) SWAP (*z1, *z2);
        }
    }
  else if (2 == mt)
    {
      /* Roots are all complex. z0 and z1 are conjugates
       * and z2 and z3 are conjugates. */
      if (GSL_REAL (*z0) == GSL_REAL (*z2))
        {
          /* If all of the real parts are equal, just sort by the
           * imaginary parts.  Ensure that the pairs are ordered so
           * that the root with negative imaginary part is first. */
          if (GSL_IMAG (*z2) > GSL_IMAG (*z3)) SWAP (*z2, *z3);
          if (GSL_IMAG (*z0) > GSL_IMAG (*z1)) SWAP (*z0, *z1);
          if (GSL_IMAG (*z0) < GSL_IMAG (*z2))
            {
              SWAP (*z1, *z2);
              SWAP (*z2, *z3);
            }
          else
            {
              SWAP (*z0, *z2);
              SWAP (*z0, *z1);
            }
        }
      else
        {
          /* Otherwise, sort the real parts first */
          if (GSL_REAL (*z0) > GSL_REAL (*z2))
            {
              SWAP (*z0, *z2);
              SWAP (*z1, *z3);
            }
          /* Then sort by the imaginary parts */
          if (GSL_IMAG (*z0) > GSL_IMAG (*z1)) SWAP (*z0, *z1);
          if (GSL_IMAG (*z2) > GSL_IMAG (*z3)) SWAP (*z2, *z3);
        }
    }
  else
    {
      /* 2 real roots. z2 and z3 are conjugates. */

      /* Swap complex roots, if necessary. */
      if (GSL_IMAG (*z2) > GSL_IMAG (*z3)) SWAP (*z2, *z3);

      /* Sort real parts */
      if (GSL_REAL (*z0) == GSL_REAL (*z2))
        {
          if (GSL_REAL (*z0) < GSL_REAL (*z1))
            {
              SWAP (*z1, *z3);
              SWAP (*z1, *z2);
              SWAP (*z0, *z1);
            }
          else if (GSL_REAL (*z0) == GSL_REAL (*z1))
            {
              SWAP (*z0, *z2);
            }
          else
            {
              SWAP (*z0, *z1);
              SWAP (*z1, *z2);
            }
        }
      else if (GSL_REAL (*z1) == GSL_REAL (*z2))
        {
          if (GSL_REAL (*z0) < GSL_REAL (*z1))
            {
              SWAP (*z1, *z2);
            }
          else
            {
              SWAP (*z0, *z3);
              SWAP (*z0, *z2);
            }
        }
      else
        {
          if (GSL_REAL (*z0) > GSL_REAL (*z1)) SWAP (*z0, *z1);
          if (GSL_REAL (*z1) > GSL_REAL (*z2))
            {
              if (GSL_REAL (*z0) > GSL_REAL (*z2))
                {
                  SWAP (*z0, *z2);
                  SWAP (*z1, *z3);
                }
              else
                {
                  SWAP (*z1, *z2);
                  SWAP (*z2, *z3);
                }
            }
        }
    }

  return 4;
}
| 12,534 | 26.855556 | 73 | c |
abess | abess-master/include/Spectra/DavidsonSymEigsSolver.h | // Copyright (C) 2020 Netherlands eScience Center <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_DAVIDSON_SYM_EIGS_SOLVER_H
#define SPECTRA_DAVIDSON_SYM_EIGS_SOLVER_H
#include <Eigen/Core>
#include "JDSymEigsBase.h"
#include "Util/SelectionRule.h"
namespace Spectra {
///
/// \ingroup EigenSolver
///
/// This class implement the DPR correction for the Davidson algorithms.
/// The algorithms in the Davidson family only differ in how the correction
/// vectors are computed and optionally in the initial orthogonal basis set.
///
/// the DPR correction compute the new correction vector using the following expression:
/// \f[ correction = -(\boldsymbol{D} - \rho \boldsymbol{I})^{-1} \boldsymbol{r} \f]
/// where
/// \f$D\f$ is the diagonal of the target matrix, \f$\rho\f$ the Ritz eigenvalue,
/// \f$I\f$ the identity matrix and \f$r\f$ the residue vector.
///
template <typename OpType>
class DavidsonSymEigsSolver : public JDSymEigsBase<DavidsonSymEigsSolver<OpType>, OpType>
{
private:
    using Index = Eigen::Index;
    using Scalar = typename OpType::Scalar;
    using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
    using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;

    Vector m_diagonal;  // cached diagonal of the target matrix

public:
    /// Constructor: caches the diagonal of the operator, which drives
    /// both the initial search space and the DPR correction.
    ///
    /// \param op  Matrix operation object.
    /// \param nev Number of eigenvalues requested.
    DavidsonSymEigsSolver(OpType& op, Index nev) :
        JDSymEigsBase<DavidsonSymEigsSolver<OpType>, OpType>(op, nev)
    {
        m_diagonal.resize(this->m_matrix_operator.rows());
        for (Index row = 0; row < op.rows(); row++)
        {
            m_diagonal(row) = op(row, row);
        }
    }

    /// Create the initial search space from the diagonal and the
    /// spectrum target (highest or lowest): column k of the basis is
    /// the unit vector selecting the k-th best diagonal entry.
    ///
    /// \param selection Spectrum section to target (e.g. lowest, etc.)
    /// \return Matrix with the initial orthonormal basis
    Matrix setup_initial_search_space(SortRule selection) const
    {
        const Index nrows = this->m_matrix_operator.rows();
        const Index nbasis = this->m_initial_search_space_size;
        std::vector<Eigen::Index> best = argsort(selection, m_diagonal);
        Matrix basis = Matrix::Zero(nrows, nbasis);
        for (Index col = 0; col < nbasis; col++)
        {
            basis(best[col], col) = 1.0;
        }
        return basis;
    }

    /// Compute the DPR corrections c_k = r_k / (rho_k - D), where D is
    /// the matrix diagonal, rho_k the k-th Ritz value and r_k the
    /// corresponding residual.
    ///
    /// \return New correction vectors.
    Matrix calculate_correction_vector() const
    {
        const Matrix& res = this->m_ritz_pairs.residues();
        const Vector& rho = this->m_ritz_pairs.ritz_values();
        Matrix corr = Matrix::Zero(this->m_matrix_operator.rows(), this->m_correction_size);
        for (Index col = 0; col < this->m_correction_size; col++)
        {
            corr.col(col) = res.col(col).array() / (rho(col) - m_diagonal.array());
        }
        return corr;
    }
};
} // namespace Spectra
#endif // SPECTRA_DAVIDSON_SYM_EIGS_SOLVER_H
| 3,169 | 33.835165 | 111 | h |
abess | abess-master/include/Spectra/GenEigsBase.h | // Copyright (C) 2018-2021 Yixuan Qiu <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_GEN_EIGS_BASE_H
#define SPECTRA_GEN_EIGS_BASE_H
#include <Eigen/Core>
#include <vector> // std::vector
#include <cmath> // std::abs, std::pow, std::sqrt
#include <algorithm> // std::min, std::copy
#include <complex> // std::complex, std::conj, std::norm, std::abs
#include <stdexcept> // std::invalid_argument
#include "Util/Version.h"
#include "Util/TypeTraits.h"
#include "Util/SelectionRule.h"
#include "Util/CompInfo.h"
#include "Util/SimpleRandom.h"
#include "MatOp/internal/ArnoldiOp.h"
#include "LinAlg/UpperHessenbergQR.h"
#include "LinAlg/DoubleShiftQR.h"
#include "LinAlg/UpperHessenbergEigen.h"
#include "LinAlg/Arnoldi.h"
namespace Spectra {
///
/// \ingroup EigenSolver
///
/// This is the base class for general eigen solvers, mainly for internal use.
/// It is kept here to provide the documentation for member functions of concrete eigen solvers
/// such as GenEigsSolver and GenEigsRealShiftSolver.
///
template <typename OpType, typename BOpType>
class GenEigsBase
{
private:
using Scalar = typename OpType::Scalar;
using Index = Eigen::Index;
using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
using Array = Eigen::Array<Scalar, Eigen::Dynamic, 1>;
using BoolArray = Eigen::Array<bool, Eigen::Dynamic, 1>;
using MapMat = Eigen::Map<Matrix>;
using MapVec = Eigen::Map<Vector>;
using MapConstVec = Eigen::Map<const Vector>;
using Complex = std::complex<Scalar>;
using ComplexMatrix = Eigen::Matrix<Complex, Eigen::Dynamic, Eigen::Dynamic>;
using ComplexVector = Eigen::Matrix<Complex, Eigen::Dynamic, 1>;
using ArnoldiOpType = ArnoldiOp<Scalar, OpType, BOpType>;
using ArnoldiFac = Arnoldi<Scalar, ArnoldiOpType>;
protected:
// clang-format off
OpType& m_op; // object to conduct matrix operation,
// e.g. matrix-vector product
const Index m_n; // dimension of matrix A
const Index m_nev; // number of eigenvalues requested
const Index m_ncv; // dimension of Krylov subspace in the Arnoldi method
Index m_nmatop; // number of matrix operations called
Index m_niter; // number of restarting iterations
ArnoldiFac m_fac; // Arnoldi factorization
ComplexVector m_ritz_val; // Ritz values
ComplexMatrix m_ritz_vec; // Ritz vectors
ComplexVector m_ritz_est; // last row of m_ritz_vec, also called the Ritz estimates
private:
BoolArray m_ritz_conv; // indicator of the convergence of Ritz values
CompInfo m_info; // status of the computation
// clang-format on
    // Real Ritz values calculated from UpperHessenbergEigen have exact zero imaginary part
    // Complex Ritz values have exact conjugate pairs
    // So we use exact tests here
    // Whether v has a nonzero imaginary part, i.e. is genuinely complex
    static bool is_complex(const Complex& v) { return v.imag() != Scalar(0); }
    // Whether v1 and v2 form an exact conjugate pair
    static bool is_conj(const Complex& v1, const Complex& v2) { return v1 == Eigen::numext::conj(v2); }
    // Implicitly restarted Arnoldi factorization: applies the unwanted
    // Ritz values m_ritz_val[k..ncv-1] as shifts through QR steps on H,
    // then rebuilds the factorization from step k and refreshes the
    // Ritz pairs under the given selection rule.
    void restart(Index k, SortRule selection)
    {
        using std::norm;
        if (k >= m_ncv)
            return;
        DoubleShiftQR<Scalar> decomp_ds(m_ncv);
        UpperHessenbergQR<Scalar> decomp_hb(m_ncv);
        // Q accumulates the product of all orthogonal transforms applied to H
        Matrix Q = Matrix::Identity(m_ncv, m_ncv);
        for (Index i = k; i < m_ncv; i++)
        {
            if (is_complex(m_ritz_val[i]) && is_conj(m_ritz_val[i], m_ritz_val[i + 1]))
            {
                // Conjugate pair of shifts: one real double-shift QR sweep
                // H - mu * I = Q1 * R1
                // H <- R1 * Q1 + mu * I = Q1' * H * Q1
                // H - conj(mu) * I = Q2 * R2
                // H <- R2 * Q2 + conj(mu) * I = Q2' * H * Q2
                //
                // (H - mu * I) * (H - conj(mu) * I) = Q1 * Q2 * R2 * R1 = Q * R
                const Scalar s = Scalar(2) * m_ritz_val[i].real();
                const Scalar t = norm(m_ritz_val[i]);
                decomp_ds.compute(m_fac.matrix_H(), s, t);
                // Q -> Q * Qi
                decomp_ds.apply_YQ(Q);
                // H -> Q'HQ
                // Matrix Q = Matrix::Identity(m_ncv, m_ncv);
                // decomp_ds.apply_YQ(Q);
                // m_fac_H = Q.transpose() * m_fac_H * Q;
                m_fac.compress_H(decomp_ds);
                // Skip the other member of the conjugate pair
                i++;
            }
            else
            {
                // QR decomposition of H - mu * I, mu is real
                decomp_hb.compute(m_fac.matrix_H(), m_ritz_val[i].real());
                // Q -> Q * Qi
                decomp_hb.apply_YQ(Q);
                // H -> Q'HQ = RQ + mu * I
                m_fac.compress_H(decomp_hb);
            }
        }
        // Apply the accumulated transform to the basis, then extend the
        // factorization from step k back to ncv columns
        m_fac.compress_V(Q);
        m_fac.factorize_from(k, m_ncv, m_nmatop);
        retrieve_ritzpair(selection);
    }
// Calculates the number of converged Ritz values
Index num_converged(const Scalar& tol)
{
using std::pow;
// The machine precision, ~= 1e-16 for the "double" type
constexpr Scalar eps = TypeTraits<Scalar>::epsilon();
// std::pow() is not constexpr, so we do not declare eps23 to be constexpr
// But most compilers should be able to compute eps23 at compile time
const Scalar eps23 = pow(eps, Scalar(2) / 3);
// thresh = tol * max(eps23, abs(theta)), theta for Ritz value
Array thresh = tol * m_ritz_val.head(m_nev).array().abs().max(eps23);
Array resid = m_ritz_est.head(m_nev).array().abs() * m_fac.f_norm();
// Converged "wanted" Ritz values
m_ritz_conv = (resid < thresh);
return m_ritz_conv.count();
}
// Returns the adjusted nev for restarting
Index nev_adjusted(Index nconv)
{
using std::abs;
// A very small value, but 1.0 / near_0 does not overflow
// ~= 1e-307 for the "double" type
constexpr Scalar near_0 = TypeTraits<Scalar>::min() * Scalar(10);
Index nev_new = m_nev;
for (Index i = m_nev; i < m_ncv; i++)
if (abs(m_ritz_est[i]) < near_0)
nev_new++;
// Adjust nev_new, according to dnaup2.f line 660~674 in ARPACK
nev_new += (std::min)(nconv, (m_ncv - nev_new) / 2);
if (nev_new == 1 && m_ncv >= 6)
nev_new = m_ncv / 2;
else if (nev_new == 1 && m_ncv > 3)
nev_new = 2;
if (nev_new > m_ncv - 2)
nev_new = m_ncv - 2;
// Increase nev by one if ritz_val[nev - 1] and
// ritz_val[nev] are conjugate pairs
if (is_complex(m_ritz_val[nev_new - 1]) &&
is_conj(m_ritz_val[nev_new - 1], m_ritz_val[nev_new]))
{
nev_new++;
}
return nev_new;
}
// Retrieves and sorts Ritz values and Ritz vectors
// Computes the Ritz pairs of the upper Hessenberg matrix H, and sorts them
// so that the "wanted" ones (according to the selection rule) come first.
void retrieve_ritzpair(SortRule selection)
{
    // Eigen decomposition of the small (ncv x ncv) Hessenberg matrix H
    UpperHessenbergEigen<Scalar> decomp(m_fac.matrix_H());
    const ComplexVector& evals = decomp.eigenvalues();
    ComplexMatrix evecs = decomp.eigenvectors();
    // Sort Ritz values and put the wanted ones at the beginning
    // Each case instantiates SortEigenvalue with a different compile-time
    // sorting rule, hence the switch rather than a runtime parameter
    std::vector<Index> ind;
    switch (selection)
    {
        case SortRule::LargestMagn:
        {
            SortEigenvalue<Complex, SortRule::LargestMagn> sorting(evals.data(), m_ncv);
            sorting.swap(ind);
            break;
        }
        case SortRule::LargestReal:
        {
            SortEigenvalue<Complex, SortRule::LargestReal> sorting(evals.data(), m_ncv);
            sorting.swap(ind);
            break;
        }
        case SortRule::LargestImag:
        {
            SortEigenvalue<Complex, SortRule::LargestImag> sorting(evals.data(), m_ncv);
            sorting.swap(ind);
            break;
        }
        case SortRule::SmallestMagn:
        {
            SortEigenvalue<Complex, SortRule::SmallestMagn> sorting(evals.data(), m_ncv);
            sorting.swap(ind);
            break;
        }
        case SortRule::SmallestReal:
        {
            SortEigenvalue<Complex, SortRule::SmallestReal> sorting(evals.data(), m_ncv);
            sorting.swap(ind);
            break;
        }
        case SortRule::SmallestImag:
        {
            SortEigenvalue<Complex, SortRule::SmallestImag> sorting(evals.data(), m_ncv);
            sorting.swap(ind);
            break;
        }
        default:
            throw std::invalid_argument("unsupported selection rule");
    }
    // Copy the Ritz values and vectors to m_ritz_val and m_ritz_vec, respectively
    for (Index i = 0; i < m_ncv; i++)
    {
        m_ritz_val[i] = evals[ind[i]];
        // Last row of the eigenvector matrix: stored as the Ritz estimates
        m_ritz_est[i] = evecs(m_ncv - 1, ind[i]);
    }
    // Only the first nev Ritz vectors are kept
    for (Index i = 0; i < m_nev; i++)
    {
        m_ritz_vec.col(i).noalias() = evecs.col(ind[i]);
    }
}
protected:
// Sorts the first nev Ritz pairs in the specified order
// This is used to return the final results
// Sorts the first nev Ritz pairs according to sort_rule; called once at the
// end of compute() to order the final results. Virtual so that shift-mode
// solvers can transform the Ritz values back before sorting.
virtual void sort_ritzpair(SortRule sort_rule)
{
    // Each case instantiates SortEigenvalue with a different compile-time
    // sorting rule, hence the switch rather than a runtime parameter
    std::vector<Index> ind;
    switch (sort_rule)
    {
        case SortRule::LargestMagn:
        {
            SortEigenvalue<Complex, SortRule::LargestMagn> sorting(m_ritz_val.data(), m_nev);
            sorting.swap(ind);
            break;
        }
        case SortRule::LargestReal:
        {
            SortEigenvalue<Complex, SortRule::LargestReal> sorting(m_ritz_val.data(), m_nev);
            sorting.swap(ind);
            break;
        }
        case SortRule::LargestImag:
        {
            SortEigenvalue<Complex, SortRule::LargestImag> sorting(m_ritz_val.data(), m_nev);
            sorting.swap(ind);
            break;
        }
        case SortRule::SmallestMagn:
        {
            SortEigenvalue<Complex, SortRule::SmallestMagn> sorting(m_ritz_val.data(), m_nev);
            sorting.swap(ind);
            break;
        }
        case SortRule::SmallestReal:
        {
            SortEigenvalue<Complex, SortRule::SmallestReal> sorting(m_ritz_val.data(), m_nev);
            sorting.swap(ind);
            break;
        }
        case SortRule::SmallestImag:
        {
            SortEigenvalue<Complex, SortRule::SmallestImag> sorting(m_ritz_val.data(), m_nev);
            sorting.swap(ind);
            break;
        }
        default:
            throw std::invalid_argument("unsupported sorting rule");
    }
    // Reorder values, vectors and convergence flags in one pass,
    // then swap the new buffers into place
    ComplexVector new_ritz_val(m_ncv);
    ComplexMatrix new_ritz_vec(m_ncv, m_nev);
    BoolArray new_ritz_conv(m_nev);
    for (Index i = 0; i < m_nev; i++)
    {
        new_ritz_val[i] = m_ritz_val[ind[i]];
        new_ritz_vec.col(i).noalias() = m_ritz_vec.col(ind[i]);
        new_ritz_conv[i] = m_ritz_conv[ind[i]];
    }
    m_ritz_val.swap(new_ritz_val);
    m_ritz_vec.swap(new_ritz_vec);
    m_ritz_conv.swap(new_ritz_conv);
}
public:
/// \cond
// Constructor: op is the A-operator, Bop the B-operator wrapped by ArnoldiOp,
// nev the number of requested eigenvalues, and ncv the dimension of the
// Krylov subspace (capped at n). Throws std::invalid_argument when nev/ncv
// are out of range.
GenEigsBase(OpType& op, const BOpType& Bop, Index nev, Index ncv) :
    m_op(op),
    m_n(m_op.rows()),              // problem dimension, taken from the operator
    m_nev(nev),
    m_ncv(ncv > m_n ? m_n : ncv),  // ncv cannot exceed n
    m_nmatop(0),
    m_niter(0),
    m_fac(ArnoldiOpType(op, Bop), m_ncv),
    m_info(CompInfo::NotComputed)
{
    if (nev < 1 || nev > m_n - 2)
        throw std::invalid_argument("nev must satisfy 1 <= nev <= n - 2, n is the size of matrix");
    if (ncv < nev + 2 || ncv > m_n)
        throw std::invalid_argument("ncv must satisfy nev + 2 <= ncv <= n, n is the size of matrix");
}
///
/// Virtual destructor
///
virtual ~GenEigsBase() {}  // virtual: sort_ritzpair() is overridden by derived solvers
/// \endcond
///
/// Initializes the solver by providing an initial residual vector.
///
/// \param init_resid Pointer to the initial residual vector.
///
/// **Spectra** (and also **ARPACK**) uses an iterative algorithm
/// to find eigenvalues. This function allows the user to provide the initial
/// residual vector.
///
void init(const Scalar* init_resid)
{
    // Reset all matrices/vectors to zero
    m_ritz_val.resize(m_ncv);
    m_ritz_vec.resize(m_ncv, m_nev);
    m_ritz_est.resize(m_ncv);
    m_ritz_conv.resize(m_nev);
    m_ritz_val.setZero();
    m_ritz_vec.setZero();
    m_ritz_est.setZero();
    m_ritz_conv.setZero();
    // Reset the operation and iteration counters
    m_nmatop = 0;
    m_niter = 0;
    // Initialize the Arnoldi factorization from the user-supplied residual;
    // the operation counter is passed so the factorization can update it
    MapConstVec v0(init_resid, m_n);
    m_fac.init(v0, m_nmatop);
}
///
/// Initializes the solver by providing a random initial residual vector.
///
/// This overloaded function generates a random initial residual vector
/// (with a fixed random seed) for the algorithm. Elements in the vector
/// follow independent Uniform(-0.5, 0.5) distribution.
///
void init()
{
SimpleRandom<Scalar> rng(0);
Vector init_resid = rng.random_vec(m_n);
init(init_resid.data());
}
///
/// Conducts the major computation procedure.
///
/// \param selection An enumeration value indicating the selection rule of
/// the requested eigenvalues, for example `SortRule::LargestMagn`
/// to retrieve eigenvalues with the largest magnitude.
/// The full list of enumeration values can be found in
/// \ref Enumerations.
/// \param maxit Maximum number of iterations allowed in the algorithm.
/// \param tol Precision parameter for the calculated eigenvalues.
/// \param sorting Rule to sort the eigenvalues and eigenvectors.
/// Supported values are
/// `SortRule::LargestMagn`, `SortRule::LargestReal`,
/// `SortRule::LargestImag`, `SortRule::SmallestMagn`,
/// `SortRule::SmallestReal` and `SortRule::SmallestImag`,
/// for example `SortRule::LargestMagn` indicates that eigenvalues
/// with largest magnitude come first.
/// Note that this argument is only used to
/// **sort** the final result, and the **selection** rule
/// (e.g. selecting the largest or smallest eigenvalues in the
/// full spectrum) is specified by the parameter `selection`.
///
/// \return Number of converged eigenvalues.
///
Index compute(SortRule selection = SortRule::LargestMagn, Index maxit = 1000,
              Scalar tol = 1e-10, SortRule sorting = SortRule::LargestMagn)
{
    // The m-step Arnoldi factorization
    m_fac.factorize_from(1, m_ncv, m_nmatop);
    retrieve_ritzpair(selection);
    // Restarting loop: stop as soon as nev Ritz pairs have converged
    Index i, nconv = 0, nev_adj;
    for (i = 0; i < maxit; i++)
    {
        nconv = num_converged(tol);
        if (nconv >= m_nev)
            break;
        // Not converged yet: adjust nev and perform an implicit restart
        nev_adj = nev_adjusted(nconv);
        restart(nev_adj, selection);
    }
    // Sorting results
    sort_ritzpair(sorting);
    // (i + 1) restart cycles were run in this call; counters accumulate
    m_niter += i + 1;
    m_info = (nconv >= m_nev) ? CompInfo::Successful : CompInfo::NotConverging;
    return (std::min)(m_nev, nconv);
}
///
/// Returns the status of the computation.
/// The full list of enumeration values can be found in \ref Enumerations.
///
CompInfo info() const { return m_info; }
///
/// Returns the number of iterations used in the computation.
/// The counter accumulates across calls to compute() and is reset by init().
///
Index num_iterations() const { return m_niter; }
///
/// Returns the number of matrix operations used in the computation.
/// The counter accumulates across calls to compute() and is reset by init().
///
Index num_operations() const { return m_nmatop; }
///
/// Returns the converged eigenvalues.
///
/// \return A complex-valued vector containing the eigenvalues.
/// Returned vector type will be `Eigen::Vector<std::complex<Scalar>, ...>`, depending on
/// the template parameter `Scalar` defined.
///
ComplexVector eigenvalues() const
{
    // Number of converged Ritz values among the first nev
    const Index nconv = m_ritz_conv.cast<Index>().sum();
    ComplexVector res(nconv);
    if (nconv > 0)
    {
        // Compact the converged Ritz values to the front of the result
        Index dest = 0;
        for (Index i = 0; i < m_nev; i++)
        {
            if (!m_ritz_conv[i])
                continue;
            res[dest] = m_ritz_val[i];
            dest++;
        }
    }
    return res;
}
///
/// Returns the eigenvectors associated with the converged eigenvalues.
///
/// \param nvec The number of eigenvectors to return.
///
/// \return A complex-valued matrix containing the eigenvectors.
/// Returned matrix type will be `Eigen::Matrix<std::complex<Scalar>, ...>`,
/// depending on the template parameter `Scalar` defined.
///
ComplexMatrix eigenvectors(Index nvec) const
{
    // Cannot return more vectors than have converged
    const Index nconv = m_ritz_conv.cast<Index>().sum();
    nvec = (std::min)(nvec, nconv);
    ComplexMatrix res(m_n, nvec);
    if (nvec == 0)
        return res;
    // Gather the Ritz vectors of the converged pairs, then map them back
    // to the original n-dimensional space through the Krylov basis V
    ComplexMatrix converged(m_ncv, nvec);
    Index filled = 0;
    for (Index i = 0; i < m_nev; i++)
    {
        if (!m_ritz_conv[i])
            continue;
        converged.col(filled).noalias() = m_ritz_vec.col(i);
        filled++;
        if (filled >= nvec)
            break;
    }
    res.noalias() = m_fac.matrix_V() * converged;
    return res;
}
///
/// Returns all converged eigenvectors.
///
ComplexMatrix eigenvectors() const
{
    // Request all nev eigenvectors; the overload above still caps the
    // count at the number of converged pairs
    return eigenvectors(m_nev);
}
};
} // namespace Spectra
#endif // SPECTRA_GEN_EIGS_BASE_H
| 18,186 | 33.121951 | 105 | h |
abess | abess-master/include/Spectra/GenEigsComplexShiftSolver.h | // Copyright (C) 2016-2021 Yixuan Qiu <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_GEN_EIGS_COMPLEX_SHIFT_SOLVER_H
#define SPECTRA_GEN_EIGS_COMPLEX_SHIFT_SOLVER_H
#include <Eigen/Core>
#include "GenEigsBase.h"
#include "Util/SelectionRule.h"
#include "MatOp/DenseGenComplexShiftSolve.h"
namespace Spectra {
///
/// \ingroup EigenSolver
///
/// This class implements the eigen solver for general real matrices with
/// a complex shift value in the **shift-and-invert mode**. The background
/// knowledge of the shift-and-invert mode can be found in the documentation
/// of the SymEigsShiftSolver class.
///
/// \tparam OpType The name of the matrix operation class. Users could either
/// use the wrapper classes such as DenseGenComplexShiftSolve and
/// SparseGenComplexShiftSolve, or define their own that implements the type
/// definition `Scalar` and all the public member functions as in
/// DenseGenComplexShiftSolve.
///
template <typename OpType = DenseGenComplexShiftSolve<double>>
class GenEigsComplexShiftSolver : public GenEigsBase<OpType, IdentityBOp>
{
private:
    using Scalar = typename OpType::Scalar;
    using Index = Eigen::Index;
    using Complex = std::complex<Scalar>;
    using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
    using ComplexArray = Eigen::Array<Complex, Eigen::Dynamic, 1>;
    using Base = GenEigsBase<OpType, IdentityBOp>;
    using Base::m_op;
    using Base::m_n;
    using Base::m_nev;
    using Base::m_fac;
    using Base::m_ritz_val;
    using Base::m_ritz_vec;
    const Scalar m_sigmar;  // real part of the shift
    const Scalar m_sigmai;  // imaginary part of the shift
    // First transform back the Ritz values, and then sort
    void sort_ritzpair(SortRule sort_rule) override
    {
        using std::abs;
        using std::sqrt;
        using std::norm;
        // The eigenvalues we get from the iteration is
        //     nu = 0.5 * (1 / (lambda - sigma) + 1 / (lambda - conj(sigma)))
        // So the eigenvalues of the original problem is
        //                     1 \pm sqrt(1 - 4 * nu^2 * sigmai^2)
        //     lambda = sigmar + -----------------------------------
        //                                 2 * nu
        // We need to pick the correct root
        // Let (lambdaj, vj) be the j-th eigen pair, then A * vj = lambdaj * vj
        // and inv(A - r * I) * vj = 1 / (lambdaj - r) * vj
        // where r is any shift value.
        // We can use this identity to determine lambdaj
        //
        // op(v) computes Re(inv(A - r * I) * v) for any real v
        // If r is real, then op(v) is also real. Let a = Re(vj), b = Im(vj),
        // then op(vj) = op(a) + op(b) * i
        // By comparing op(vj) and [1 / (lambdaj - r) * vj], we can determine
        // which one is the correct root
        // Select a random shift value
        SimpleRandom<Scalar> rng(0);
        const Scalar shiftr = rng.random() * m_sigmar + rng.random();
        const Complex shift = Complex(shiftr, Scalar(0));
        m_op.set_shift(shiftr, Scalar(0));
        // Calculate inv(A - r * I) * vj
        Vector v_real(m_n), v_imag(m_n), OPv_real(m_n), OPv_imag(m_n);
        constexpr Scalar eps = TypeTraits<Scalar>::epsilon();
        for (Index i = 0; i < m_nev; i++)
        {
            // Reconstruct the Ritz vector in the original space:
            // real and imaginary parts handled separately since op() is real
            v_real.noalias() = m_fac.matrix_V() * m_ritz_vec.col(i).real();
            v_imag.noalias() = m_fac.matrix_V() * m_ritz_vec.col(i).imag();
            m_op.perform_op(v_real.data(), OPv_real.data());
            m_op.perform_op(v_imag.data(), OPv_imag.data());
            // Two roots computed from the quadratic equation
            const Complex nu = m_ritz_val[i];
            const Complex root_part1 = m_sigmar + Scalar(0.5) / nu;
            const Complex root_part2 = Scalar(0.5) * sqrt(Scalar(1) - Scalar(4) * m_sigmai * m_sigmai * (nu * nu)) / nu;
            const Complex root1 = root_part1 + root_part2;
            const Complex root2 = root_part1 - root_part2;
            // Test roots: accumulate the squared deviation (std::norm) of
            // op(vj) from 1 / (root - shift) * vj for each candidate root
            Scalar err1 = Scalar(0), err2 = Scalar(0);
            for (int k = 0; k < m_n; k++)
            {
                const Complex rhs1 = Complex(v_real[k], v_imag[k]) / (root1 - shift);
                const Complex rhs2 = Complex(v_real[k], v_imag[k]) / (root2 - shift);
                const Complex OPv = Complex(OPv_real[k], OPv_imag[k]);
                err1 += norm(OPv - rhs1);
                err2 += norm(OPv - rhs2);
            }
            // The root with the smaller residual is the true eigenvalue
            const Complex lambdaj = (err1 < err2) ? root1 : root2;
            m_ritz_val[i] = lambdaj;
            if (abs(Eigen::numext::imag(lambdaj)) > eps)
            {
                // Genuinely complex eigenvalue: its conjugate is also an
                // eigenvalue of the real matrix, stored as the next entry
                m_ritz_val[i + 1] = Eigen::numext::conj(lambdaj);
                i++;
            }
            else
            {
                // Numerically real eigenvalue: drop the spurious imaginary part
                m_ritz_val[i] = Complex(Eigen::numext::real(lambdaj), Scalar(0));
            }
        }
        Base::sort_ritzpair(sort_rule);
    }
public:
    ///
    /// Constructor to create a eigen solver object using the shift-and-invert mode.
    ///
    /// \param op The matrix operation object that implements
    /// the complex shift-solve operation of \f$A\f$: calculating
    /// \f$\mathrm{Re}\{(A-\sigma I)^{-1}v\}\f$ for any vector \f$v\f$. Users could either
    /// create the object from the wrapper class such as DenseGenComplexShiftSolve, or
    /// define their own that implements all the public members
    /// as in DenseGenComplexShiftSolve.
    /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-2\f$,
    /// where \f$n\f$ is the size of matrix.
    /// \param ncv Parameter that controls the convergence speed of the algorithm.
    /// Typically a larger `ncv` means faster convergence, but it may
    /// also result in greater memory use and more matrix operations
    /// in each iteration. This parameter must satisfy \f$nev+2 \le ncv \le n\f$,
    /// and is advised to take \f$ncv \ge 2\cdot nev + 1\f$.
    /// \param sigmar The real part of the shift.
    /// \param sigmai The imaginary part of the shift.
    ///
    GenEigsComplexShiftSolver(OpType& op, Index nev, Index ncv, const Scalar& sigmar, const Scalar& sigmai) :
        Base(op, IdentityBOp(), nev, ncv),
        m_sigmar(sigmar), m_sigmai(sigmai)
    {
        op.set_shift(m_sigmar, m_sigmai);
    }
};
} // namespace Spectra
#endif // SPECTRA_GEN_EIGS_COMPLEX_SHIFT_SOLVER_H
| 6,755 | 41.225 | 120 | h |
abess | abess-master/include/Spectra/GenEigsRealShiftSolver.h | // Copyright (C) 2016-2021 Yixuan Qiu <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_GEN_EIGS_REAL_SHIFT_SOLVER_H
#define SPECTRA_GEN_EIGS_REAL_SHIFT_SOLVER_H
#include <Eigen/Core>
#include "GenEigsBase.h"
#include "Util/SelectionRule.h"
#include "MatOp/DenseGenRealShiftSolve.h"
namespace Spectra {
///
/// \ingroup EigenSolver
///
/// This class implements the eigen solver for general real matrices with
/// a real shift value in the **shift-and-invert mode**. The background
/// knowledge of the shift-and-invert mode can be found in the documentation
/// of the SymEigsShiftSolver class.
///
/// \tparam OpType The name of the matrix operation class. Users could either
/// use the wrapper classes such as DenseGenRealShiftSolve and
/// SparseGenRealShiftSolve, or define their own that implements the type
/// definition `Scalar` and all the public member functions as in
/// DenseGenRealShiftSolve.
///
template <typename OpType = DenseGenRealShiftSolve<double>>
class GenEigsRealShiftSolver : public GenEigsBase<OpType, IdentityBOp>
{
private:
    using Scalar = typename OpType::Scalar;
    using Index = Eigen::Index;
    using Complex = std::complex<Scalar>;
    using ComplexArray = Eigen::Array<Complex, Eigen::Dynamic, 1>;
    using Base = GenEigsBase<OpType, IdentityBOp>;
    using Base::m_nev;
    using Base::m_ritz_val;
    const Scalar m_sigma;
    // Map the Ritz values of the shift-inverted operator back to the
    // eigenvalues of the original problem, then let the base class sort them
    void sort_ritzpair(SortRule sort_rule) override
    {
        // The iteration works on nu = 1 / (lambda - sigma), so the original
        // eigenvalues are recovered as lambda = 1 / nu + sigma
        m_ritz_val.head(m_nev) = m_ritz_val.head(m_nev).array().inverse() + m_sigma;
        Base::sort_ritzpair(sort_rule);
    }
public:
    ///
    /// Constructor to create a eigen solver object using the shift-and-invert mode.
    ///
    /// \param op The matrix operation object that implements
    /// the shift-solve operation of \f$A\f$: calculating
    /// \f$(A-\sigma I)^{-1}v\f$ for any vector \f$v\f$. Users could either
    /// create the object from the wrapper class such as DenseGenRealShiftSolve, or
    /// define their own that implements all the public members
    /// as in DenseGenRealShiftSolve.
    /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-2\f$,
    /// where \f$n\f$ is the size of matrix.
    /// \param ncv Parameter that controls the convergence speed of the algorithm.
    /// Typically a larger `ncv` means faster convergence, but it may
    /// also result in greater memory use and more matrix operations
    /// in each iteration. This parameter must satisfy \f$nev+2 \le ncv \le n\f$,
    /// and is advised to take \f$ncv \ge 2\cdot nev + 1\f$.
    /// \param sigma The real-valued shift.
    ///
    GenEigsRealShiftSolver(OpType& op, Index nev, Index ncv, const Scalar& sigma) :
        Base(op, IdentityBOp(), nev, ncv),
        m_sigma(sigma)
    {
        op.set_shift(m_sigma);
    }
};
} // namespace Spectra
#endif // SPECTRA_GEN_EIGS_REAL_SHIFT_SOLVER_H
| 3,518 | 39.918605 | 98 | h |
abess | abess-master/include/Spectra/GenEigsSolver.h | // Copyright (C) 2016-2021 Yixuan Qiu <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_GEN_EIGS_SOLVER_H
#define SPECTRA_GEN_EIGS_SOLVER_H
#include <Eigen/Core>
#include "GenEigsBase.h"
#include "Util/SelectionRule.h"
#include "MatOp/DenseGenMatProd.h"
namespace Spectra {
///
/// \ingroup EigenSolver
///
/// This class implements the eigen solver for general real matrices, i.e.,
/// to solve \f$Ax=\lambda x\f$ for a possibly non-symmetric \f$A\f$ matrix.
///
/// Most of the background information documented in the SymEigsSolver class
/// also applies to the GenEigsSolver class here, except that the eigenvalues
/// and eigenvectors of a general matrix can now be complex-valued.
///
/// \tparam OpType The name of the matrix operation class. Users could either
/// use the wrapper classes such as DenseGenMatProd and
/// SparseGenMatProd, or define their own that implements the type
/// definition `Scalar` and all the public member functions as in
/// DenseGenMatProd.
///
/// An example that illustrates the usage of GenEigsSolver is give below:
///
/// \code{.cpp}
/// #include <Eigen/Core>
/// #include <Spectra/GenEigsSolver.h>
/// // <Spectra/MatOp/DenseGenMatProd.h> is implicitly included
/// #include <iostream>
///
/// using namespace Spectra;
///
/// int main()
/// {
/// // We are going to calculate the eigenvalues of M
/// Eigen::MatrixXd M = Eigen::MatrixXd::Random(10, 10);
///
/// // Construct matrix operation object using the wrapper class
/// DenseGenMatProd<double> op(M);
///
/// // Construct eigen solver object, requesting the largest
/// // (in magnitude, or norm) three eigenvalues
/// GenEigsSolver<DenseGenMatProd<double>> eigs(op, 3, 6);
///
/// // Initialize and compute
/// eigs.init();
/// int nconv = eigs.compute(SortRule::LargestMagn);
///
/// // Retrieve results
/// Eigen::VectorXcd evalues;
/// if (eigs.info() == CompInfo::Successful)
/// evalues = eigs.eigenvalues();
///
/// std::cout << "Eigenvalues found:\n" << evalues << std::endl;
///
/// return 0;
/// }
/// \endcode
///
/// And also an example for sparse matrices:
///
/// \code{.cpp}
/// #include <Eigen/Core>
/// #include <Eigen/SparseCore>
/// #include <Spectra/GenEigsSolver.h>
/// #include <Spectra/MatOp/SparseGenMatProd.h>
/// #include <iostream>
///
/// using namespace Spectra;
///
/// int main()
/// {
/// // A band matrix with 1 on the main diagonal, 2 on the below-main subdiagonal,
/// // and 3 on the above-main subdiagonal
/// const int n = 10;
/// Eigen::SparseMatrix<double> M(n, n);
/// M.reserve(Eigen::VectorXi::Constant(n, 3));
/// for (int i = 0; i < n; i++)
/// {
/// M.insert(i, i) = 1.0;
/// if (i > 0)
/// M.insert(i - 1, i) = 3.0;
/// if (i < n - 1)
/// M.insert(i + 1, i) = 2.0;
/// }
///
/// // Construct matrix operation object using the wrapper class SparseGenMatProd
/// SparseGenMatProd<double> op(M);
///
/// // Construct eigen solver object, requesting the largest three eigenvalues
/// GenEigsSolver<SparseGenMatProd<double>> eigs(op, 3, 6);
///
/// // Initialize and compute
/// eigs.init();
/// int nconv = eigs.compute(SortRule::LargestMagn);
///
/// // Retrieve results
/// Eigen::VectorXcd evalues;
/// if (eigs.info() == CompInfo::Successful)
/// evalues = eigs.eigenvalues();
///
/// std::cout << "Eigenvalues found:\n" << evalues << std::endl;
///
/// return 0;
/// }
/// \endcode
template <typename OpType = DenseGenMatProd<double>>
class GenEigsSolver : public GenEigsBase<OpType, IdentityBOp>
{
private:
    using Index = Eigen::Index;
    using Base = GenEigsBase<OpType, IdentityBOp>;
public:
    ///
    /// Constructor to create a solver object.
    ///
    /// \param op The matrix operation object that implements
    /// the matrix-vector multiplication operation of \f$A\f$:
    /// calculating \f$Av\f$ for any vector \f$v\f$. Users could either
    /// create the object from the wrapper class such as DenseGenMatProd, or
    /// define their own that implements all the public members
    /// as in DenseGenMatProd.
    /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-2\f$,
    /// where \f$n\f$ is the size of matrix.
    /// \param ncv Parameter that controls the convergence speed of the algorithm.
    /// Typically a larger `ncv` means faster convergence, but it may
    /// also result in greater memory use and more matrix operations
    /// in each iteration. This parameter must satisfy \f$nev+2 \le ncv \le n\f$,
    /// and is advised to take \f$ncv \ge 2\cdot nev + 1\f$.
    ///
    GenEigsSolver(OpType& op, Index nev, Index ncv) :
        Base(op, IdentityBOp(), nev, ncv)
    {}
};
} // namespace Spectra
#endif // SPECTRA_GEN_EIGS_SOLVER_H
| 5,233 | 33.893333 | 96 | h |
abess | abess-master/include/Spectra/JDSymEigsBase.h | // Copyright (C) 2020 Netherlands eScience Center <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_JD_SYM_EIGS_BASE_H
#define SPECTRA_JD_SYM_EIGS_BASE_H
#include <Eigen/Core>
#include <vector> // std::vector
#include <cmath> // std::abs, std::pow
#include <algorithm> // std::min
#include <stdexcept> // std::invalid_argument
#include <iostream>
#include "Util/SelectionRule.h"
#include "Util/CompInfo.h"
#include "LinAlg/SearchSpace.h"
#include "LinAlg/RitzPairs.h"
namespace Spectra {
///
/// \ingroup EigenSolver
///
/// This is the base class for symmetric JD eigen solvers, mainly for internal use.
/// It is kept here to provide the documentation for member functions of concrete eigen solvers
/// such as DavidsonSymEigsSolver.
///
/// This class uses the CRTP method to call functions from the derived class.
///
template <typename Derived, typename OpType>
class JDSymEigsBase
{
protected:
    using Index = Eigen::Index;
    using Scalar = typename OpType::Scalar;
    using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
    using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
    const OpType& m_matrix_operator;  // object to conduct matrix operation,
                                      // e.g. matrix-vector product
    Index niter_ = 0;                 // number of iterations performed by the last compute
    const Index m_number_eigenvalues;  // number of eigenvalues requested
    Index m_max_search_space_size;     // search space is restarted when it grows past this
    Index m_initial_search_space_size; // search space size after a restart
    Index m_correction_size;           // how many correction vectors are added in each iteration
    RitzPairs<Scalar> m_ritz_pairs;    // Ritz eigen pair structure
    SearchSpace<Scalar> m_search_space;  // search space
private:
    CompInfo m_info = CompInfo::NotComputed;  // status of the computation
    // Validates the requested number of eigenvalues against the matrix size
    void check_argument() const
    {
        if (m_number_eigenvalues < 1 || m_number_eigenvalues > m_matrix_operator.cols() - 1)
            throw std::invalid_argument("nev must satisfy 1 <= nev <= n - 1, n is the size of matrix");
    }
public:
    // Constructor: op is the matrix operation object, nev the number of
    // requested eigenvalues. Search-space sizes default to multiples of nev
    // and are shrunk for small matrices; they can be tuned via the setters.
    JDSymEigsBase(OpType& op, Index nev) :
        m_matrix_operator(op),
        m_number_eigenvalues(nev),
        m_max_search_space_size(10 * m_number_eigenvalues),
        m_initial_search_space_size(2 * m_number_eigenvalues),
        m_correction_size(m_number_eigenvalues)
    {
        check_argument();
        //TODO better input validation and checks
        if (op.cols() < m_max_search_space_size)
        {
            m_max_search_space_size = op.cols();
        }
        if (op.cols() < m_initial_search_space_size + m_correction_size)
        {
            // Integer division: may be small (or zero) for tiny matrices
            m_initial_search_space_size = op.cols() / 3;
            m_correction_size = op.cols() / 3;
        }
    }
    ///
    /// Sets the Maxmium SearchspaceSize after which is deflated
    ///
    void set_max_search_space_size(Index max_search_space_size)
    {
        m_max_search_space_size = max_search_space_size;
    }
    ///
    /// Sets how many correction vectors are added in each iteration
    ///
    void set_correction_size(Index correction_size)
    {
        m_correction_size = correction_size;
    }
    ///
    /// Sets the Initial SearchspaceSize for Ritz values
    ///
    void set_initial_search_space_size(Index initial_search_space_size)
    {
        m_initial_search_space_size = initial_search_space_size;
    }
    ///
    /// Virtual destructor
    ///
    virtual ~JDSymEigsBase() {}
    ///
    /// Returns the status of the computation.
    /// The full list of enumeration values can be found in \ref Enumerations.
    ///
    CompInfo info() const { return m_info; }
    ///
    /// Returns the number of iterations used in the computation.
    ///
    Index num_iterations() const { return niter_; }
    // First nev Ritz values / vectors of the last computation
    Vector eigenvalues() const { return m_ritz_pairs.ritz_values().head(m_number_eigenvalues); }
    Matrix eigenvectors() const { return m_ritz_pairs.ritz_vectors().leftCols(m_number_eigenvalues); }
    // Builds the initial search space via the derived class (CRTP) and runs
    // the Davidson-type iteration; returns the number of converged eigenvalues
    Index compute(SortRule selection = SortRule::LargestMagn, Index maxit = 100,
                  Scalar tol = 100 * Eigen::NumTraits<Scalar>::dummy_precision())
    {
        Derived& derived = static_cast<Derived&>(*this);
        Matrix intial_space = derived.setup_initial_search_space(selection);
        return compute_with_guess(intial_space, selection, maxit, tol);
    }
    // Same as compute(), but starting from a user-supplied initial search space
    Index compute_with_guess(const Eigen::Ref<const Matrix>& initial_space,
                             SortRule selection = SortRule::LargestMagn,
                             Index maxit = 100,
                             Scalar tol = 100 * Eigen::NumTraits<Scalar>::dummy_precision())
    {
        m_search_space.initialize_search_space(initial_space);
        niter_ = 0;
        for (niter_ = 0; niter_ < maxit; niter_++)
        {
            // Deflate the search space once it exceeds the configured maximum
            bool do_restart = (m_search_space.size() > m_max_search_space_size);
            if (do_restart)
            {
                m_search_space.restart(m_ritz_pairs, m_initial_search_space_size);
            }
            m_search_space.update_operator_basis_product(m_matrix_operator);
            // Solve the projected (small) eigenvalue problem
            Eigen::ComputationInfo small_problem_info = m_ritz_pairs.compute_eigen_pairs(m_search_space);
            if (small_problem_info != Eigen::ComputationInfo::Success)
            {
                m_info = CompInfo::NumericalIssue;
                break;
            }
            m_ritz_pairs.sort(selection);
            bool converged = m_ritz_pairs.check_convergence(tol, m_number_eigenvalues);
            if (converged)
            {
                m_info = CompInfo::Successful;
                break;
            }
            else if (niter_ == maxit - 1)
            {
                m_info = CompInfo::NotConverging;
                break;
            }
            // Ask the derived class (CRTP) for new correction vectors and
            // enlarge the search space with them
            Derived& derived = static_cast<Derived&>(*this);
            Matrix corr_vect = derived.calculate_correction_vector();
            m_search_space.extend_basis(corr_vect);
        }
        // Count how many of the requested eigenvalues converged
        return (m_ritz_pairs.converged_eigenvalues()).template cast<Index>().head(m_number_eigenvalues).sum();
    }
};
} // namespace Spectra
#endif // SPECTRA_JD_SYM_EIGS_BASE_H
| 6,303 | 33.26087 | 110 | h |
abess | abess-master/include/Spectra/SymEigsBase.h | // Copyright (C) 2018-2021 Yixuan Qiu <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_SYM_EIGS_BASE_H
#define SPECTRA_SYM_EIGS_BASE_H
#include <Eigen/Core>
#include <vector> // std::vector
#include <cmath> // std::abs, std::pow
#include <algorithm> // std::min
#include <stdexcept> // std::invalid_argument
#include <utility> // std::move
#include "Util/Version.h"
#include "Util/TypeTraits.h"
#include "Util/SelectionRule.h"
#include "Util/CompInfo.h"
#include "Util/SimpleRandom.h"
#include "MatOp/internal/ArnoldiOp.h"
#include "LinAlg/UpperHessenbergQR.h"
#include "LinAlg/TridiagEigen.h"
#include "LinAlg/Lanczos.h"
namespace Spectra {
///
/// \defgroup EigenSolver Eigen Solvers
///
/// Eigen solvers for different types of problems.
///
///
/// \ingroup EigenSolver
///
/// This is the base class for symmetric eigen solvers, mainly for internal use.
/// It is kept here to provide the documentation for member functions of concrete eigen solvers
/// such as SymEigsSolver and SymEigsShiftSolver.
///
template <typename OpType, typename BOpType>
class SymEigsBase
{
private:
using Scalar = typename OpType::Scalar;
using Index = Eigen::Index;
using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
using Array = Eigen::Array<Scalar, Eigen::Dynamic, 1>;
using BoolArray = Eigen::Array<bool, Eigen::Dynamic, 1>;
using MapMat = Eigen::Map<Matrix>;
using MapVec = Eigen::Map<Vector>;
using MapConstVec = Eigen::Map<const Vector>;
using ArnoldiOpType = ArnoldiOp<Scalar, OpType, BOpType>;
using LanczosFac = Lanczos<Scalar, ArnoldiOpType>;
protected:
// clang-format off
// In SymEigsSolver and SymEigsShiftSolver, the A operator is an lvalue provided by
// the user. In SymGEigsSolver, the A operator is an rvalue. To avoid copying objects,
// we use the following scheme:
// 1. If the op parameter in the constructor is an lvalue, make m_op a const reference to op
// 2. If op is an rvalue, move op to m_op_container, and then make m_op a const
// reference to m_op_container[0]
std::vector<OpType> m_op_container;
const OpType& m_op; // matrix operator for A
const Index m_n; // dimension of matrix A
const Index m_nev; // number of eigenvalues requested
const Index m_ncv; // dimension of Krylov subspace in the Lanczos method
Index m_nmatop; // number of matrix operations called
Index m_niter; // number of restarting iterations
LanczosFac m_fac; // Lanczos factorization
Vector m_ritz_val; // Ritz values
private:
Matrix m_ritz_vec; // Ritz vectors
Vector m_ritz_est; // last row of m_ritz_vec, also called the Ritz estimates
BoolArray m_ritz_conv; // indicator of the convergence of Ritz values
CompInfo m_info; // status of the computation
// clang-format on
// Move rvalue object to the container
// Wraps an rvalue operator in a one-element container so the class can keep
// a reference to it (see the scheme described on m_op_container above)
static std::vector<OpType> create_op_container(OpType&& rval)
{
    std::vector<OpType> holder;
    holder.reserve(1);
    holder.push_back(std::move(rval));
    return holder;
}
// Implicitly restarted Lanczos factorization
// Implicitly restarted Lanczos factorization: compresses the ncv-step
// factorization down to k steps by applying the unwanted Ritz values as
// shifts, then extends it back to ncv steps and refreshes the Ritz pairs.
void restart(Index k, SortRule selection)
{
    using std::abs;
    if (k >= m_ncv)
        return;
    TridiagQR<Scalar> decomp(m_ncv);
    Matrix Q = Matrix::Identity(m_ncv, m_ncv);
    // Apply large shifts first
    // Use Index (not int) for consistency with Eigen::Index and to avoid
    // a narrowing conversion
    const Index nshift = m_ncv - k;
    Vector shifts = m_ritz_val.tail(nshift);
    std::sort(shifts.data(), shifts.data() + nshift, [](const Scalar& v1, const Scalar& v2) { return abs(v1) > abs(v2); });
    for (Index i = 0; i < nshift; i++)
    {
        // QR decomposition of H-mu*I, mu is the shift
        decomp.compute(m_fac.matrix_H(), shifts[i]);
        // Q -> Q * Qi
        decomp.apply_YQ(Q);
        // H -> Q'HQ
        // Since QR = H - mu * I, we have H = QR + mu * I
        // and therefore Q'HQ = RQ + mu * I
        m_fac.compress_H(decomp);
    }
    m_fac.compress_V(Q);
    // Extend the factorization from k steps back to ncv steps
    m_fac.factorize_from(k, m_ncv, m_nmatop);
    retrieve_ritzpair(selection);
}
// Calculates the number of converged Ritz values
// Counts the converged "wanted" Ritz values and updates m_ritz_conv.
Index num_converged(const Scalar& tol)
{
    using std::pow;
    // The machine precision, ~= 1e-16 for the "double" type
    constexpr Scalar eps = TypeTraits<Scalar>::epsilon();
    // std::pow() is not constexpr, so we do not declare eps23 to be constexpr
    // But most compilers should be able to compute eps23 at compile time
    const Scalar eps23 = pow(eps, Scalar(2) / 3);
    // thresh = tol * max(eps23, abs(theta)), theta for Ritz value
    Array thresh = tol * m_ritz_val.head(m_nev).array().abs().max(eps23);
    // Residual estimate: |Ritz estimate| * ||f||, f being the Lanczos residual
    Array resid = m_ritz_est.head(m_nev).array().abs() * m_fac.f_norm();
    // Converged "wanted" Ritz values
    m_ritz_conv = (resid < thresh);
    return m_ritz_conv.count();
}
// Returns the adjusted nev for restarting
// Returns the adjusted nev used for the next implicit restart.
Index nev_adjusted(Index nconv)
{
    using std::abs;
    // A very small value, but 1.0 / near_0 does not overflow
    // ~= 1e-307 for the "double" type
    constexpr Scalar near_0 = TypeTraits<Scalar>::min() * Scalar(10);
    Index nev_new = m_nev;
    // Enlarge nev for every numerically-zero Ritz estimate beyond the first nev
    for (Index i = m_nev; i < m_ncv; i++)
        if (abs(m_ritz_est[i]) < near_0)
            nev_new++;
    // Adjust nev_new, according to dsaup2.f line 677~684 in ARPACK
    nev_new += (std::min)(nconv, (m_ncv - nev_new) / 2);
    if (nev_new == 1 && m_ncv >= 6)
        nev_new = m_ncv / 2;
    else if (nev_new == 1 && m_ncv > 2)
        nev_new = 2;
    // Keep at least one shift available for the restart
    if (nev_new > m_ncv - 1)
        nev_new = m_ncv - 1;
    return nev_new;
}
// Retrieves and sorts Ritz values and Ritz vectors
// Computes the Ritz pairs of the tridiagonal matrix H and sorts them so that
// the "wanted" ones (according to the selection rule) come first.
void retrieve_ritzpair(SortRule selection)
{
    // Spectral decomposition of the small (ncv x ncv) tridiagonal matrix H
    TridiagEigen<Scalar> decomp(m_fac.matrix_H());
    const Vector& evals = decomp.eigenvalues();
    const Matrix& evecs = decomp.eigenvectors();
    // Sort Ritz values and put the wanted ones at the beginning
    std::vector<Index> ind = argsort(selection, evals, m_ncv);
    // Copy the Ritz values and vectors to m_ritz_val and m_ritz_vec, respectively
    for (Index i = 0; i < m_ncv; i++)
    {
        m_ritz_val[i] = evals[ind[i]];
        // Last row of the eigenvector matrix: stored as the Ritz estimates
        m_ritz_est[i] = evecs(m_ncv - 1, ind[i]);
    }
    // Only the first nev Ritz vectors are kept
    for (Index i = 0; i < m_nev; i++)
    {
        m_ritz_vec.col(i).noalias() = evecs.col(ind[i]);
    }
}
protected:
// Sorts the first nev Ritz pairs in the specified order.
// This is used to arrange the final results returned to the user.
virtual void sort_ritzpair(SortRule sort_rule)
{
    // Only the four "plain" orderings are meaningful for the final output
    switch (sort_rule)
    {
        case SortRule::LargestAlge:
        case SortRule::LargestMagn:
        case SortRule::SmallestAlge:
        case SortRule::SmallestMagn:
            break;
        default:
            throw std::invalid_argument("unsupported sorting rule");
    }

    std::vector<Index> order = argsort(sort_rule, m_ritz_val, m_nev);
    // Reorder values, vectors and convergence flags in one pass,
    // then swap the buffers into place
    Vector sorted_val(m_ncv);
    Matrix sorted_vec(m_ncv, m_nev);
    BoolArray sorted_conv(m_nev);
    for (Index i = 0; i < m_nev; i++)
    {
        sorted_val[i] = m_ritz_val[order[i]];
        sorted_vec.col(i).noalias() = m_ritz_vec.col(order[i]);
        sorted_conv[i] = m_ritz_conv[order[i]];
    }
    m_ritz_val.swap(sorted_val);
    m_ritz_vec.swap(sorted_vec);
    m_ritz_conv.swap(sorted_conv);
}
public:
/// \cond
// If op is an lvalue
// Only a reference to `op` is stored (it is forwarded into the Lanczos
// factorization), so the caller must keep `op` alive for the lifetime
// of the solver.
SymEigsBase(OpType& op, const BOpType& Bop, Index nev, Index ncv) :
m_op(op),
m_n(op.rows()),
m_nev(nev),
// ncv cannot exceed the matrix size n, so clamp it here
m_ncv(ncv > m_n ? m_n : ncv),
m_nmatop(0),
m_niter(0),
m_fac(ArnoldiOpType(op, Bop), m_ncv),
m_info(CompInfo::NotComputed)
{
// Validate the user-supplied (unclamped) nev and ncv values
if (nev < 1 || nev > m_n - 1)
throw std::invalid_argument("nev must satisfy 1 <= nev <= n - 1, n is the size of matrix");
if (ncv <= nev || ncv > m_n)
throw std::invalid_argument("ncv must satisfy nev < ncv <= n, n is the size of matrix");
}
// If op is an rvalue
// The solver takes ownership by moving `op` into an internal container
// (m_op_container); m_op then refers to that stored object.
SymEigsBase(OpType&& op, const BOpType& Bop, Index nev, Index ncv) :
m_op_container(create_op_container(std::move(op))),
m_op(m_op_container.front()),
m_n(m_op.rows()),
m_nev(nev),
m_ncv(ncv > m_n ? m_n : ncv),
m_nmatop(0),
m_niter(0),
m_fac(ArnoldiOpType(m_op, Bop), m_ncv),
m_info(CompInfo::NotComputed)
{
if (nev < 1 || nev > m_n - 1)
throw std::invalid_argument("nev must satisfy 1 <= nev <= n - 1, n is the size of matrix");
if (ncv <= nev || ncv > m_n)
throw std::invalid_argument("ncv must satisfy nev < ncv <= n, n is the size of matrix");
}
///
/// Virtual destructor
///
virtual ~SymEigsBase() {}
/// \endcond
///
/// Initializes the solver by providing an initial residual vector.
///
/// \param init_resid Pointer to the initial residual vector; must point to
///                   at least n elements, where n is the size of the matrix.
///
/// **Spectra** (and also **ARPACK**) uses an iterative algorithm
/// to find eigenvalues. This function allows the user to provide the initial
/// residual vector.
///
void init(const Scalar* init_resid)
{
    // Allocate and zero out all Ritz-related containers
    // (setZero(size) resizes and fills with zero in one step)
    m_ritz_val.setZero(m_ncv);
    m_ritz_vec.setZero(m_ncv, m_nev);
    m_ritz_est.setZero(m_ncv);
    m_ritz_conv.setZero(m_nev);
    // Reset the operation and iteration counters
    m_nmatop = 0;
    m_niter = 0;
    // Seed the Lanczos factorization with the given residual vector
    MapConstVec v0(init_resid, m_n);
    m_fac.init(v0, m_nmatop);
}
///
/// Initializes the solver by providing a random initial residual vector.
///
/// This overloaded function generates a random initial residual vector
/// (with a fixed random seed) for the algorithm. Elements in the vector
/// follow independent Uniform(-0.5, 0.5) distribution.
///
void init()
{
SimpleRandom<Scalar> rng(0);
Vector init_resid = rng.random_vec(m_n);
init(init_resid.data());
}
///
/// Conducts the major computation procedure.
///
/// \param selection An enumeration value indicating the selection rule of
///                  the requested eigenvalues, for example `SortRule::LargestMagn`
///                  to retrieve eigenvalues with the largest magnitude.
///                  The full list of enumeration values can be found in
///                  \ref Enumerations.
/// \param maxit     Maximum number of iterations allowed in the algorithm.
/// \param tol       Precision parameter for the calculated eigenvalues.
/// \param sorting   Rule to sort the returned eigenvalues and eigenvectors.
///                  Supported values are `SortRule::LargestAlge`,
///                  `SortRule::LargestMagn`, `SortRule::SmallestAlge`, and
///                  `SortRule::SmallestMagn`. Note that this argument only
///                  **sorts** the final result; which part of the spectrum
///                  is computed is controlled by `selection`.
///
/// \return Number of converged eigenvalues.
///
Index compute(SortRule selection = SortRule::LargestMagn, Index maxit = 1000,
              Scalar tol = 1e-10, SortRule sorting = SortRule::LargestAlge)
{
    // Build the initial m-step Lanczos factorization
    m_fac.factorize_from(1, m_ncv, m_nmatop);
    retrieve_ritzpair(selection);
    // Implicitly restarted iterations
    Index iter = 0, nconv = 0;
    for (; iter < maxit; iter++)
    {
        nconv = num_converged(tol);
        if (nconv >= m_nev)
            break;
        restart(nev_adjusted(nconv), selection);
    }
    // Sort the final results
    sort_ritzpair(sorting);
    // NOTE(review): when the loop exits without converging, iter == maxit and
    // the recorded count is maxit + 1 — confirm whether this off-by-one in the
    // reported iteration count is intended
    m_niter += iter + 1;
    m_info = (nconv >= m_nev) ? CompInfo::Successful : CompInfo::NotConverging;
    return (std::min)(m_nev, nconv);
}
///
/// Returns the status of the computation.
/// The full list of enumeration values can be found in \ref Enumerations.
///
CompInfo info() const { return m_info; }
///
/// Returns the number of iterations used in the computation.
///
// m_niter is reset to zero in init() and accumulated in compute(), so the
// count covers all compute() calls since the last init()
Index num_iterations() const { return m_niter; }
///
/// Returns the number of matrix operations used in the computation.
///
// m_nmatop is reset to zero in init() and incremented by the factorization
// routines, so the count covers everything since the last init()
Index num_operations() const { return m_nmatop; }
///
/// Returns the converged eigenvalues.
///
/// \return A vector containing the eigenvalues.
/// Returned vector type will be `Eigen::Vector<Scalar, ...>`, depending on
/// the template parameter `Scalar` defined.
///
Vector eigenvalues() const
{
    const Index nconv = m_ritz_conv.count();
    Vector res(nconv);
    if (nconv == 0)
        return res;
    // Compact the converged Ritz values into the result vector
    Index dest = 0;
    for (Index i = 0; i < m_nev; i++)
    {
        if (m_ritz_conv[i])
            res[dest++] = m_ritz_val[i];
    }
    return res;
}
///
/// Returns the eigenvectors associated with the converged eigenvalues.
///
/// \param nvec The number of eigenvectors to return; capped at the number
///             of converged eigenvalues.
///
/// \return A matrix whose columns are the eigenvectors.
/// Returned matrix type will be `Eigen::Matrix<Scalar, ...>`,
/// depending on the template parameter `Scalar` defined.
///
virtual Matrix eigenvectors(Index nvec) const
{
    const Index nconv = m_ritz_conv.count();
    nvec = (std::min)(nvec, nconv);
    Matrix res(m_n, nvec);
    if (nvec == 0)
        return res;
    // Gather the Ritz vectors of the converged eigenvalues
    Matrix conv_vec(m_ncv, nvec);
    Index dest = 0;
    for (Index i = 0; i < m_nev && dest < nvec; i++)
    {
        if (m_ritz_conv[i])
        {
            conv_vec.col(dest).noalias() = m_ritz_vec.col(i);
            dest++;
        }
    }
    // Transform back to the original space: x = V * y
    res.noalias() = m_fac.matrix_V() * conv_vec;
    return res;
}
///
/// Returns all converged eigenvectors.
///
// Equivalent to eigenvectors(m_nev); since at most m_nev Ritz pairs are
// tracked, this retrieves every converged eigenvector
virtual Matrix eigenvectors() const
{
return eigenvectors(m_nev);
}
};
} // namespace Spectra
#endif // SPECTRA_SYM_EIGS_BASE_H
| 15,129 | 32.325991 | 127 | h |
abess | abess-master/include/Spectra/SymEigsShiftSolver.h | // Copyright (C) 2016-2021 Yixuan Qiu <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_SYM_EIGS_SHIFT_SOLVER_H
#define SPECTRA_SYM_EIGS_SHIFT_SOLVER_H
#include <Eigen/Core>
#include "SymEigsBase.h"
#include "Util/SelectionRule.h"
#include "MatOp/DenseSymShiftSolve.h"
namespace Spectra {
///
/// \ingroup EigenSolver
///
/// This class implements the eigen solver for real symmetric matrices using
/// the **shift-and-invert mode**. The background information of the symmetric
/// eigen solver is documented in the SymEigsSolver class. Here we focus on
/// explaining the shift-and-invert mode.
///
/// The shift-and-invert mode is based on the following fact:
/// If \f$\lambda\f$ and \f$x\f$ are a pair of eigenvalue and eigenvector of
/// matrix \f$A\f$, such that \f$Ax=\lambda x\f$, then for any \f$\sigma\f$,
/// we have
/// \f[(A-\sigma I)^{-1}x=\nu x\f]
/// where
/// \f[\nu=\frac{1}{\lambda-\sigma}\f]
/// which indicates that \f$(\nu, x)\f$ is an eigenpair of the matrix
/// \f$(A-\sigma I)^{-1}\f$.
///
/// Therefore, if we pass the matrix operation \f$(A-\sigma I)^{-1}y\f$
/// (rather than \f$Ay\f$) to the eigen solver, then we would get the desired
/// values of \f$\nu\f$, and \f$\lambda\f$ can also be easily obtained by noting
/// that \f$\lambda=\sigma+\nu^{-1}\f$.
///
/// The reason why we need this type of manipulation is that
/// the algorithm of **Spectra** (and also **ARPACK**)
/// is good at finding eigenvalues with large magnitude, but may fail in looking
/// for eigenvalues that are close to zero. However, if we really need them, we
/// can set \f$\sigma=0\f$, find the largest eigenvalues of \f$A^{-1}\f$, and then
/// transform back to \f$\lambda\f$, since in this case largest values of \f$\nu\f$
/// implies smallest values of \f$\lambda\f$.
///
/// To summarize, in the shift-and-invert mode, the selection rule will apply to
/// \f$\nu=1/(\lambda-\sigma)\f$ rather than \f$\lambda\f$. So a selection rule
/// of `SortRule::LargestMagn` combined with shift \f$\sigma\f$ will find eigenvalues of
/// \f$A\f$ that are closest to \f$\sigma\f$. But note that the eigenvalues()
/// method will always return the eigenvalues in the original problem (i.e.,
/// returning \f$\lambda\f$ rather than \f$\nu\f$), and eigenvectors are the
/// same for both the original problem and the shifted-and-inverted problem.
///
/// \tparam OpType The name of the matrix operation class. Users could either
/// use the wrapper classes such as DenseSymShiftSolve and
/// SparseSymShiftSolve, or define their own that implements the type
/// definition `Scalar` and all the public member functions as in
/// DenseSymShiftSolve.
///
/// Below is an example that illustrates the use of the shift-and-invert mode:
///
/// \code{.cpp}
/// #include <Eigen/Core>
/// #include <Spectra/SymEigsShiftSolver.h>
/// // <Spectra/MatOp/DenseSymShiftSolve.h> is implicitly included
/// #include <iostream>
///
/// using namespace Spectra;
///
/// int main()
/// {
/// // A size-10 diagonal matrix with elements 1, 2, ..., 10
/// Eigen::MatrixXd M = Eigen::MatrixXd::Zero(10, 10);
/// for (int i = 0; i < M.rows(); i++)
/// M(i, i) = i + 1;
///
/// // Construct matrix operation object using the wrapper class
/// DenseSymShiftSolve<double> op(M);
///
/// // Construct eigen solver object with shift 0
/// // This will find eigenvalues that are closest to 0
/// SymEigsShiftSolver<DenseSymShiftSolve<double>> eigs(op, 3, 6, 0.0);
///
/// eigs.init();
/// eigs.compute(SortRule::LargestMagn);
/// if (eigs.info() == CompInfo::Successful)
/// {
/// Eigen::VectorXd evalues = eigs.eigenvalues();
/// // Will get (3.0, 2.0, 1.0)
/// std::cout << "Eigenvalues found:\n" << evalues << std::endl;
/// }
///
/// return 0;
/// }
/// \endcode
///
/// Also an example for user-supplied matrix shift-solve operation class:
///
/// \code{.cpp}
/// #include <Eigen/Core>
/// #include <Spectra/SymEigsShiftSolver.h>
/// #include <iostream>
///
/// using namespace Spectra;
///
/// // M = diag(1, 2, ..., 10)
/// class MyDiagonalTenShiftSolve
/// {
/// private:
/// double sigma_;
/// public:
/// using Scalar = double; // A typedef named "Scalar" is required
/// int rows() { return 10; }
/// int cols() { return 10; }
/// void set_shift(double sigma) { sigma_ = sigma; }
/// // y_out = inv(A - sigma * I) * x_in
/// // inv(A - sigma * I) = diag(1/(1-sigma), 1/(2-sigma), ...)
/// void perform_op(double *x_in, double *y_out) const
/// {
/// for (int i = 0; i < rows(); i++)
/// {
/// y_out[i] = x_in[i] / (i + 1 - sigma_);
/// }
/// }
/// };
///
/// int main()
/// {
/// MyDiagonalTenShiftSolve op;
/// // Find three eigenvalues that are closest to 3.14
/// SymEigsShiftSolver<MyDiagonalTenShiftSolve> eigs(op, 3, 6, 3.14);
/// eigs.init();
/// eigs.compute(SortRule::LargestMagn);
/// if (eigs.info() == CompInfo::Successful)
/// {
/// Eigen::VectorXd evalues = eigs.eigenvalues();
/// // Will get (4.0, 3.0, 2.0)
/// std::cout << "Eigenvalues found:\n" << evalues << std::endl;
/// }
///
/// return 0;
/// }
/// \endcode
///
template <typename OpType = DenseSymShiftSolve<double>>
class SymEigsShiftSolver : public SymEigsBase<OpType, IdentityBOp>
{
private:
    using Scalar = typename OpType::Scalar;
    using Index = Eigen::Index;
    using Array = Eigen::Array<Scalar, Eigen::Dynamic, 1>;

    using Base = SymEigsBase<OpType, IdentityBOp>;
    using Base::m_nev;
    using Base::m_ritz_val;

    const Scalar m_sigma;

    // Map the computed Ritz values back to the eigenvalues of the original
    // problem before sorting. The iteration works on nu = 1 / (lambda - sigma),
    // hence lambda = 1 / nu + sigma.
    void sort_ritzpair(SortRule sort_rule) override
    {
        m_ritz_val.head(m_nev).array() = m_ritz_val.head(m_nev).array().inverse() + m_sigma;
        Base::sort_ritzpair(sort_rule);
    }

public:
    ///
    /// Constructor to create a eigen solver object using the shift-and-invert mode.
    ///
    /// \param op    The matrix operation object that implements
    ///              the shift-solve operation of \f$A\f$: calculating
    ///              \f$(A-\sigma I)^{-1}v\f$ for any vector \f$v\f$. Users could either
    ///              create the object from the wrapper class such as DenseSymShiftSolve, or
    ///              define their own that implements all the public members
    ///              as in DenseSymShiftSolve.
    /// \param nev   Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$,
    ///              where \f$n\f$ is the size of matrix.
    /// \param ncv   Parameter that controls the convergence speed of the algorithm.
    ///              Typically a larger `ncv` means faster convergence, but it may
    ///              also result in greater memory use and more matrix operations
    ///              in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$,
    ///              and is advised to take \f$ncv \ge 2\cdot nev\f$.
    /// \param sigma The value of the shift.
    ///
    SymEigsShiftSolver(OpType& op, Index nev, Index ncv, const Scalar& sigma) :
        Base(op, IdentityBOp(), nev, ncv),
        m_sigma(sigma)
    {
        // Propagate the shift to the operation object so that it computes
        // (A - sigma * I)^{-1} * v
        op.set_shift(m_sigma);
    }
};
} // namespace Spectra
#endif // SPECTRA_SYM_EIGS_SHIFT_SOLVER_H
| 7,762 | 37.621891 | 98 | h |
abess | abess-master/include/Spectra/SymEigsSolver.h | // Copyright (C) 2016-2021 Yixuan Qiu <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_SYM_EIGS_SOLVER_H
#define SPECTRA_SYM_EIGS_SOLVER_H
#include <Eigen/Core>
#include "SymEigsBase.h"
#include "Util/SelectionRule.h"
#include "MatOp/DenseSymMatProd.h"
namespace Spectra {
///
/// \ingroup EigenSolver
///
/// This class implements the eigen solver for real symmetric matrices, i.e.,
/// to solve \f$Ax=\lambda x\f$ where \f$A\f$ is symmetric.
///
/// **Spectra** is designed to calculate a specified number (\f$k\f$)
/// of eigenvalues of a large square matrix (\f$A\f$). Usually \f$k\f$ is much
/// less than the size of the matrix (\f$n\f$), so that only a few eigenvalues
/// and eigenvectors are computed.
///
/// Rather than providing the whole \f$A\f$ matrix, the algorithm only requires
/// the matrix-vector multiplication operation of \f$A\f$. Therefore, users of
/// this solver need to supply a class that computes the result of \f$Av\f$
/// for any given vector \f$v\f$. The name of this class should be given to
/// the template parameter `OpType`, and instance of this class passed to
/// the constructor of SymEigsSolver.
///
/// If the matrix \f$A\f$ is already stored as a matrix object in **Eigen**,
/// for example `Eigen::MatrixXd`, then there is an easy way to construct such a
/// matrix operation class, by using the built-in wrapper class DenseSymMatProd
/// that wraps an existing matrix object in **Eigen**. This is also the
/// default template parameter for SymEigsSolver. For sparse matrices, the
/// wrapper class SparseSymMatProd can be used similarly.
///
/// If the users need to define their own matrix-vector multiplication operation
/// class, it should define a public type `Scalar` to indicate the element type,
/// and implement all the public member functions as in DenseSymMatProd.
///
/// \tparam OpType The name of the matrix operation class. Users could either
/// use the wrapper classes such as DenseSymMatProd and
/// SparseSymMatProd, or define their own that implements the type
/// definition `Scalar` and all the public member functions as in
/// DenseSymMatProd.
///
/// Below is an example that demonstrates the usage of this class.
///
/// \code{.cpp}
/// #include <Eigen/Core>
/// #include <Spectra/SymEigsSolver.h>
/// // <Spectra/MatOp/DenseSymMatProd.h> is implicitly included
/// #include <iostream>
///
/// using namespace Spectra;
///
/// int main()
/// {
/// // We are going to calculate the eigenvalues of M
/// Eigen::MatrixXd A = Eigen::MatrixXd::Random(10, 10);
/// Eigen::MatrixXd M = A + A.transpose();
///
/// // Construct matrix operation object using the wrapper class DenseSymMatProd
/// DenseSymMatProd<double> op(M);
///
/// // Construct eigen solver object, requesting the largest three eigenvalues
/// SymEigsSolver<DenseSymMatProd<double>> eigs(op, 3, 6);
///
/// // Initialize and compute
/// eigs.init();
/// int nconv = eigs.compute(SortRule::LargestAlge);
///
/// // Retrieve results
/// Eigen::VectorXd evalues;
/// if (eigs.info() == CompInfo::Successful)
/// evalues = eigs.eigenvalues();
///
/// std::cout << "Eigenvalues found:\n" << evalues << std::endl;
///
/// return 0;
/// }
/// \endcode
///
/// And here is an example for user-supplied matrix operation class.
///
/// \code{.cpp}
/// #include <Eigen/Core>
/// #include <Spectra/SymEigsSolver.h>
/// #include <iostream>
///
/// using namespace Spectra;
///
/// // M = diag(1, 2, ..., 10)
/// class MyDiagonalTen
/// {
/// public:
/// using Scalar = double; // A typedef named "Scalar" is required
/// int rows() { return 10; }
/// int cols() { return 10; }
/// // y_out = M * x_in
/// void perform_op(double *x_in, double *y_out) const
/// {
/// for (int i = 0; i < rows(); i++)
/// {
/// y_out[i] = x_in[i] * (i + 1);
/// }
/// }
/// };
///
/// int main()
/// {
/// MyDiagonalTen op;
/// SymEigsSolver<MyDiagonalTen> eigs(op, 3, 6);
/// eigs.init();
/// eigs.compute(SortRule::LargestAlge);
/// if (eigs.info() == CompInfo::Successful)
/// {
/// Eigen::VectorXd evalues = eigs.eigenvalues();
/// // Will get (10, 9, 8)
/// std::cout << "Eigenvalues found:\n" << evalues << std::endl;
/// }
///
/// return 0;
/// }
/// \endcode
///
template <typename OpType = DenseSymMatProd<double>>
class SymEigsSolver : public SymEigsBase<OpType, IdentityBOp>
{
private:
    using Index = Eigen::Index;
    using Base = SymEigsBase<OpType, IdentityBOp>;

public:
    ///
    /// Constructor to create a solver object.
    ///
    /// \param op  The matrix operation object that implements
    ///            the matrix-vector multiplication operation of \f$A\f$:
    ///            calculating \f$Av\f$ for any vector \f$v\f$. Users could either
    ///            create the object from the wrapper class such as DenseSymMatProd, or
    ///            define their own that implements all the public members
    ///            as in DenseSymMatProd.
    /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$,
    ///            where \f$n\f$ is the size of matrix.
    /// \param ncv Parameter that controls the convergence speed of the algorithm.
    ///            Typically a larger `ncv` means faster convergence, but it may
    ///            also result in greater memory use and more matrix operations
    ///            in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$,
    ///            and is advised to take \f$ncv \ge 2\cdot nev\f$.
    ///
    SymEigsSolver(OpType& op, Index nev, Index ncv) :
        Base(op, IdentityBOp(), nev, ncv)
    {}
};
} // namespace Spectra
#endif // SPECTRA_SYM_EIGS_SOLVER_H
| 6,053 | 35.690909 | 96 | h |
abess | abess-master/include/Spectra/SymGEigsShiftSolver.h | // Copyright (C) 2020-2021 Yixuan Qiu <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_SYM_GEIGS_SHIFT_SOLVER_H
#define SPECTRA_SYM_GEIGS_SHIFT_SOLVER_H
#include <utility> // std::move
#include "SymEigsBase.h"
#include "Util/GEigsMode.h"
#include "MatOp/internal/SymGEigsShiftInvertOp.h"
#include "MatOp/internal/SymGEigsBucklingOp.h"
#include "MatOp/internal/SymGEigsCayleyOp.h"
namespace Spectra {
///
/// \ingroup GEigenSolver
///
/// This class implements the generalized eigen solver for real symmetric
/// matrices, i.e., to solve \f$Ax=\lambda Bx\f$ where \f$A\f$ and \f$B\f$ are symmetric
/// matrices. A spectral transform is applied to seek interior
/// generalized eigenvalues with respect to some shift \f$\sigma\f$.
///
/// There are different modes of this solver, specified by the template parameter `Mode`.
/// See the pages for the specialized classes for details.
/// - The shift-and-invert mode transforms the problem into \f$(A-\sigma B)^{-1}Bx=\nu x\f$,
/// where \f$\nu=1/(\lambda-\sigma)\f$. This mode assumes that \f$B\f$ is positive definite.
/// See \ref SymGEigsShiftSolver<OpType, BOpType, GEigsMode::ShiftInvert>
/// "SymGEigsShiftSolver (Shift-and-invert mode)" for more details.
/// - The buckling mode transforms the problem into \f$(A-\sigma B)^{-1}Ax=\nu x\f$,
/// where \f$\nu=\lambda/(\lambda-\sigma)\f$. This mode assumes that \f$A\f$ is positive definite.
/// See \ref SymGEigsShiftSolver<OpType, BOpType, GEigsMode::Buckling>
/// "SymGEigsShiftSolver (Buckling mode)" for more details.
/// - The Cayley mode transforms the problem into \f$(A-\sigma B)^{-1}(A+\sigma B)x=\nu x\f$,
/// where \f$\nu=(\lambda+\sigma)/(\lambda-\sigma)\f$. This mode assumes that \f$B\f$ is positive definite.
/// See \ref SymGEigsShiftSolver<OpType, BOpType, GEigsMode::Cayley>
/// "SymGEigsShiftSolver (Cayley mode)" for more details.
// Empty class template
// The primary template is intentionally left without members; only the
// partial specializations for specific GEigsMode values (ShiftInvert,
// Buckling, Cayley) provide an implementation.
template <typename OpType, typename BOpType, GEigsMode Mode>
class SymGEigsShiftSolver
{};
///
/// \ingroup GEigenSolver
///
/// This class implements the generalized eigen solver for real symmetric
/// matrices using the shift-and-invert spectral transformation. The original problem is
/// to solve \f$Ax=\lambda Bx\f$, where \f$A\f$ is symmetric and \f$B\f$ is positive definite.
/// The transformed problem is \f$(A-\sigma B)^{-1}Bx=\nu x\f$, where
/// \f$\nu=1/(\lambda-\sigma)\f$, and \f$\sigma\f$ is a user-specified shift.
///
/// This solver requires two matrix operation objects: one to compute \f$y=(A-\sigma B)^{-1}x\f$
/// for any vector \f$v\f$, and one for the matrix multiplication \f$Bv\f$.
///
/// If \f$A\f$ and \f$B\f$ are stored as Eigen matrices, then the first operation object
/// can be created using the SymShiftInvert class, and the second one can be created
/// using the DenseSymMatProd or SparseSymMatProd classes. If the users need to define their
/// own operation classes, then they should implement all the public member functions as
/// in those built-in classes.
///
/// \tparam OpType The type of the first operation object. Users could either
/// use the wrapper class SymShiftInvert, or define their own that implements
/// the type definition `Scalar` and all the public member functions as in SymShiftInvert.
/// \tparam BOpType The name of the matrix operation class for \f$B\f$. Users could either
/// use the wrapper classes such as DenseSymMatProd and
/// SparseSymMatProd, or define their own that implements all the
/// public member functions as in DenseSymMatProd.
/// \tparam Mode Mode of the generalized eigen solver. In this solver
/// it is Spectra::GEigsMode::ShiftInvert.
///
/// Below is an example that demonstrates the usage of this class.
///
/// \code{.cpp}
/// #include <Eigen/Core>
/// #include <Eigen/SparseCore>
/// #include <Spectra/SymGEigsShiftSolver.h>
/// #include <Spectra/MatOp/SymShiftInvert.h>
/// #include <Spectra/MatOp/SparseSymMatProd.h>
/// #include <iostream>
///
/// using namespace Spectra;
///
/// int main()
/// {
/// // We are going to solve the generalized eigenvalue problem
/// // A * x = lambda * B * x,
/// // where A is symmetric and B is positive definite
/// const int n = 100;
///
/// // Define the A matrix
/// Eigen::MatrixXd M = Eigen::MatrixXd::Random(n, n);
/// Eigen::MatrixXd A = M + M.transpose();
///
/// // Define the B matrix, a tridiagonal matrix with 2 on the diagonal
/// // and 1 on the subdiagonals
/// Eigen::SparseMatrix<double> B(n, n);
/// B.reserve(Eigen::VectorXi::Constant(n, 3));
/// for (int i = 0; i < n; i++)
/// {
/// B.insert(i, i) = 2.0;
/// if (i > 0)
/// B.insert(i - 1, i) = 1.0;
/// if (i < n - 1)
/// B.insert(i + 1, i) = 1.0;
/// }
///
/// // Construct matrix operation objects using the wrapper classes
/// // A is dense, B is sparse
/// using OpType = SymShiftInvert<double, Eigen::Dense, Eigen::Sparse>;
/// using BOpType = SparseSymMatProd<double>;
/// OpType op(A, B);
/// BOpType Bop(B);
///
/// // Construct generalized eigen solver object, seeking three generalized
/// // eigenvalues that are closest to zero. This is equivalent to specifying
/// // a shift sigma = 0.0 combined with the SortRule::LargestMagn selection rule
/// SymGEigsShiftSolver<OpType, BOpType, GEigsMode::ShiftInvert>
/// geigs(op, Bop, 3, 6, 0.0);
///
/// // Initialize and compute
/// geigs.init();
/// int nconv = geigs.compute(SortRule::LargestMagn);
///
/// // Retrieve results
/// Eigen::VectorXd evalues;
/// Eigen::MatrixXd evecs;
/// if (geigs.info() == CompInfo::Successful)
/// {
/// evalues = geigs.eigenvalues();
/// evecs = geigs.eigenvectors();
/// }
///
/// std::cout << "Number of converged generalized eigenvalues: " << nconv << std::endl;
/// std::cout << "Generalized eigenvalues found:\n" << evalues << std::endl;
/// std::cout << "Generalized eigenvectors found:\n" << evecs.topRows(10) << std::endl;
///
/// return 0;
/// }
/// \endcode
// Partial specialization for mode = GEigsMode::ShiftInvert
template <typename OpType, typename BOpType>
class SymGEigsShiftSolver<OpType, BOpType, GEigsMode::ShiftInvert> :
    public SymEigsBase<SymGEigsShiftInvertOp<OpType, BOpType>, BOpType>
{
private:
    using Scalar = typename OpType::Scalar;
    using Index = Eigen::Index;
    using Array = Eigen::Array<Scalar, Eigen::Dynamic, 1>;

    using ModeMatOp = SymGEigsShiftInvertOp<OpType, BOpType>;
    using Base = SymEigsBase<ModeMatOp, BOpType>;
    using Base::m_nev;
    using Base::m_ritz_val;

    const Scalar m_sigma;

    // Install the shift on the freshly built mode operator and forward it
    // to the base class constructor
    static ModeMatOp set_shift_and_move(ModeMatOp&& op, const Scalar& sigma)
    {
        op.set_shift(sigma);
        return std::move(op);
    }

    // The iteration computes nu = 1 / (lambda - sigma); recover the
    // generalized eigenvalues lambda = 1 / nu + sigma before sorting
    void sort_ritzpair(SortRule sort_rule) override
    {
        m_ritz_val.head(m_nev).array() = m_ritz_val.head(m_nev).array().inverse() + m_sigma;
        Base::sort_ritzpair(sort_rule);
    }

public:
    ///
    /// Constructor to create a solver object.
    ///
    /// \param op    The matrix operation object that computes \f$y=(A-\sigma B)^{-1}v\f$
    ///              for any vector \f$v\f$. Users could either create the object from the
    ///              wrapper class SymShiftInvert, or define their own that implements all
    ///              the public members as in SymShiftInvert.
    /// \param Bop   The \f$B\f$ matrix operation object that implements the matrix-vector
    ///              multiplication \f$Bv\f$. Users could either create the object from the
    ///              wrapper classes such as DenseSymMatProd and SparseSymMatProd, or
    ///              define their own that implements all the public member functions
    ///              as in DenseSymMatProd. \f$B\f$ needs to be positive definite.
    /// \param nev   Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$,
    ///              where \f$n\f$ is the size of matrix.
    /// \param ncv   Parameter that controls the convergence speed of the algorithm.
    ///              Typically a larger `ncv` means faster convergence, but it may
    ///              also result in greater memory use and more matrix operations
    ///              in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$,
    ///              and is advised to take \f$ncv \ge 2\cdot nev\f$.
    /// \param sigma The value of the shift.
    ///
    SymGEigsShiftSolver(OpType& op, BOpType& Bop, Index nev, Index ncv, const Scalar& sigma) :
        Base(set_shift_and_move(ModeMatOp(op, Bop), sigma), Bop, nev, ncv),
        m_sigma(sigma)
    {}
};
///
/// \ingroup GEigenSolver
///
/// This class implements the generalized eigen solver for real symmetric
/// matrices in the buckling mode. The original problem is
/// to solve \f$Kx=\lambda K_G x\f$, where \f$K\f$ is positive definite and \f$K_G\f$ is symmetric.
/// The transformed problem is \f$(K-\sigma K_G)^{-1}Kx=\nu x\f$, where
/// \f$\nu=\lambda/(\lambda-\sigma)\f$, and \f$\sigma\f$ is a user-specified shift.
///
/// This solver requires two matrix operation objects: one to compute \f$y=(K-\sigma K_G)^{-1}x\f$
/// for any vector \f$v\f$, and one for the matrix multiplication \f$Kv\f$.
///
/// If \f$K\f$ and \f$K_G\f$ are stored as Eigen matrices, then the first operation object
/// can be created using the SymShiftInvert class, and the second one can be created
/// using the DenseSymMatProd or SparseSymMatProd classes. If the users need to define their
/// own operation classes, then they should implement all the public member functions as
/// in those built-in classes.
///
/// \tparam OpType The type of the first operation object. Users could either
/// use the wrapper class SymShiftInvert, or define their own that implements
/// the type definition `Scalar` and all the public member functions as in SymShiftInvert.
/// \tparam BOpType The name of the matrix operation class for \f$K\f$. Users could either
/// use the wrapper classes such as DenseSymMatProd and
/// SparseSymMatProd, or define their own that implements all the
/// public member functions as in DenseSymMatProd.
/// \tparam Mode Mode of the generalized eigen solver. In this solver
/// it is Spectra::GEigsMode::Buckling.
///
/// Below is an example that demonstrates the usage of this class.
///
/// \code{.cpp}
/// #include <Eigen/Core>
/// #include <Eigen/SparseCore>
/// #include <Spectra/SymGEigsShiftSolver.h>
/// #include <Spectra/MatOp/SymShiftInvert.h>
/// #include <Spectra/MatOp/SparseSymMatProd.h>
/// #include <iostream>
///
/// using namespace Spectra;
///
/// int main()
/// {
/// // We are going to solve the generalized eigenvalue problem
/// // K * x = lambda * KG * x,
/// // where K is positive definite, and KG is symmetric
/// const int n = 100;
///
/// // Define the K matrix, a tridiagonal matrix with 2 on the diagonal
/// // and 1 on the subdiagonals
/// Eigen::SparseMatrix<double> K(n, n);
/// K.reserve(Eigen::VectorXi::Constant(n, 3));
/// for (int i = 0; i < n; i++)
/// {
/// K.insert(i, i) = 2.0;
/// if (i > 0)
/// K.insert(i - 1, i) = 1.0;
/// if (i < n - 1)
/// K.insert(i + 1, i) = 1.0;
/// }
///
/// // Define the KG matrix
/// Eigen::MatrixXd M = Eigen::MatrixXd::Random(n, n);
/// Eigen::MatrixXd KG = M + M.transpose();
///
/// // Construct matrix operation objects using the wrapper classes
/// // K is sparse, KG is dense
/// using OpType = SymShiftInvert<double, Eigen::Sparse, Eigen::Dense>;
/// using BOpType = SparseSymMatProd<double>;
/// OpType op(K, KG);
/// BOpType Bop(K);
///
/// // Construct generalized eigen solver object, seeking three generalized
/// // eigenvalues that are closest to and larger than 1.0. This is equivalent to
/// // specifying a shift sigma = 1.0 combined with the SortRule::LargestAlge
/// // selection rule
/// SymGEigsShiftSolver<OpType, BOpType, GEigsMode::Buckling>
/// geigs(op, Bop, 3, 6, 1.0);
///
/// // Initialize and compute
/// geigs.init();
/// int nconv = geigs.compute(SortRule::LargestAlge);
///
/// // Retrieve results
/// Eigen::VectorXd evalues;
/// Eigen::MatrixXd evecs;
/// if (geigs.info() == CompInfo::Successful)
/// {
/// evalues = geigs.eigenvalues();
/// evecs = geigs.eigenvectors();
/// }
///
/// std::cout << "Number of converged generalized eigenvalues: " << nconv << std::endl;
/// std::cout << "Generalized eigenvalues found:\n" << evalues << std::endl;
/// std::cout << "Generalized eigenvectors found:\n" << evecs.topRows(10) << std::endl;
///
/// return 0;
/// }
/// \endcode
// Partial specialization for mode = GEigsMode::Buckling
template <typename OpType, typename BOpType>
class SymGEigsShiftSolver<OpType, BOpType, GEigsMode::Buckling> :
    public SymEigsBase<SymGEigsBucklingOp<OpType, BOpType>, BOpType>
{
private:
    using Scalar = typename OpType::Scalar;
    using Index = Eigen::Index;
    using Array = Eigen::Array<Scalar, Eigen::Dynamic, 1>;
    using ModeMatOp = SymGEigsBucklingOp<OpType, BOpType>;
    using Base = SymEigsBase<ModeMatOp, BOpType>;
    using Base::m_nev;
    using Base::m_ritz_val;

    const Scalar m_sigma;

    // Validate the shift and hand the wrapped operator on to the base class.
    // The buckling transformation is undefined for sigma == 0.
    static ModeMatOp set_shift_and_move(ModeMatOp&& op, const Scalar& sigma)
    {
        if (sigma == Scalar(0))
            throw std::invalid_argument("SymGEigsShiftSolver: sigma cannot be zero in the buckling mode");
        op.set_shift(sigma);
        return std::move(op);
    }

    // Map the Ritz values of the transformed problem back to eigenvalues of
    // the original problem, then let the base class perform the sorting
    void sort_ritzpair(SortRule sort_rule) override
    {
        // The iteration yields nu = lambda / (lambda - sigma), hence
        // lambda = sigma * nu / (nu - 1)
        Array nu = m_ritz_val.head(m_nev).array();
        m_ritz_val.head(m_nev).array() = m_sigma * nu / (nu - Scalar(1));
        Base::sort_ritzpair(sort_rule);
    }

public:
    ///
    /// Constructor to create a solver object.
    ///
    /// \param op    Operation object computing \f$y=(K-\sigma K_G)^{-1}v\f$ for any
    ///              vector \f$v\f$, e.g. an instance of the SymShiftInvert wrapper
    ///              class, or a user type with the same public interface.
    /// \param Bop   Operation object for \f$K\f$ implementing the matrix-vector
    ///              product \f$Kv\f$, e.g. DenseSymMatProd or SparseSymMatProd, or
    ///              a user type with the same public interface. \f$K\f$ must be
    ///              positive definite.
    /// \param nev   Number of eigenvalues requested. Must satisfy
    ///              \f$1\le nev \le n-1\f$, where \f$n\f$ is the matrix size.
    /// \param ncv   Convergence-speed parameter, satisfying \f$nev < ncv \le n\f$.
    ///              Larger values typically converge faster at the cost of more
    ///              memory and matrix operations per iteration; taking
    ///              \f$ncv \ge 2\cdot nev\f$ is advised.
    /// \param sigma The value of the shift.
    ///
    SymGEigsShiftSolver(OpType& op, BOpType& Bop, Index nev, Index ncv, const Scalar& sigma) :
        Base(set_shift_and_move(ModeMatOp(op, Bop), sigma), Bop, nev, ncv),
        m_sigma(sigma)
    {}
};
///
/// \ingroup GEigenSolver
///
/// This class implements the generalized eigen solver for real symmetric
/// matrices using the Cayley spectral transformation. The original problem is
/// to solve \f$Ax=\lambda Bx\f$, where \f$A\f$ is symmetric and \f$B\f$ is positive definite.
/// The transformed problem is \f$(A-\sigma B)^{-1}(A+\sigma B)x=\nu x\f$, where
/// \f$\nu=(\lambda+\sigma)/(\lambda-\sigma)\f$, and \f$\sigma\f$ is a user-specified shift.
///
/// This solver requires two matrix operation objects: one to compute \f$y=(A-\sigma B)^{-1}v\f$
/// for any vector \f$v\f$, and one for the matrix multiplication \f$Bv\f$.
///
/// If \f$A\f$ and \f$B\f$ are stored as Eigen matrices, then the first operation object
/// can be created using the SymShiftInvert class, and the second one can be created
/// using the DenseSymMatProd or SparseSymMatProd classes. If the users need to define their
/// own operation classes, then they should implement all the public member functions as
/// in those built-in classes.
///
/// \tparam OpType The type of the first operation object. Users could either
/// use the wrapper class SymShiftInvert, or define their own that implements
/// the type definition `Scalar` and all the public member functions as in SymShiftInvert.
/// \tparam BOpType The name of the matrix operation class for \f$B\f$. Users could either
/// use the wrapper classes such as DenseSymMatProd and
/// SparseSymMatProd, or define their own that implements all the
/// public member functions as in DenseSymMatProd.
/// \tparam Mode Mode of the generalized eigen solver. In this solver
/// it is Spectra::GEigsMode::Cayley.
// Partial specialization for mode = GEigsMode::Cayley
template <typename OpType, typename BOpType>
class SymGEigsShiftSolver<OpType, BOpType, GEigsMode::Cayley> :
    public SymEigsBase<SymGEigsCayleyOp<OpType, BOpType>, BOpType>
{
private:
    using Scalar = typename OpType::Scalar;
    using Index = Eigen::Index;
    using Array = Eigen::Array<Scalar, Eigen::Dynamic, 1>;
    using ModeMatOp = SymGEigsCayleyOp<OpType, BOpType>;
    using Base = SymEigsBase<ModeMatOp, BOpType>;
    using Base::m_nev;
    using Base::m_ritz_val;

    const Scalar m_sigma;

    // Validate the shift and hand the wrapped operator on to the base class.
    // The Cayley transformation is undefined for sigma == 0.
    static ModeMatOp set_shift_and_move(ModeMatOp&& op, const Scalar& sigma)
    {
        if (sigma == Scalar(0))
            throw std::invalid_argument("SymGEigsShiftSolver: sigma cannot be zero in the Cayley mode");
        op.set_shift(sigma);
        return std::move(op);
    }

    // Map the Ritz values of the transformed problem back to eigenvalues of
    // the original problem, then let the base class perform the sorting
    void sort_ritzpair(SortRule sort_rule) override
    {
        // The iteration yields nu = (lambda + sigma) / (lambda - sigma),
        // hence lambda = sigma * (nu + 1) / (nu - 1)
        Array nu = m_ritz_val.head(m_nev).array();
        m_ritz_val.head(m_nev).array() = m_sigma * (nu + Scalar(1)) / (nu - Scalar(1));
        Base::sort_ritzpair(sort_rule);
    }

public:
    ///
    /// Constructor to create a solver object.
    ///
    /// \param op    Operation object computing \f$y=(A-\sigma B)^{-1}v\f$ for any
    ///              vector \f$v\f$, e.g. an instance of the SymShiftInvert wrapper
    ///              class, or a user type with the same public interface.
    /// \param Bop   Operation object for \f$B\f$ implementing the matrix-vector
    ///              product \f$Bv\f$, e.g. DenseSymMatProd or SparseSymMatProd, or
    ///              a user type with the same public interface. \f$B\f$ must be
    ///              positive definite.
    /// \param nev   Number of eigenvalues requested. Must satisfy
    ///              \f$1\le nev \le n-1\f$, where \f$n\f$ is the matrix size.
    /// \param ncv   Convergence-speed parameter, satisfying \f$nev < ncv \le n\f$.
    ///              Larger values typically converge faster at the cost of more
    ///              memory and matrix operations per iteration; taking
    ///              \f$ncv \ge 2\cdot nev\f$ is advised.
    /// \param sigma The value of the shift.
    ///
    SymGEigsShiftSolver(OpType& op, BOpType& Bop, Index nev, Index ncv, const Scalar& sigma) :
        Base(set_shift_and_move(ModeMatOp(op, Bop), sigma), Bop, nev, ncv),
        m_sigma(sigma)
    {}
};
} // namespace Spectra
#endif // SPECTRA_SYM_GEIGS_SHIFT_SOLVER_H
| 21,455 | 45.241379 | 109 | h |
abess | abess-master/include/Spectra/SymGEigsSolver.h | // Copyright (C) 2016-2021 Yixuan Qiu <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_SYM_GEIGS_SOLVER_H
#define SPECTRA_SYM_GEIGS_SOLVER_H
#include "SymEigsBase.h"
#include "Util/GEigsMode.h"
#include "MatOp/internal/SymGEigsCholeskyOp.h"
#include "MatOp/internal/SymGEigsRegInvOp.h"
namespace Spectra {
///
/// \defgroup GEigenSolver Generalized Eigen Solvers
///
/// Generalized eigen solvers for different types of problems.
///
///
/// \ingroup GEigenSolver
///
/// This class implements the generalized eigen solver for real symmetric
/// matrices, i.e., to solve \f$Ax=\lambda Bx\f$ where \f$A\f$ is symmetric and
/// \f$B\f$ is positive definite.
///
/// There are two modes of this solver, specified by the template parameter `Mode`.
/// See the pages for the specialized classes for details.
/// - The Cholesky mode assumes that \f$B\f$ can be factorized using Cholesky
/// decomposition, which is the preferred mode when the decomposition is
/// available. (This can be easily done in Eigen using the dense or sparse
/// Cholesky solver.)
/// See \ref SymGEigsSolver<OpType, BOpType, GEigsMode::Cholesky> "SymGEigsSolver (Cholesky mode)" for more details.
/// - The regular inverse mode requires the matrix-vector product \f$Bv\f$ and the
/// linear equation solving operation \f$B^{-1}v\f$. This mode should only be
/// used when the Cholesky decomposition of \f$B\f$ is hard to implement, or
/// when computing \f$B^{-1}v\f$ is much faster than the Cholesky decomposition.
/// See \ref SymGEigsSolver<OpType, BOpType, GEigsMode::RegularInverse> "SymGEigsSolver (Regular inverse mode)" for more details.
// Empty class template
// Primary template: intentionally left empty. Implementations are provided
// only by the partial specializations for each supported GEigsMode (the
// Cholesky and RegularInverse modes below), so instantiating an unsupported
// mode yields an empty, unusable class.
template <typename OpType, typename BOpType, GEigsMode Mode>
class SymGEigsSolver
{};
///
/// \ingroup GEigenSolver
///
/// This class implements the generalized eigen solver for real symmetric
/// matrices using Cholesky decomposition, i.e., to solve \f$Ax=\lambda Bx\f$
/// where \f$A\f$ is symmetric and \f$B\f$ is positive definite with the Cholesky
/// decomposition \f$B=LL'\f$.
///
/// This solver requires two matrix operation objects: one for \f$A\f$ that implements
/// the matrix multiplication \f$Av\f$, and one for \f$B\f$ that implements the lower
/// and upper triangular solving \f$L^{-1}v\f$ and \f$(L')^{-1}v\f$.
///
/// If \f$A\f$ and \f$B\f$ are stored as Eigen matrices, then the first operation
/// can be created using the DenseSymMatProd or SparseSymMatProd classes, and
/// the second operation can be created using the DenseCholesky or SparseCholesky
/// classes. If the users need to define their own operation classes, then they
/// should implement all the public member functions as in those built-in classes.
///
/// \tparam OpType The name of the matrix operation class for \f$A\f$. Users could either
/// use the wrapper classes such as DenseSymMatProd and
/// SparseSymMatProd, or define their own that implements the type
/// definition `Scalar` and all the public member functions as in
/// DenseSymMatProd.
/// \tparam BOpType The name of the matrix operation class for \f$B\f$. Users could either
/// use the wrapper classes such as DenseCholesky and
/// SparseCholesky, or define their own that implements all the
/// public member functions as in DenseCholesky.
/// \tparam Mode Mode of the generalized eigen solver. In this solver
/// it is Spectra::GEigsMode::Cholesky.
///
/// Below is an example that demonstrates the usage of this class.
///
/// \code{.cpp}
/// #include <Eigen/Core>
/// #include <Eigen/SparseCore>
/// #include <Eigen/Eigenvalues>
/// #include <Spectra/SymGEigsSolver.h>
/// #include <Spectra/MatOp/DenseSymMatProd.h>
/// #include <Spectra/MatOp/SparseCholesky.h>
/// #include <iostream>
///
/// using namespace Spectra;
///
/// int main()
/// {
/// // We are going to solve the generalized eigenvalue problem A * x = lambda * B * x
/// const int n = 100;
///
/// // Define the A matrix
/// Eigen::MatrixXd M = Eigen::MatrixXd::Random(n, n);
/// Eigen::MatrixXd A = M + M.transpose();
///
/// // Define the B matrix, a band matrix with 2 on the diagonal and 1 on the subdiagonals
/// Eigen::SparseMatrix<double> B(n, n);
/// B.reserve(Eigen::VectorXi::Constant(n, 3));
/// for (int i = 0; i < n; i++)
/// {
/// B.insert(i, i) = 2.0;
/// if (i > 0)
/// B.insert(i - 1, i) = 1.0;
/// if (i < n - 1)
/// B.insert(i + 1, i) = 1.0;
/// }
///
/// // Construct matrix operation objects using the wrapper classes
/// DenseSymMatProd<double> op(A);
/// SparseCholesky<double> Bop(B);
///
/// // Construct generalized eigen solver object, requesting the largest three generalized eigenvalues
/// SymGEigsSolver<DenseSymMatProd<double>, SparseCholesky<double>, GEigsMode::Cholesky>
/// geigs(op, Bop, 3, 6);
///
/// // Initialize and compute
/// geigs.init();
/// int nconv = geigs.compute(SortRule::LargestAlge);
///
/// // Retrieve results
/// Eigen::VectorXd evalues;
/// Eigen::MatrixXd evecs;
/// if (geigs.info() == CompInfo::Successful)
/// {
/// evalues = geigs.eigenvalues();
/// evecs = geigs.eigenvectors();
/// }
///
/// std::cout << "Generalized eigenvalues found:\n" << evalues << std::endl;
/// std::cout << "Generalized eigenvectors found:\n" << evecs.topRows(10) << std::endl;
///
/// // Verify results using the generalized eigen solver in Eigen
/// Eigen::MatrixXd Bdense = B;
/// Eigen::GeneralizedSelfAdjointEigenSolver<Eigen::MatrixXd> es(A, Bdense);
///
/// std::cout << "Generalized eigenvalues:\n" << es.eigenvalues().tail(3) << std::endl;
/// std::cout << "Generalized eigenvectors:\n" << es.eigenvectors().rightCols(3).topRows(10) << std::endl;
///
/// return 0;
/// }
/// \endcode
// Partial specialization for mode = GEigsMode::Cholesky
template <typename OpType, typename BOpType>
class SymGEigsSolver<OpType, BOpType, GEigsMode::Cholesky> :
    public SymEigsBase<SymGEigsCholeskyOp<OpType, BOpType>, IdentityBOp>
{
private:
    using Scalar = typename OpType::Scalar;
    using Index = Eigen::Index;
    using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
    using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
    using OpWrapper = SymGEigsCholeskyOp<OpType, BOpType>;
    using BaseType = SymEigsBase<OpWrapper, IdentityBOp>;

    const BOpType& m_Bop;

public:
    ///
    /// Constructor to create a solver object.
    ///
    /// \param op  Operation object for \f$A\f$ implementing the matrix-vector
    ///            product \f$Av\f$, e.g. an instance of DenseSymMatProd, or a
    ///            user type with the same public interface.
    /// \param Bop Operation object representing a Cholesky decomposition
    ///            \f$B=LL'\f$. It must implement the triangular solves
    ///            \f$L^{-1}v\f$ and \f$(L')^{-1}v\f$ for any vector \f$v\f$,
    ///            e.g. an instance of DenseCholesky, or a user type with the
    ///            same public interface. \f$B\f$ must be positive definite.
    /// \param nev Number of eigenvalues requested. Must satisfy
    ///            \f$1\le nev \le n-1\f$, where \f$n\f$ is the matrix size.
    /// \param ncv Convergence-speed parameter, satisfying \f$nev < ncv \le n\f$.
    ///            Larger values typically converge faster at the cost of more
    ///            memory and matrix operations per iteration; taking
    ///            \f$ncv \ge 2\cdot nev\f$ is advised.
    ///
    SymGEigsSolver(OpType& op, BOpType& Bop, Index nev, Index ncv) :
        BaseType(OpWrapper(op, Bop), IdentityBOp(), nev, ncv),
        m_Bop(Bop)
    {}

    /// \cond

    Matrix eigenvectors(Index nvec) const override
    {
        // The base class solves the transformed (standard) problem, whose
        // eigenvectors are y = L'x; recover each original eigenvector via
        // the upper triangular solve x = (L')^{-1} y
        Matrix vecs = BaseType::eigenvectors(nvec);
        Vector solved(vecs.rows());
        const Index num_conv = vecs.cols();
        for (Index j = 0; j < num_conv; j++)
        {
            m_Bop.upper_triangular_solve(&vecs(0, j), solved.data());
            vecs.col(j).noalias() = solved;
        }
        return vecs;
    }

    Matrix eigenvectors() const override
    {
        // Qualified call: reuse the overload above without virtual dispatch
        return SymGEigsSolver<OpType, BOpType, GEigsMode::Cholesky>::eigenvectors(this->m_nev);
    }

    /// \endcond
};
///
/// \ingroup GEigenSolver
///
/// This class implements the generalized eigen solver for real symmetric
/// matrices in the regular inverse mode, i.e., to solve \f$Ax=\lambda Bx\f$
/// where \f$A\f$ is symmetric, and \f$B\f$ is positive definite with the operations
/// defined below.
///
/// This solver requires two matrix operation objects: one for \f$A\f$ that implements
/// the matrix multiplication \f$Av\f$, and one for \f$B\f$ that implements the
/// matrix-vector product \f$Bv\f$ and the linear equation solving operation \f$B^{-1}v\f$.
///
/// If \f$A\f$ and \f$B\f$ are stored as Eigen matrices, then the first operation
/// can be created using the DenseSymMatProd or SparseSymMatProd classes, and
/// the second operation can be created using the SparseRegularInverse class. There is no
/// wrapper class for a dense \f$B\f$ matrix since in this case the Cholesky mode
/// is always preferred. If the users need to define their own operation classes, then they
/// should implement all the public member functions as in those built-in classes.
///
/// \tparam OpType The name of the matrix operation class for \f$A\f$. Users could either
/// use the wrapper classes such as DenseSymMatProd and
/// SparseSymMatProd, or define their own that implements the type
/// definition `Scalar` and all the public member functions as in
/// DenseSymMatProd.
/// \tparam BOpType The name of the matrix operation class for \f$B\f$. Users could either
/// use the wrapper class SparseRegularInverse, or define their
/// own that implements all the public member functions as in
/// SparseRegularInverse.
/// \tparam Mode Mode of the generalized eigen solver. In this solver
/// it is Spectra::GEigsMode::RegularInverse.
///
// Partial specialization for mode = GEigsMode::RegularInverse
template <typename OpType, typename BOpType>
class SymGEigsSolver<OpType, BOpType, GEigsMode::RegularInverse> :
    public SymEigsBase<SymGEigsRegInvOp<OpType, BOpType>, BOpType>
{
private:
    using Index = Eigen::Index;
    using OpWrapper = SymGEigsRegInvOp<OpType, BOpType>;
    using BaseType = SymEigsBase<OpWrapper, BOpType>;

public:
    ///
    /// Constructor to create a solver object.
    ///
    /// \param op  Operation object for \f$A\f$ implementing the matrix-vector
    ///            product \f$Av\f$, e.g. an instance of DenseSymMatProd, or a
    ///            user type with the same public interface.
    /// \param Bop Operation object for \f$B\f$ implementing both the product
    ///            \f$Bv\f$ and the linear solve \f$B^{-1}v\f$ for any vector
    ///            \f$v\f$, e.g. an instance of SparseRegularInverse, or a user
    ///            type with the same public interface. \f$B\f$ must be
    ///            positive definite.
    /// \param nev Number of eigenvalues requested. Must satisfy
    ///            \f$1\le nev \le n-1\f$, where \f$n\f$ is the matrix size.
    /// \param ncv Convergence-speed parameter, satisfying \f$nev < ncv \le n\f$.
    ///            Larger values typically converge faster at the cost of more
    ///            memory and matrix operations per iteration; taking
    ///            \f$ncv \ge 2\cdot nev\f$ is advised.
    ///
    SymGEigsSolver(OpType& op, BOpType& Bop, Index nev, Index ncv) :
        BaseType(OpWrapper(op, Bop), Bop, nev, ncv)
    {}
};
} // namespace Spectra
#endif // SPECTRA_SYM_GEIGS_SOLVER_H
| 13,190 | 44.329897 | 131 | h |
abess | abess-master/include/Spectra/LinAlg/Arnoldi.h | // Copyright (C) 2018-2021 Yixuan Qiu <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_ARNOLDI_H
#define SPECTRA_ARNOLDI_H
#include <Eigen/Core>
#include <cmath> // std::sqrt
#include <utility> // std::move
#include <stdexcept> // std::invalid_argument
#include "../MatOp/internal/ArnoldiOp.h"
#include "../Util/TypeTraits.h"
#include "../Util/SimpleRandom.h"
#include "UpperHessenbergQR.h"
#include "DoubleShiftQR.h"
namespace Spectra {
// Arnoldi factorization A * V = V * H + f * e'
// A: n x n
// V: n x k
// H: k x k
// f: n x 1
// e: [0, ..., 0, 1]
// V and H are allocated of dimension m, so the maximum value of k is m
template <typename Scalar, typename ArnoldiOpType>
class Arnoldi
{
private:
    using Index = Eigen::Index;
    using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
    using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
    using MapVec = Eigen::Map<Vector>;
    using MapConstMat = Eigen::Map<const Matrix>;
    using MapConstVec = Eigen::Map<const Vector>;

protected:
    // A very small value, but 1.0 / m_near_0 does not overflow
    // ~= 1e-307 for the "double" type
    static constexpr Scalar m_near_0 = TypeTraits<Scalar>::min() * Scalar(10);
    // The machine precision, ~= 1e-16 for the "double" type
    static constexpr Scalar m_eps = TypeTraits<Scalar>::epsilon();

    ArnoldiOpType m_op;  // Operators for the Arnoldi factorization
    const Index m_n;     // dimension of A
    const Index m_m;     // maximum dimension of subspace V
    Index m_k;           // current dimension of subspace V
    Matrix m_fac_V;      // V matrix in the Arnoldi factorization
    Matrix m_fac_H;      // H matrix in the Arnoldi factorization
    Vector m_fac_f;      // residual in the Arnoldi factorization
    Scalar m_beta;       // ||f||, B-norm of f

    // Given orthonormal basis V (w.r.t. B), find a nonzero vector f such that V'Bf = 0
    // With rounding errors, we hope V'B(f/||f||) < eps
    // Assume that f has been properly allocated
    void expand_basis(MapConstMat& V, const Index seed, Vector& f, Scalar& fnorm, Index& op_counter)
    {
        using std::sqrt;

        Vector v(m_n), Vf(V.cols());
        for (Index iter = 0; iter < 5; iter++)
        {
            // Randomly generate a new vector and orthogonalize it against V
            SimpleRandom<Scalar> rng(seed + 123 * iter);
            // The first try forces f to be in the range of A
            if (iter == 0)
            {
                rng.random_vec(v);
                m_op.perform_op(v.data(), f.data());
                op_counter++;
            }
            else
            {
                rng.random_vec(f);
            }
            // f <- f - V * V'Bf, so that f is orthogonal to V in B-norm
            m_op.trans_product(V, f, Vf);
            f.noalias() -= V * Vf;
            // fnorm <- ||f||
            fnorm = m_op.norm(f);

            // Compute V'Bf again
            m_op.trans_product(V, f, Vf);
            // Test whether V'B(f/||f||) < eps
            Scalar ortho_err = Vf.cwiseAbs().maxCoeff();
            // If not, iteratively correct the residual
            int count = 0;
            while (count < 3 && ortho_err >= m_eps * fnorm)
            {
                // f <- f - V * Vf
                f.noalias() -= V * Vf;
                // beta <- ||f||
                fnorm = m_op.norm(f);
                m_op.trans_product(V, f, Vf);
                ortho_err = Vf.cwiseAbs().maxCoeff();
                count++;
            }
            // If the condition is satisfied, simply return
            // Otherwise, go to the next iteration and try a new random vector
            if (ortho_err < m_eps * fnorm)
                return;
        }
    }

public:
    // Copy an ArnoldiOp
    Arnoldi(const ArnoldiOpType& op, Index m) :
        m_op(op), m_n(op.rows()), m_m(m), m_k(0)
    {}

    // Move an ArnoldiOp
    // NOTE: members are initialized in declaration order, so m_op is
    // move-constructed before m_n. The dimension must therefore be read from
    // m_op, not from the moved-from `op` (reading `op.rows()` here would be
    // a use-after-move)
    Arnoldi(ArnoldiOpType&& op, Index m) :
        m_op(std::move(op)), m_n(m_op.rows()), m_m(m), m_k(0)
    {}

    // Virtual destructor: factorize_from() is virtual, so this class may be
    // used (and destroyed) polymorphically by derived factorizations
    virtual ~Arnoldi() = default;

    // Const-reference to internal structures
    const Matrix& matrix_V() const { return m_fac_V; }
    const Matrix& matrix_H() const { return m_fac_H; }
    const Vector& vector_f() const { return m_fac_f; }
    Scalar f_norm() const { return m_beta; }
    Index subspace_dim() const { return m_k; }

    // Initialize with an operator and an initial vector
    // Throws std::invalid_argument if v0 has (near) zero norm
    void init(MapConstVec& v0, Index& op_counter)
    {
        m_fac_V.resize(m_n, m_m);
        m_fac_H.resize(m_m, m_m);
        m_fac_f.resize(m_n);
        m_fac_H.setZero();

        // Verify the initial vector
        const Scalar v0norm = m_op.norm(v0);
        if (v0norm < m_near_0)
            throw std::invalid_argument("initial residual vector cannot be zero");

        // Points to the first column of V
        MapVec v(m_fac_V.data(), m_n);
        // Force v to be in the range of A, i.e., v = A * v0
        m_op.perform_op(v0.data(), v.data());
        op_counter++;
        // Normalize
        const Scalar vnorm = m_op.norm(v);
        v /= vnorm;

        // Compute H and f
        Vector w(m_n);
        m_op.perform_op(v.data(), w.data());
        op_counter++;
        m_fac_H(0, 0) = m_op.inner_product(v, w);
        m_fac_f.noalias() = w - v * m_fac_H(0, 0);

        // In some cases f is zero in exact arithmetics, but due to rounding errors
        // it may contain tiny fluctuations. When this happens, we force f to be zero
        if (m_fac_f.cwiseAbs().maxCoeff() < m_eps)
        {
            m_fac_f.setZero();
            m_beta = Scalar(0);
        }
        else
        {
            m_beta = m_op.norm(m_fac_f);
        }

        // Indicate that this is a step-1 factorization
        m_k = 1;
    }

    // Arnoldi factorization starting from step-k
    // Throws std::invalid_argument if from_k exceeds the current subspace dimension
    virtual void factorize_from(Index from_k, Index to_m, Index& op_counter)
    {
        using std::sqrt;

        if (to_m <= from_k)
            return;

        if (from_k > m_k)
        {
            std::string msg = "Arnoldi: from_k (= " + std::to_string(from_k) +
                ") is larger than the current subspace dimension (= " + std::to_string(m_k) + ")";
            throw std::invalid_argument(msg);
        }

        const Scalar beta_thresh = m_eps * sqrt(Scalar(m_n));

        // Pre-allocate vectors
        Vector Vf(to_m);
        Vector w(m_n);

        // Keep the upperleft k x k submatrix of H and set other elements to 0
        m_fac_H.rightCols(m_m - from_k).setZero();
        m_fac_H.block(from_k, 0, m_m - from_k, from_k).setZero();

        for (Index i = from_k; i <= to_m - 1; i++)
        {
            bool restart = false;
            // If beta = 0, then the next V is not full rank
            // We need to generate a new residual vector that is orthogonal
            // to the current V, which we call a restart
            if (m_beta < m_near_0)
            {
                MapConstMat V(m_fac_V.data(), m_n, i);  // The first i columns
                expand_basis(V, 2 * i, m_fac_f, m_beta, op_counter);
                restart = true;
            }

            // v <- f / ||f||
            m_fac_V.col(i).noalias() = m_fac_f / m_beta;  // The (i+1)-th column

            // Note that H[i+1, i] equals to the unrestarted beta
            m_fac_H(i, i - 1) = restart ? Scalar(0) : m_beta;

            // w <- A * v, v = m_fac_V.col(i)
            m_op.perform_op(&m_fac_V(0, i), w.data());
            op_counter++;

            const Index i1 = i + 1;
            // First i+1 columns of V
            MapConstMat Vs(m_fac_V.data(), m_n, i1);
            // h = m_fac_H(0:i, i)
            MapVec h(&m_fac_H(0, i), i1);
            // h <- V'Bw
            m_op.trans_product(Vs, w, h);

            // f <- w - V * h
            m_fac_f.noalias() = w - Vs * h;
            m_beta = m_op.norm(m_fac_f);

            // If the quick test passes, no re-orthogonalization is needed
            if (m_beta > Scalar(0.717) * m_op.norm(h))
                continue;

            // f/||f|| is going to be the next column of V, so we need to test
            // whether V'B(f/||f||) ~= 0
            m_op.trans_product(Vs, m_fac_f, Vf.head(i1));
            Scalar ortho_err = Vf.head(i1).cwiseAbs().maxCoeff();
            // If not, iteratively correct the residual
            int count = 0;
            while (count < 5 && ortho_err > m_eps * m_beta)
            {
                // There is an edge case: when beta=||f|| is close to zero, f mostly consists
                // of noises of rounding errors, so the test [ortho_err < eps * beta] is very
                // likely to fail. In particular, if beta=0, then the test is ensured to fail.
                // Hence when this happens, we force f to be zero, and then restart in the
                // next iteration.
                if (m_beta < beta_thresh)
                {
                    m_fac_f.setZero();
                    m_beta = Scalar(0);
                    break;
                }

                // f <- f - V * Vf
                m_fac_f.noalias() -= Vs * Vf.head(i1);
                // h <- h + Vf
                h.noalias() += Vf.head(i1);
                // beta <- ||f||
                m_beta = m_op.norm(m_fac_f);

                m_op.trans_product(Vs, m_fac_f, Vf.head(i1));
                ortho_err = Vf.head(i1).cwiseAbs().maxCoeff();
                count++;
            }
        }

        // Indicate that this is a step-m factorization
        m_k = to_m;
    }

    // Apply H -> Q'HQ, where Q is from a double shift QR decomposition
    void compress_H(const DoubleShiftQR<Scalar>& decomp)
    {
        decomp.matrix_QtHQ(m_fac_H);
        m_k -= 2;
    }

    // Apply H -> Q'HQ, where Q is from an upper Hessenberg QR decomposition
    void compress_H(const UpperHessenbergQR<Scalar>& decomp)
    {
        decomp.matrix_QtHQ(m_fac_H);
        m_k--;
    }

    // Apply V -> VQ and compute the new f.
    // Should be called after compress_H(), since m_k is updated there.
    // Only need to update the first k+1 columns of V
    // The first (m - k + i) elements of the i-th column of Q are non-zero,
    // and the rest are zero
    void compress_V(const Matrix& Q)
    {
        Matrix Vs(m_n, m_k + 1);
        for (Index i = 0; i < m_k; i++)
        {
            const Index nnz = m_m - m_k + i + 1;
            MapConstVec q(&Q(0, i), nnz);
            Vs.col(i).noalias() = m_fac_V.leftCols(nnz) * q;
        }
        Vs.col(m_k).noalias() = m_fac_V * Q.col(m_k);
        m_fac_V.leftCols(m_k + 1).noalias() = Vs;

        Vector fk = m_fac_f * Q(m_m - 1, m_k - 1) + m_fac_V.col(m_k) * m_fac_H(m_k, m_k - 1);
        m_fac_f.swap(fk);
        m_beta = m_op.norm(m_fac_f);
    }
};
} // namespace Spectra
#endif // SPECTRA_ARNOLDI_H
| 10,914 | 33.541139 | 100 | h |
abess | abess-master/include/Spectra/LinAlg/DoubleShiftQR.h | // Copyright (C) 2016-2021 Yixuan Qiu <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_DOUBLE_SHIFT_QR_H
#define SPECTRA_DOUBLE_SHIFT_QR_H
#include <Eigen/Core>
#include <vector> // std::vector
#include <algorithm> // std::min, std::fill, std::copy
#include <utility> // std::swap
#include <cmath> // std::abs, std::sqrt, std::pow
#include <stdexcept> // std::invalid_argument, std::logic_error
#include "../Util/TypeTraits.h"
namespace Spectra {
template <typename Scalar = double>
class DoubleShiftQR
{
private:
using Index = Eigen::Index;
using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
using Matrix3X = Eigen::Matrix<Scalar, 3, Eigen::Dynamic>;
using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
using IntArray = Eigen::Array<unsigned char, Eigen::Dynamic, 1>;
using GenericMatrix = Eigen::Ref<Matrix>;
using ConstGenericMatrix = const Eigen::Ref<const Matrix>;
// A very small value, but 1.0 / m_near_0 does not overflow
// ~= 1e-307 for the "double" type
static constexpr Scalar m_near_0 = TypeTraits<Scalar>::min() * Scalar(10);
// The machine precision, ~= 1e-16 for the "double" type
static constexpr Scalar m_eps = TypeTraits<Scalar>::epsilon();
Index m_n; // Dimension of the matrix
Matrix m_mat_H; // A copy of the matrix to be factorized
Scalar m_shift_s; // Shift constant
Scalar m_shift_t; // Shift constant
Matrix3X m_ref_u; // Householder reflectors
IntArray m_ref_nr; // How many rows does each reflector affects
// 3 - A general reflector
// 2 - A Givens rotation
// 1 - An identity transformation
bool m_computed; // Whether matrix has been factorized
    // Compute sqrt(x1^2 + x2^2 + x3^2) with high precision
static Scalar stable_norm3(Scalar x1, Scalar x2, Scalar x3)
{
using std::abs;
using std::sqrt;
x1 = abs(x1);
x2 = abs(x2);
x3 = abs(x3);
// Make x1 >= {x2, x3}
if (x1 < x2)
std::swap(x1, x2);
if (x1 < x3)
std::swap(x1, x3);
// If x1 is too small, return 0
if (x1 < m_near_0)
return Scalar(0);
const Scalar r2 = x2 / x1, r3 = x3 / x1;
// We choose a cutoff such that cutoff^4 < eps
// If max(r2, r3) > cutoff, use the standard way; otherwise use Taylor series expansion
// to avoid an explicit sqrt() call that may lose precision
const Scalar cutoff = Scalar(0.1) * pow(m_eps, Scalar(0.25));
Scalar r = r2 * r2 + r3 * r3;
r = (r2 >= cutoff || r3 >= cutoff) ?
sqrt(Scalar(1) + r) :
(Scalar(1) + r * (Scalar(0.5) - Scalar(0.125) * r)); // sqrt(1 + t) ~= 1 + t/2 - t^2/8
return x1 * r;
}
// x[i] <- x[i] / r, r = sqrt(x1^2 + x2^2 + x3^2)
// Assume |x1| >= {|x2|, |x3|}, x1 != 0
static void stable_scaling(Scalar& x1, Scalar& x2, Scalar& x3)
{
using std::abs;
using std::pow;
using std::sqrt;
const Scalar x1sign = (x1 > Scalar(0)) ? Scalar(1) : Scalar(-1);
x1 = abs(x1);
// Use the same method as in stable_norm3()
const Scalar r2 = x2 / x1, r3 = x3 / x1;
const Scalar cutoff = Scalar(0.1) * pow(m_eps, Scalar(0.25));
Scalar r = r2 * r2 + r3 * r3;
// r = 1/sqrt(1 + r2^2 + r3^2)
r = (abs(r2) >= cutoff || abs(r3) >= cutoff) ?
Scalar(1) / sqrt(Scalar(1) + r) :
(Scalar(1) - r * (Scalar(0.5) - Scalar(0.375) * r)); // 1/sqrt(1 + t) ~= 1 - t * (1/2 - (3/8) * t)
x1 = x1sign * r;
x2 = r2 * r;
x3 = r3 * r;
}
    // Compute the Householder reflector for the vector x = (x1, x2, x3) and
    // store its (normalized) direction in column `ind` of m_ref_u; the entry
    // m_ref_nr[ind] records how many rows the reflector affects (1, 2 or 3)
    void compute_reflector(const Scalar& x1, const Scalar& x2, const Scalar& x3, Index ind)
    {
        using std::abs;
        Scalar* u = &m_ref_u.coeffRef(0, ind);
        unsigned char* nr = m_ref_nr.data();
        const Scalar x2m = abs(x2), x3m = abs(x3);
        // If both x2 and x3 are zero, nr is 1, and we early exit
        if (x2m < m_near_0 && x3m < m_near_0)
        {
            nr[ind] = 1;
            return;
        }
        // In general case the reflector affects 3 rows
        // If x3 is zero, decrease nr by 1
        nr[ind] = (x3m < m_near_0) ? 2 : 3;
        // ||x||, computed with the 2-term hypot when x3 is negligible
        const Scalar x_norm = (x3m < m_near_0) ? Eigen::numext::hypot(x1, x2) : stable_norm3(x1, x2, x3);
        // x1' = x1 - rho * ||x||
        // rho = -sign(x1), if x1 == 0, we choose rho = 1
        // (the bool difference below evaluates to +1 when x1 <= 0 and -1 otherwise)
        const Scalar rho = (x1 <= Scalar(0)) - (x1 > Scalar(0));
        const Scalar x1_new = x1 - rho * x_norm, x1m = abs(x1_new);
        // Copy x to u
        u[0] = x1_new;
        u[1] = x2;
        u[2] = x3;
        // Normalize u to unit length, passing its largest-magnitude component
        // first so that stable_scaling() divides by the dominant entry
        if (x1m >= x2m && x1m >= x3m)
        {
            stable_scaling(u[0], u[1], u[2]);
        }
        else if (x2m >= x1m && x2m >= x3m)
        {
            stable_scaling(u[1], u[0], u[2]);
        }
        else
        {
            stable_scaling(u[2], u[0], u[1]);
        }
    }
void compute_reflector(const Scalar* x, Index ind)
{
compute_reflector(x[0], x[1], x[2], ind);
}
    // Update the block X = H(il:iu, il:iu)
    //
    // Performs one implicit double-shift QR sweep on the diagonal block
    // [il, iu] of m_mat_H: the first reflector is built from the first column
    // of M = X^2 - s*X + t*I (computed without forming M), and the bulge is
    // then chased down the subdiagonal with a sequence of 3x3 (and one final
    // 2x2) Householder reflectors.
    void update_block(Index il, Index iu)
    {
        // Block size
        const Index bsize = iu - il + 1;
        // If block size == 1, there is no need to apply reflectors
        if (bsize == 1)
        {
            m_ref_nr.coeffRef(il) = 1;
            return;
        }
        const Scalar x00 = m_mat_H.coeff(il, il),
                     x01 = m_mat_H.coeff(il, il + 1),
                     x10 = m_mat_H.coeff(il + 1, il),
                     x11 = m_mat_H.coeff(il + 1, il + 1);
        // m00 = x00 * (x00 - s) + x01 * x10 + t
        const Scalar m00 = x00 * (x00 - m_shift_s) + x01 * x10 + m_shift_t;
        // m10 = x10 * (x00 + x11 - s)
        const Scalar m10 = x10 * (x00 + x11 - m_shift_s);
        // For block size == 2, do a Givens rotation on M = X * X - s * X + t * I
        if (bsize == 2)
        {
            // This causes nr=2
            compute_reflector(m00, m10, 0, il);
            // Apply the reflector to X
            apply_PX(m_mat_H.block(il, il, 2, m_n - il), m_n, il);
            apply_XP(m_mat_H.block(0, il, il + 2, 2), m_n, il);
            m_ref_nr.coeffRef(il + 1) = 1;
            return;
        }
        // For block size >=3, use the regular strategy
        // m20 = x21 * x10
        const Scalar m20 = m_mat_H.coeff(il + 2, il + 1) * m_mat_H.coeff(il + 1, il);
        compute_reflector(m00, m10, m20, il);
        // Apply the first reflector
        apply_PX(m_mat_H.block(il, il, 3, m_n - il), m_n, il);
        apply_XP(m_mat_H.block(0, il, il + (std::min)(bsize, Index(4)), 3), m_n, il);
        // Calculate the following reflectors
        // If entering this loop, block size is at least 4.
        for (Index i = 1; i < bsize - 2; i++)
        {
            // Each reflector annihilates the bulge created by the previous one
            compute_reflector(&m_mat_H.coeffRef(il + i, il + i - 1), il + i);
            // Apply the reflector to X
            apply_PX(m_mat_H.block(il + i, il + i - 1, 3, m_n - il - i + 1), m_n, il + i);
            apply_XP(m_mat_H.block(0, il + i, il + (std::min)(bsize, Index(i + 4)), 3), m_n, il + i);
        }
        // The last reflector
        // This causes nr=2
        compute_reflector(m_mat_H.coeff(iu - 1, iu - 2), m_mat_H.coeff(iu, iu - 2), 0, iu - 1);
        // Apply the reflector to X
        apply_PX(m_mat_H.block(iu - 1, iu - 2, 2, m_n - iu + 2), m_n, iu - 1);
        apply_XP(m_mat_H.block(0, iu - 1, il + bsize, 2), m_n, iu - 1);
        m_ref_nr.coeffRef(iu) = 1;
    }
    // P = I - 2 * u * u' = P'
    // PX = X - 2 * u * (u'X)
    //
    // Applies the reflector stored in column `u_ind` of m_ref_u to X from the
    // left, working column-by-column on raw memory. `stride` is the distance
    // (in elements) between consecutive columns of the underlying matrix.
    void apply_PX(GenericMatrix X, Index stride, Index u_ind) const
    {
        const Index nr = m_ref_nr.coeff(u_ind);
        // nr == 1 means the reflector is the identity
        if (nr == 1)
            return;
        const Scalar u0 = m_ref_u.coeff(0, u_ind), u1 = m_ref_u.coeff(1, u_ind);
        const Scalar u0_2 = Scalar(2) * u0, u1_2 = Scalar(2) * u1;
        const Index nrow = X.rows();
        const Index ncol = X.cols();
        Scalar* xptr = X.data();
        if (nr == 2 || nrow == 2)
        {
            // 2-element reflector: update only the first two rows of each column
            for (Index i = 0; i < ncol; i++, xptr += stride)
            {
                const Scalar tmp = u0_2 * xptr[0] + u1_2 * xptr[1];
                xptr[0] -= tmp * u0;
                xptr[1] -= tmp * u1;
            }
        }
        else
        {
            // 3-element reflector: update the first three rows of each column
            const Scalar u2 = m_ref_u.coeff(2, u_ind);
            const Scalar u2_2 = Scalar(2) * u2;
            for (Index i = 0; i < ncol; i++, xptr += stride)
            {
                const Scalar tmp = u0_2 * xptr[0] + u1_2 * xptr[1] + u2_2 * xptr[2];
                xptr[0] -= tmp * u0;
                xptr[1] -= tmp * u1;
                xptr[2] -= tmp * u2;
            }
        }
    }
// x is a pointer to a vector
// Px = x - 2 * dot(x, u) * u
void apply_PX(Scalar* x, Index u_ind) const
{
const Index nr = m_ref_nr.coeff(u_ind);
if (nr == 1)
return;
const Scalar u0 = m_ref_u.coeff(0, u_ind),
u1 = m_ref_u.coeff(1, u_ind),
u2 = m_ref_u.coeff(2, u_ind);
// When the reflector only contains two elements, u2 has been set to zero
const bool nr_is_2 = (nr == 2);
const Scalar dot2 = Scalar(2) * (x[0] * u0 + x[1] * u1 + (nr_is_2 ? 0 : (x[2] * u2)));
x[0] -= dot2 * u0;
x[1] -= dot2 * u1;
if (!nr_is_2)
x[2] -= dot2 * u2;
}
// XP = X - 2 * (X * u) * u'
void apply_XP(GenericMatrix X, Index stride, Index u_ind) const
{
const Index nr = m_ref_nr.coeff(u_ind);
if (nr == 1)
return;
const Scalar u0 = m_ref_u.coeff(0, u_ind), u1 = m_ref_u.coeff(1, u_ind);
const Scalar u0_2 = Scalar(2) * u0, u1_2 = Scalar(2) * u1;
const int nrow = X.rows();
const int ncol = X.cols();
Scalar *X0 = X.data(), *X1 = X0 + stride; // X0 => X.col(0), X1 => X.col(1)
if (nr == 2 || ncol == 2)
{
// tmp = 2 * u0 * X0 + 2 * u1 * X1
// X0 => X0 - u0 * tmp
// X1 => X1 - u1 * tmp
for (Index i = 0; i < nrow; i++)
{
const Scalar tmp = u0_2 * X0[i] + u1_2 * X1[i];
X0[i] -= tmp * u0;
X1[i] -= tmp * u1;
}
}
else
{
Scalar* X2 = X1 + stride; // X2 => X.col(2)
const Scalar u2 = m_ref_u.coeff(2, u_ind);
const Scalar u2_2 = Scalar(2) * u2;
for (Index i = 0; i < nrow; i++)
{
const Scalar tmp = u0_2 * X0[i] + u1_2 * X1[i] + u2_2 * X2[i];
X0[i] -= tmp * u0;
X1[i] -= tmp * u1;
X2[i] -= tmp * u2;
}
}
}
public:
    // Construct an empty solver of the given dimension; compute() must be
    // called before any result can be queried.
    DoubleShiftQR(Index size) :
        m_n(size),
        m_computed(false)
    {}
    // Construct the solver and immediately run one double-shift QR sweep on
    // `mat` with shift parameters s and t (the trace and determinant of the
    // 2x2 shift block, respectively).
    DoubleShiftQR(ConstGenericMatrix& mat, const Scalar& s, const Scalar& t) :
        m_n(mat.rows()),
        m_mat_H(m_n, m_n),
        m_shift_s(s),
        m_shift_t(t),
        m_ref_u(3, m_n),
        m_ref_nr(m_n),
        m_computed(false)
    {
        compute(mat, s, t);
    }
    // Run one implicit double-shift QR sweep on the upper Hessenberg matrix
    // `mat`, using shift parameters s and t. The matrix is copied internally;
    // the (deflated) result Q'HQ is stored in m_mat_H and the reflectors in
    // m_ref_u / m_ref_nr.
    //
    // Throws std::invalid_argument if `mat` is not square.
    void compute(ConstGenericMatrix& mat, const Scalar& s, const Scalar& t)
    {
        using std::abs;
        m_n = mat.rows();
        if (m_n != mat.cols())
            throw std::invalid_argument("DoubleShiftQR: matrix must be square");
        m_mat_H.resize(m_n, m_n);
        m_shift_s = s;
        m_shift_t = t;
        m_ref_u.resize(3, m_n);
        m_ref_nr.resize(m_n);
        // Make a copy of mat
        m_mat_H.noalias() = mat;
        // Obtain the indices of zero elements in the subdiagonal,
        // so that H can be divided into several blocks
        // eps_abs: absolute threshold below which a subdiagonal entry is
        // treated as zero; eps_rel: relative threshold w.r.t. the neighboring
        // diagonal entries
        const Scalar eps_abs = m_near_0 * (m_n / m_eps);
        constexpr Scalar eps_rel = m_eps;
        std::vector<int> zero_ind;
        zero_ind.reserve(m_n - 1);
        zero_ind.push_back(0);
        Scalar* Hii = m_mat_H.data();
        for (Index i = 0; i < m_n - 1; i++, Hii += (m_n + 1))
        {
            // Hii[0] => m_mat_H(i, i)
            // Hii[1] => m_mat_H(i + 1, i)
            // Hii[m_n + 1] => m_mat_H(i + 1, i + 1)
            const Scalar h = abs(Hii[1]);
            // Deflate small sub-diagonal elements
            const Scalar diag = abs(Hii[0]) + abs(Hii[m_n + 1]);
            if (h <= eps_abs || h <= eps_rel * diag)
            {
                Hii[1] = 0;
                zero_ind.push_back(i + 1);
            }
            // Make sure m_mat_H is upper Hessenberg
            // Zero the elements below m_mat_H(i + 1, i)
            std::fill(Hii + 2, Hii + m_n - i, Scalar(0));
        }
        zero_ind.push_back(m_n);
        // Run the QR sweep independently on each decoupled diagonal block
        const Index len = zero_ind.size() - 1;
        for (Index i = 0; i < len; i++)
        {
            const Index start = zero_ind[i];
            const Index end = zero_ind[i + 1] - 1;
            // Compute reflectors and update each block
            update_block(start, end);
        }
        // Deflation on the computed result
        Hii = m_mat_H.data();
        for (Index i = 0; i < m_n - 1; i++, Hii += (m_n + 1))
        {
            const Scalar h = abs(Hii[1]);
            const Scalar diag = abs(Hii[0]) + abs(Hii[m_n + 1]);
            if (h <= eps_abs || h <= eps_rel * diag)
                Hii[1] = 0;
        }
        m_computed = true;
    }
    // Copy the transformed matrix Q'HQ (the result of the QR sweep) into
    // `dest`. Throws std::logic_error if compute() has not been called.
    void matrix_QtHQ(Matrix& dest) const
    {
        if (!m_computed)
            throw std::logic_error("DoubleShiftQR: need to call compute() first");
        dest.noalias() = m_mat_H;
    }
// Q = P0 * P1 * ...
// Q'y = P_{n-2} * ... * P1 * P0 * y
void apply_QtY(Vector& y) const
{
if (!m_computed)
throw std::logic_error("DoubleShiftQR: need to call compute() first");
Scalar* y_ptr = y.data();
const Index n1 = m_n - 1;
for (Index i = 0; i < n1; i++, y_ptr++)
{
apply_PX(y_ptr, i);
}
}
    // Q = P0 * P1 * ...
    // YQ = Y * P0 * P1 * ...
    // Applies Q to the matrix Y from the right, reflector by reflector.
    // NOTE(review): assumes m_n >= 2 (n2 = m_n - 2 is used as a block start
    // index, which would be negative for m_n == 1) — presumably guaranteed by
    // callers; verify if this is ever used with 1x1 matrices.
    void apply_YQ(GenericMatrix Y) const
    {
        if (!m_computed)
            throw std::logic_error("DoubleShiftQR: need to call compute() first");
        const Index nrow = Y.rows();
        const Index n2 = m_n - 2;
        // Reflectors 0 .. n-3 may touch three columns of Y
        for (Index i = 0; i < n2; i++)
        {
            apply_XP(Y.block(0, i, nrow, 3), nrow, i);
        }
        // The last reflector touches at most two columns
        apply_XP(Y.block(0, n2, nrow, 2), nrow, n2);
    }
};
} // namespace Spectra
#endif // SPECTRA_DOUBLE_SHIFT_QR_H
| 14,768 | 32.489796 | 111 | h |
abess | abess-master/include/Spectra/LinAlg/Lanczos.h | // Copyright (C) 2018-2021 Yixuan Qiu <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_LANCZOS_H
#define SPECTRA_LANCZOS_H
#include <Eigen/Core>
#include <cmath> // std::sqrt
#include <utility> // std::forward
#include <stdexcept> // std::invalid_argument
#include "Arnoldi.h"
namespace Spectra {
// Lanczos factorization A * V = V * H + f * e'
// A: n x n
// V: n x k
// H: k x k
// f: n x 1
// e: [0, ..., 0, 1]
// V and H are allocated of dimension m, so the maximum value of k is m
template <typename Scalar, typename ArnoldiOpType>
class Lanczos : public Arnoldi<Scalar, ArnoldiOpType>
{
private:
    using Index = Eigen::Index;
    using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
    using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
    using MapMat = Eigen::Map<Matrix>;
    using MapVec = Eigen::Map<Vector>;
    using MapConstMat = Eigen::Map<const Matrix>;
    // Members inherited from the Arnoldi base class
    using Arnoldi<Scalar, ArnoldiOpType>::m_op;
    using Arnoldi<Scalar, ArnoldiOpType>::m_n;
    using Arnoldi<Scalar, ArnoldiOpType>::m_m;
    using Arnoldi<Scalar, ArnoldiOpType>::m_k;
    using Arnoldi<Scalar, ArnoldiOpType>::m_fac_V;
    using Arnoldi<Scalar, ArnoldiOpType>::m_fac_H;
    using Arnoldi<Scalar, ArnoldiOpType>::m_fac_f;
    using Arnoldi<Scalar, ArnoldiOpType>::m_beta;
    using Arnoldi<Scalar, ArnoldiOpType>::m_near_0;
    using Arnoldi<Scalar, ArnoldiOpType>::m_eps;
public:
    // Forward parameter `op` to the constructor of Arnoldi
    template <typename T>
    Lanczos(T&& op, Index m) :
        Arnoldi<Scalar, ArnoldiOpType>(std::forward<T>(op), m)
    {}
    // Lanczos factorization starting from step-k
    //
    // Extends the factorization from dimension `from_k` to dimension `to_m`,
    // incrementing `op_counter` once per matrix-vector product. The
    // three-term recurrence is augmented with iterative re-orthogonalization
    // of the residual against all previous basis vectors.
    //
    // Throws std::invalid_argument if from_k exceeds the current subspace
    // dimension.
    void factorize_from(Index from_k, Index to_m, Index& op_counter) override
    {
        using std::sqrt;
        if (to_m <= from_k)
            return;
        if (from_k > m_k)
        {
            std::string msg = "Lanczos: from_k (= " + std::to_string(from_k) +
                ") is larger than the current subspace dimension (= " + std::to_string(m_k) + ")";
            throw std::invalid_argument(msg);
        }
        // Below this threshold, ||f|| is considered to be dominated by noise
        const Scalar beta_thresh = m_eps * sqrt(Scalar(m_n));
        // Pre-allocate vectors
        Vector Vf(to_m);
        Vector w(m_n);
        // Keep the upperleft k x k submatrix of H and set other elements to 0
        m_fac_H.rightCols(m_m - from_k).setZero();
        m_fac_H.block(from_k, 0, m_m - from_k, from_k).setZero();
        for (Index i = from_k; i <= to_m - 1; i++)
        {
            bool restart = false;
            // If beta = 0, then the next V is not full rank
            // We need to generate a new residual vector that is orthogonal
            // to the current V, which we call a restart
            if (m_beta < m_near_0)
            {
                MapConstMat V(m_fac_V.data(), m_n, i);  // The first i columns
                this->expand_basis(V, 2 * i, m_fac_f, m_beta, op_counter);
                restart = true;
            }
            // v <- f / ||f||
            MapVec v(&m_fac_V(0, i), m_n);  // The (i+1)-th column
            v.noalias() = m_fac_f / m_beta;
            // Note that H[i+1, i] equals to the unrestarted beta
            m_fac_H(i, i - 1) = restart ? Scalar(0) : m_beta;
            m_fac_H(i - 1, i) = m_fac_H(i, i - 1);  // Due to symmetry
            // w <- A * v
            m_op.perform_op(v.data(), w.data());
            op_counter++;
            // f <- w - V * V'Bw = w - H[i+1, i] * V{i} - H[i+1, i+1] * V{i+1}
            // If restarting, we know that H[i+1, i] = 0
            // First do w <- w - H[i+1, i] * V{i}, see the discussions in Section 2.3 of
            // Cullum and Willoughby (2002). Lanczos Algorithms for Large Symmetric Eigenvalue Computations: Vol. 1
            if (!restart)
                w.noalias() -= m_fac_H(i, i - 1) * m_fac_V.col(i - 1);
            // H[i+1, i+1] = <v, w> = v'Bw
            m_fac_H(i, i) = m_op.inner_product(v, w);
            // f <- w - H[i+1, i+1] * V{i+1}
            m_fac_f.noalias() = w - m_fac_H(i, i) * v;
            m_beta = m_op.norm(m_fac_f);
            // f/||f|| is going to be the next column of V, so we need to test
            // whether V'B(f/||f||) ~= 0
            const Index i1 = i + 1;
            MapMat Vs(m_fac_V.data(), m_n, i1);  // The first (i+1) columns
            m_op.trans_product(Vs, m_fac_f, Vf.head(i1));
            Scalar ortho_err = Vf.head(i1).cwiseAbs().maxCoeff();
            // If not, iteratively correct the residual
            // (at most 5 correction passes)
            int count = 0;
            while (count < 5 && ortho_err > m_eps * m_beta)
            {
                // There is an edge case: when beta=||f|| is close to zero, f mostly consists
                // of noises of rounding errors, so the test [ortho_err < eps * beta] is very
                // likely to fail. In particular, if beta=0, then the test is ensured to fail.
                // Hence when this happens, we force f to be zero, and then restart in the
                // next iteration.
                if (m_beta < beta_thresh)
                {
                    m_fac_f.setZero();
                    m_beta = Scalar(0);
                    break;
                }
                // f <- f - V * Vf
                m_fac_f.noalias() -= Vs * Vf.head(i1);
                // h <- h + Vf
                m_fac_H(i - 1, i) += Vf[i - 1];
                m_fac_H(i, i - 1) = m_fac_H(i - 1, i);
                m_fac_H(i, i) += Vf[i];
                // beta <- ||f||
                m_beta = m_op.norm(m_fac_f);
                m_op.trans_product(Vs, m_fac_f, Vf.head(i1));
                ortho_err = Vf.head(i1).cwiseAbs().maxCoeff();
                count++;
            }
        }
        // Indicate that this is a step-m factorization
        m_k = to_m;
    }
    // Apply H -> Q'HQ, where Q is from a tridiagonal QR decomposition
    // Function overloading here, not overriding
    void compress_H(const TridiagQR<Scalar>& decomp)
    {
        decomp.matrix_QtHQ(m_fac_H);
        // The restarted factorization has one dimension less
        m_k--;
    }
};
} // namespace Spectra
#endif // SPECTRA_LANCZOS_H
| 6,282 | 35.52907 | 115 | h |
abess | abess-master/include/Spectra/LinAlg/Orthogonalization.h | // Copyright (C) 2020 Netherlands eScience Center <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_ORTHOGONALIZATION_H
#define SPECTRA_ORTHOGONALIZATION_H
#include <Eigen/Core>
#include <Eigen/QR>
namespace Spectra {
/// Check if the number of columns to skip is
/// larger than 0 but smaller than the total number
/// of columns of the matrix
/// \param in_output Matrix to be orthogonalized
/// \param left_cols_to_skip Number of left columns to be left untouched
template <typename Matrix>
void assert_left_cols_to_skip(Matrix& in_output, Eigen::Index left_cols_to_skip)
{
    // Debug-only sanity checks: 0 <= left_cols_to_skip < in_output.cols()
    assert(in_output.cols() > left_cols_to_skip && "left_cols_to_skip is larger than columns of matrix");
    assert(left_cols_to_skip >= 0 && "left_cols_to_skip is negative");
}
/// If the the number of columns to skip is null,
/// normalize the first column and set left_cols_to_skip=1
/// \param in_output Matrix to be orthogonalized
/// \param left_cols_to_skip Number of left columns to be left untouched
/// \return Actual number of left columns to skip
template <typename Matrix>
Eigen::Index treat_first_col(Matrix& in_output, Eigen::Index left_cols_to_skip)
{
    // Columns are already being skipped: nothing to normalize here
    if (left_cols_to_skip != 0)
        return left_cols_to_skip;
    // No column is skipped yet: normalize the first column and report
    // that it should now be left untouched by subsequent steps
    in_output.col(0).normalize();
    return 1;
}
/// Orthogonalize the in_output matrix using a QR decomposition
/// \param in_output Matrix to be orthogonalized
template <typename Matrix>
void QR_orthogonalisation(Matrix& in_output)
{
using InternalMatrix = Eigen::Matrix<typename Matrix::Scalar, Eigen::Dynamic, Eigen::Dynamic>;
Eigen::Index nrows = in_output.rows();
Eigen::Index ncols = in_output.cols();
ncols = (std::min)(nrows, ncols);
InternalMatrix I = InternalMatrix::Identity(nrows, ncols);
Eigen::HouseholderQR<Matrix> qr(in_output);
in_output.leftCols(ncols).noalias() = qr.householderQ() * I;
}
/// Orthogonalize the in_output matrix using a modified Gram Schmidt process
/// \param in_output matrix to be orthogonalized
/// \param left_cols_to_skip Number of left columns to be left untouched
template <typename Matrix>
void MGS_orthogonalisation(Matrix& in_output, Eigen::Index left_cols_to_skip = 0)
{
assert_left_cols_to_skip(in_output, left_cols_to_skip);
left_cols_to_skip = treat_first_col(in_output, left_cols_to_skip);
for (Eigen::Index k = left_cols_to_skip; k < in_output.cols(); ++k)
{
for (Eigen::Index j = 0; j < k; j++)
{
in_output.col(k) -= in_output.col(j).dot(in_output.col(k)) * in_output.col(j);
}
in_output.col(k).normalize();
}
}
/// Orthogonalize the in_output matrix using a Gram Schmidt process
/// \param in_output matrix to be orthogonalized
/// \param left_cols_to_skip Number of left columns to be left untouched
template <typename Matrix>
void GS_orthogonalisation(Matrix& in_output, Eigen::Index left_cols_to_skip = 0)
{
assert_left_cols_to_skip(in_output, left_cols_to_skip);
left_cols_to_skip = treat_first_col(in_output, left_cols_to_skip);
for (Eigen::Index j = left_cols_to_skip; j < in_output.cols(); ++j)
{
in_output.col(j) -= in_output.leftCols(j) * (in_output.leftCols(j).transpose() * in_output.col(j));
in_output.col(j).normalize();
}
}
/// Orthogonalize the subspace spanned by right columns of in_output
/// against the subspace spanned by left columns
/// It assumes that the left columns are already orthogonal and normalized,
/// and it does not orthogonalize the left columns against each other
/// \param in_output Matrix to be orthogonalized
/// \param left_cols_to_skip Number of left columns to be left untouched
template <typename Matrix>
void subspace_orthogonalisation(Matrix& in_output, Eigen::Index left_cols_to_skip)
{
    assert_left_cols_to_skip(in_output, left_cols_to_skip);
    // Nothing on the left to project against
    if (left_cols_to_skip == 0)
    {
        return;
    }
    // Project the left-column subspace out of the right columns in one
    // block operation: R <- R - L * (L' * R)
    Eigen::Index right_cols_to_ortho = in_output.cols() - left_cols_to_skip;
    in_output.rightCols(right_cols_to_ortho) -= in_output.leftCols(left_cols_to_skip) *
        (in_output.leftCols(left_cols_to_skip).transpose() * in_output.rightCols(right_cols_to_ortho));
}
/// Orthogonalize the in_output matrix using a Jens process
/// The subspace spanned by right columns are first orthogonalized
/// agains the left columns, and then a QR decomposition is applied on the right columns
/// to make them orthogonalized agains each other
/// \param in_output Matrix to be orthogonalized
/// \param left_cols_to_skip Number of left columns to be left untouched
template <typename Matrix>
void JensWehner_orthogonalisation(Matrix& in_output, Eigen::Index left_cols_to_skip = 0)
{
assert_left_cols_to_skip(in_output, left_cols_to_skip);
Eigen::Index right_cols_to_ortho = in_output.cols() - left_cols_to_skip;
subspace_orthogonalisation(in_output, left_cols_to_skip);
Eigen::Ref<Matrix> right_cols = in_output.rightCols(right_cols_to_ortho);
QR_orthogonalisation(right_cols);
}
/// Orthogonalize the in_output matrix using a twice-is-enough Jens process
/// \param in_output Matrix to be orthogonalized
/// \param left_cols_to_skip Number of left columns to be left untouched
template <typename Matrix>
void twice_is_enough_orthogonalisation(Matrix& in_output, Eigen::Index left_cols_to_skip = 0)
{
    // "Twice is enough": repeating the orthogonalization a second time
    // removes the loss of orthogonality left by the first pass
    for (int pass = 0; pass < 2; ++pass)
        JensWehner_orthogonalisation(in_output, left_cols_to_skip);
}
} // namespace Spectra
#endif //SPECTRA_ORTHOGONALIZATION_H
| 5,705 | 39.183099 | 107 | h |
abess | abess-master/include/Spectra/LinAlg/RitzPairs.h | // Copyright (C) 2020 Netherlands eScience Center <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_RITZ_PAIRS_H
#define SPECTRA_RITZ_PAIRS_H
#include <Eigen/Core>
#include <Eigen/Eigenvalues>
#include "../Util/SelectionRule.h"
namespace Spectra {
template <typename Scalar>
class SearchSpace;
/// This class handles the creation and manipulation of Ritz eigen pairs
/// for iterative eigensolvers such as Davidson, Jacobi-Davidson, etc.
template <typename Scalar>
class RitzPairs
{
private:
    using Index = Eigen::Index;
    using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
    using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
    using Array = Eigen::Array<Scalar, Eigen::Dynamic, 1>;
    using BoolArray = Eigen::Array<bool, Eigen::Dynamic, 1>;
    Vector m_values;         // eigenvalues
    Matrix m_small_vectors;  // eigenvectors of the small problem, makes restart cheaper.
    Matrix m_vectors;        // Ritz (or harmonic Ritz) eigenvectors
    Matrix m_residues;       // residues of the pairs
    BoolArray m_root_converged;  // per-pair convergence flags (see check_convergence)
public:
    RitzPairs() = default;
    /// Compute the eigen values/vectors
    ///
    /// \param search_space Instance of the class handling the search space
    /// \return Eigen::ComputationalInfo Whether small eigenvalue problem worked
    Eigen::ComputationInfo compute_eigen_pairs(const SearchSpace<Scalar>& search_space);
    /// Returns the size of the ritz eigen pairs
    ///
    /// \return Eigen::Index Number of pairs
    Index size() const { return m_values.size(); }
    /// Sort the eigen pairs according to the selection rule
    ///
    /// \param selection Sorting rule
    void sort(SortRule selection)
    {
        std::vector<Index> ind = argsort(selection, m_values);
        // Copy-then-permute: `temp` snapshots the current pairs so they can
        // be placed into sorted positions without overwriting source data
        RitzPairs<Scalar> temp = *this;
        for (Index i = 0; i < size(); i++)
        {
            m_values[i] = temp.m_values[ind[i]];
            m_vectors.col(i) = temp.m_vectors.col(ind[i]);
            m_residues.col(i) = temp.m_residues.col(ind[i]);
            m_small_vectors.col(i) = temp.m_small_vectors.col(ind[i]);
        }
    }
    /// Checks if the algorithm has converged and updates root_converged
    ///
    /// \param tol Tolerance for convergence
    /// \param number_eigenvalues Number of requested eigenvalues
    /// \return bool true if all requested eigenvalues are converged
    bool check_convergence(Scalar tol, Index number_eigenvalues)
    {
        const Array norms = m_residues.colwise().norm();
        bool converged = true;
        m_root_converged = BoolArray::Zero(norms.size());
        for (Index j = 0; j < norms.size(); j++)
        {
            // Every pair gets a convergence flag, but only the first
            // `number_eigenvalues` pairs affect the overall verdict
            m_root_converged[j] = (norms[j] < tol);
            if (j < number_eigenvalues)
            {
                converged &= (norms[j] < tol);
            }
        }
        return converged;
    }
    const Matrix& ritz_vectors() const { return m_vectors; }
    const Vector& ritz_values() const { return m_values; }
    const Matrix& small_ritz_vectors() const { return m_small_vectors; }
    const Matrix& residues() const { return m_residues; }
    const BoolArray& converged_eigenvalues() const { return m_root_converged; }
};
} // namespace Spectra
#include "SearchSpace.h"
namespace Spectra {
/// Creates the small space matrix and computes its eigen pairs
/// Also computes the ritz vectors and residues
///
/// \param search_space Instance of the SearchSpace class
/// Creates the small space matrix and computes its eigen pairs
/// Also computes the ritz vectors and residues
///
/// \param search_space Instance of the SearchSpace class
template <typename Scalar>
Eigen::ComputationInfo RitzPairs<Scalar>::compute_eigen_pairs(const SearchSpace<Scalar>& search_space)
{
    const Matrix& V = search_space.basis_vectors();
    const Matrix& AV = search_space.operator_basis_product();
    // Projected (small) eigenvalue problem: S = V' * (A * V)
    Matrix projected = V.transpose() * AV;
    Eigen::SelfAdjointEigenSolver<Matrix> solver(projected);
    m_values = solver.eigenvalues();
    m_small_vectors = solver.eigenvectors();
    // Lift the small eigenvectors back to the full space (Ritz vectors)
    m_vectors = V * m_small_vectors;
    // Residues: r_i = A * x_i - lambda_i * x_i, computed for all pairs at once
    m_residues = AV * m_small_vectors - m_vectors * m_values.asDiagonal();
    return solver.info();
}
} // namespace Spectra
#endif // SPECTRA_RITZ_PAIRS_H
| 4,472 | 33.145038 | 102 | h |
abess | abess-master/include/Spectra/LinAlg/SearchSpace.h | // Copyright (C) 2020 Netherlands eScience Center <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_SEARCH_SPACE_H
#define SPECTRA_SEARCH_SPACE_H
#include <Eigen/Core>
#include "RitzPairs.h"
#include "Orthogonalization.h"
namespace Spectra {
/// This class handles the creation and manipulation of the search space
/// for iterative eigensolvers such as Davidson, Jacobi-Davidson, etc.
template <typename Scalar>
class SearchSpace
{
private:
    using Index = Eigen::Index;
    using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
    Matrix m_basis_vectors;     // orthonormal basis of the search space (one column per vector)
    Matrix m_op_basis_product;  // cached product: operator applied to each basis column
    /// Append new vector to the basis
    ///
    /// \param new_vect Matrix of new correction vectors
    void append_new_vectors_to_basis(const Matrix& new_vect)
    {
        Index num_update = new_vect.cols();
        // conservativeResize keeps the existing columns intact
        m_basis_vectors.conservativeResize(Eigen::NoChange, m_basis_vectors.cols() + num_update);
        m_basis_vectors.rightCols(num_update).noalias() = new_vect;
    }
public:
    SearchSpace() = default;
    /// Returns the current size of the search space
    Index size() const { return m_basis_vectors.cols(); }
    /// Initialize the basis with the given starting vectors; the cached
    /// operator product starts empty and is filled lazily by
    /// update_operator_basis_product()
    void initialize_search_space(const Eigen::Ref<const Matrix>& initial_vectors)
    {
        m_basis_vectors = initial_vectors;
        m_op_basis_product = Matrix(initial_vectors.rows(), 0);
    }
    /// Updates the matrix formed by the operator applied to the search space
    /// after the addition of new vectors in the search space. Only the product
    /// of the operator with the new vectors is computed and the result is appended
    /// to the op_basis_product member variable
    ///
    /// \param OpType Operator representing the matrix
    template <typename OpType>
    void update_operator_basis_product(OpType& op)
    {
        // Only the columns added since the last call need the (expensive)
        // operator application
        Index nvec = m_basis_vectors.cols() - m_op_basis_product.cols();
        m_op_basis_product.conservativeResize(Eigen::NoChange, m_basis_vectors.cols());
        m_op_basis_product.rightCols(nvec).noalias() = op * m_basis_vectors.rightCols(nvec);
    }
    /// Restart the search space by reducing the basis vector to the last
    /// Ritz eigenvector
    ///
    /// \param ritz_pairs Instance of a RitzPairs class
    /// \param size Size of the restart
    void restart(const RitzPairs<Scalar>& ritz_pairs, Index size)
    {
        m_basis_vectors = ritz_pairs.ritz_vectors().leftCols(size);
        // The cached product is rotated into the new basis instead of being
        // recomputed with the operator
        m_op_basis_product = m_op_basis_product * ritz_pairs.small_ritz_vectors().leftCols(size);
    }
    /// Append new vectors to the search space and
    /// orthogonalize the resulting matrix
    ///
    /// \param new_vect Matrix of new correction vectors
    void extend_basis(const Matrix& new_vect)
    {
        // Existing columns are assumed orthonormal; only the appended ones
        // need to be orthogonalized against them
        Index left_cols_to_skip = size();
        append_new_vectors_to_basis(new_vect);
        twice_is_enough_orthogonalisation(m_basis_vectors, left_cols_to_skip);
    }
    /// Returns the basis vectors
    const Matrix& basis_vectors() const { return m_basis_vectors; }
    /// Returns the operator applied to basis vector
    const Matrix& operator_basis_product() const { return m_op_basis_product; }
};
} // namespace Spectra
#endif // SPECTRA_SEARCH_SPACE_H
| 3,388 | 33.938144 | 97 | h |
abess | abess-master/include/Spectra/LinAlg/TridiagEigen.h | // The code was adapted from Eigen/src/Eigenvaleus/SelfAdjointEigenSolver.h
//
// Copyright (C) 2008-2010 Gael Guennebaud <[email protected]>
// Copyright (C) 2010 Jitse Niesen <[email protected]>
// Copyright (C) 2016-2021 Yixuan Qiu <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_TRIDIAG_EIGEN_H
#define SPECTRA_TRIDIAG_EIGEN_H
#include <Eigen/Core>
#include <Eigen/Jacobi>
#include <stdexcept>
#include "../Util/TypeTraits.h"
namespace Spectra {
template <typename Scalar = double>
class TridiagEigen
{
private:
    using Index = Eigen::Index;
    // For convenience in adapting the tridiagonal_qr_step() function
    using RealScalar = Scalar;
    using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
    using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
    using GenericMatrix = Eigen::Ref<Matrix>;
    using ConstGenericMatrix = const Eigen::Ref<const Matrix>;
    Index m_n;
    Vector m_main_diag;  // Main diagonal elements of the matrix
    Vector m_sub_diag;   // Sub-diagonal elements of the matrix
    Matrix m_evecs;      // To store eigenvectors
    bool m_computed;     // Whether compute() has been successfully run
    // Adapted from Eigen/src/Eigenvaleus/SelfAdjointEigenSolver.h
    // Francis implicit QR step.
    //
    // Performs one implicit symmetric QR step with Wilkinson shift on the
    // unreduced tridiagonal block [start, end], chasing the bulge down the
    // subdiagonal with Givens rotations. If matrixQ is non-null, the
    // rotations are accumulated into the n x n matrix it points to.
    static void tridiagonal_qr_step(RealScalar* diag,
                                    RealScalar* subdiag, Index start,
                                    Index end, Scalar* matrixQ,
                                    Index n)
    {
        using std::abs;
        // Wilkinson Shift.
        RealScalar td = (diag[end - 1] - diag[end]) * RealScalar(0.5);
        RealScalar e = subdiag[end - 1];
        // Note that thanks to scaling, e^2 or td^2 cannot overflow, however they can still
        // underflow thus leading to inf/NaN values when using the following commented code:
        // RealScalar e2 = numext::abs2(subdiag[end-1]);
        // RealScalar mu = diag[end] - e2 / (td + (td>0 ? 1 : -1) * sqrt(td*td + e2));
        // This explain the following, somewhat more complicated, version:
        RealScalar mu = diag[end];
        if (td == RealScalar(0))
            mu -= abs(e);
        else if (e != RealScalar(0))
        {
            const RealScalar e2 = Eigen::numext::abs2(e);
            const RealScalar h = Eigen::numext::hypot(td, e);
            if (e2 == RealScalar(0))
                mu -= e / ((td + (td > RealScalar(0) ? h : -h)) / e);
            else
                mu -= e2 / (td + (td > RealScalar(0) ? h : -h));
        }
        RealScalar x = diag[start] - mu;
        RealScalar z = subdiag[start];
        Eigen::Map<Matrix> q(matrixQ, n, n);
        // If z ever becomes zero, the Givens rotation will be the identity and
        // z will stay zero for all future iterations.
        for (Index k = start; k < end && z != RealScalar(0); ++k)
        {
            Eigen::JacobiRotation<RealScalar> rot;
            rot.makeGivens(x, z);
            const RealScalar s = rot.s();
            const RealScalar c = rot.c();
            // do T = G' T G
            RealScalar sdk = s * diag[k] + c * subdiag[k];
            RealScalar dkp1 = s * subdiag[k] + c * diag[k + 1];
            diag[k] = c * (c * diag[k] - s * subdiag[k]) - s * (c * subdiag[k] - s * diag[k + 1]);
            diag[k + 1] = s * sdk + c * dkp1;
            subdiag[k] = c * sdk - s * dkp1;
            if (k > start)
                subdiag[k - 1] = c * subdiag[k - 1] - s * z;
            // "Chasing the bulge" to return to triangular form.
            x = subdiag[k];
            if (k < end - 1)
            {
                z = -s * subdiag[k + 1];
                subdiag[k + 1] = c * subdiag[k + 1];
            }
            // apply the givens rotation to the unit matrix Q = Q * G
            if (matrixQ)
                q.applyOnTheRight(k, k + 1, rot);
        }
    }
public:
    TridiagEigen() :
        m_n(0), m_computed(false)
    {}
    TridiagEigen(ConstGenericMatrix& mat) :
        m_n(mat.rows()), m_computed(false)
    {
        compute(mat);
    }
    // Compute all eigenvalues and eigenvectors of the symmetric tridiagonal
    // matrix `mat` (only its main diagonal and first sub-diagonal are read).
    //
    // Throws std::invalid_argument if `mat` is not square, and
    // std::runtime_error if the QR iteration fails to converge.
    // NOTE(review): assumes m_n >= 2 — `mat.diagonal(-1)` would be empty for
    // a 1x1 input; presumably guaranteed by callers.
    void compute(ConstGenericMatrix& mat)
    {
        using std::abs;
        // A very small value, but 1.0 / near_0 does not overflow
        // ~= 1e-307 for the "double" type
        constexpr Scalar near_0 = TypeTraits<Scalar>::min() * Scalar(10);
        m_n = mat.rows();
        if (m_n != mat.cols())
            throw std::invalid_argument("TridiagEigen: matrix must be square");
        m_main_diag.resize(m_n);
        m_sub_diag.resize(m_n - 1);
        m_evecs.resize(m_n, m_n);
        m_evecs.setIdentity();
        // Scale matrix to improve stability
        const Scalar scale = (std::max)(mat.diagonal().cwiseAbs().maxCoeff(),
                                        mat.diagonal(-1).cwiseAbs().maxCoeff());
        // If scale=0, mat is a zero matrix, so we can early stop
        if (scale < near_0)
        {
            // m_main_diag contains eigenvalues
            m_main_diag.setZero();
            // m_evecs has been set identity
            // m_evecs.setIdentity();
            m_computed = true;
            return;
        }
        m_main_diag.noalias() = mat.diagonal() / scale;
        m_sub_diag.noalias() = mat.diagonal(-1) / scale;
        Scalar* diag = m_main_diag.data();
        Scalar* subdiag = m_sub_diag.data();
        Index end = m_n - 1;
        Index start = 0;
        Index iter = 0;  // total number of iterations
        int info = 0;    // 0 for success, 1 for failure
        const Scalar considerAsZero = TypeTraits<Scalar>::min();
        const Scalar precision_inv = Scalar(1) / Eigen::NumTraits<Scalar>::epsilon();
        while (end > 0)
        {
            // Zero out negligible sub-diagonal entries (deflation)
            for (Index i = start; i < end; i++)
            {
                if (abs(subdiag[i]) <= considerAsZero)
                    subdiag[i] = Scalar(0);
                else
                {
                    // abs(subdiag[i]) <= epsilon * sqrt(abs(diag[i]) + abs(diag[i+1]))
                    // Scaled to prevent underflows.
                    const Scalar scaled_subdiag = precision_inv * subdiag[i];
                    if (scaled_subdiag * scaled_subdiag <= (abs(diag[i]) + abs(diag[i + 1])))
                        subdiag[i] = Scalar(0);
                }
            }
            // find the largest unreduced block at the end of the matrix.
            while (end > 0 && subdiag[end - 1] == Scalar(0))
                end--;
            if (end <= 0)
                break;
            // if we spent too many iterations, we give up
            iter++;
            if (iter > 30 * m_n)
            {
                info = 1;
                break;
            }
            start = end - 1;
            while (start > 0 && subdiag[start - 1] != Scalar(0))
                start--;
            tridiagonal_qr_step(diag, subdiag, start, end, m_evecs.data(), m_n);
        }
        if (info > 0)
            throw std::runtime_error("TridiagEigen: eigen decomposition failed");
        // Scale eigenvalues back
        m_main_diag *= scale;
        m_computed = true;
    }
    const Vector& eigenvalues() const
    {
        if (!m_computed)
            throw std::logic_error("TridiagEigen: need to call compute() first");
        // After calling compute(), main_diag will contain the eigenvalues.
        return m_main_diag;
    }
    const Matrix& eigenvectors() const
    {
        if (!m_computed)
            throw std::logic_error("TridiagEigen: need to call compute() first");
        return m_evecs;
    }
};
} // namespace Spectra
#endif // SPECTRA_TRIDIAG_EIGEN_H
| 7,776 | 32.666667 | 98 | h |
abess | abess-master/include/Spectra/LinAlg/UpperHessenbergSchur.h | // The code was adapted from Eigen/src/Eigenvaleus/RealSchur.h
//
// Copyright (C) 2008 Gael Guennebaud <[email protected]>
// Copyright (C) 2010,2012 Jitse Niesen <[email protected]>
// Copyright (C) 2021 Yixuan Qiu <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_UPPER_HESSENBERG_SCHUR_H
#define SPECTRA_UPPER_HESSENBERG_SCHUR_H
#include <Eigen/Core>
#include <Eigen/Jacobi>
#include <Eigen/Householder>
#include <stdexcept>
#include "../Util/TypeTraits.h"
namespace Spectra {
///
/// Real Schur decomposition of an upper Hessenberg matrix, \f$A = UTU'\f$,
/// where \f$U\f$ is orthogonal and \f$T\f$ is quasi-upper-triangular
/// (block upper triangular with 1x1 and 2x2 blocks on the diagonal).
/// The algorithm is the Francis double-shift QR iteration, adapted from
/// Eigen's RealSchur class but specialized to Hessenberg input.
///
/// \tparam Scalar The element type of the matrix, for example,
///                `float`, `double`, and `long double`.
///
template <typename Scalar = double>
class UpperHessenbergSchur
{
private:
    using Index = Eigen::Index;
    using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
    using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
    using Vector2s = Eigen::Matrix<Scalar, 2, 1>;
    using Vector3s = Eigen::Matrix<Scalar, 3, 1>;
    using GenericMatrix = Eigen::Ref<Matrix>;
    using ConstGenericMatrix = const Eigen::Ref<const Matrix>;
    Index m_n;        // Size of the matrix
    Matrix m_T;       // T matrix, A = UTU'
    Matrix m_U;       // U matrix, A = UTU'
    bool m_computed;  // Whether compute() has finished successfully
    // L1 norm of an upper Hessenberg matrix.
    // Only the first min(n, j+2) entries of column j can be nonzero in a
    // Hessenberg matrix, so the sum skips the guaranteed-zero lower part.
    static Scalar upper_hessenberg_l1_norm(ConstGenericMatrix& x)
    {
        const Index n = x.cols();
        Scalar norm(0);
        for (Index j = 0; j < n; j++)
            norm += x.col(j).segment(0, (std::min)(n, j + 2)).cwiseAbs().sum();
        return norm;
    }
    // Look for single small sub-diagonal element and returns its index.
    // Scans upward from row `iu`; a sub-diagonal entry is treated as
    // negligible when it is small relative to its two diagonal neighbors
    // (or below the absolute threshold `near_0`).
    Index find_small_subdiag(Index iu, const Scalar& near_0) const
    {
        using std::abs;
        const Scalar eps = Eigen::NumTraits<Scalar>::epsilon();
        Index res = iu;
        while (res > 0)
        {
            Scalar s = abs(m_T.coeff(res - 1, res - 1)) + abs(m_T.coeff(res, res));
            s = Eigen::numext::maxi<Scalar>(s * eps, near_0);
            if (abs(m_T.coeff(res, res - 1)) <= s)
                break;
            res--;
        }
        return res;
    }
    // Update T given that rows iu-1 and iu decouple from the rest.
    // The trailing 2x2 block is either diagonalized (two real eigenvalues)
    // or left as a 2x2 block representing a complex conjugate pair.
    void split_off_two_rows(Index iu, const Scalar& ex_shift)
    {
        using std::sqrt;
        using std::abs;
        // The eigenvalues of the 2x2 matrix [a b; c d] are
        // trace +/- sqrt(discr/4) where discr = tr^2 - 4*det, tr = a + d, det = ad - bc
        Scalar p = Scalar(0.5) * (m_T.coeff(iu - 1, iu - 1) - m_T.coeff(iu, iu));
        Scalar q = p * p + m_T.coeff(iu, iu - 1) * m_T.coeff(iu - 1, iu);  // q = tr^2 / 4 - det = discr/4
        // Restore the shifts that were subtracted from the diagonal earlier
        m_T.coeffRef(iu, iu) += ex_shift;
        m_T.coeffRef(iu - 1, iu - 1) += ex_shift;
        if (q >= Scalar(0))  // Two real eigenvalues
        {
            Scalar z = sqrt(abs(q));
            Eigen::JacobiRotation<Scalar> rot;
            // Choose the root of larger magnitude to avoid cancellation
            rot.makeGivens((p >= Scalar(0)) ? (p + z) : (p - z), m_T.coeff(iu, iu - 1));
            m_T.rightCols(m_n - iu + 1).applyOnTheLeft(iu - 1, iu, rot.adjoint());
            m_T.topRows(iu + 1).applyOnTheRight(iu - 1, iu, rot);
            m_T.coeffRef(iu, iu - 1) = Scalar(0);
            m_U.applyOnTheRight(iu - 1, iu, rot);
        }
        // Decouple the 2x2 block from the rows above it
        if (iu > 1)
            m_T.coeffRef(iu - 1, iu - 2) = Scalar(0);
    }
    // Form shift in shift_info, and update ex_shift if an exceptional shift is performed.
    // shift_info = (T(iu,iu), T(iu-1,iu-1), T(iu,iu-1)*T(iu-1,iu)); ad hoc
    // "exceptional" shifts are applied at iterations 10 and 30 to break
    // possible convergence stagnation (same strategy as Eigen/EISPACK).
    void compute_shift(Index iu, Index iter, Scalar& ex_shift, Vector3s& shift_info)
    {
        using std::sqrt;
        using std::abs;
        shift_info.coeffRef(0) = m_T.coeff(iu, iu);
        shift_info.coeffRef(1) = m_T.coeff(iu - 1, iu - 1);
        shift_info.coeffRef(2) = m_T.coeff(iu, iu - 1) * m_T.coeff(iu - 1, iu);
        // Wilkinson's original ad hoc shift
        if (iter == 10)
        {
            ex_shift += shift_info.coeff(0);
            for (Index i = 0; i <= iu; ++i)
                m_T.coeffRef(i, i) -= shift_info.coeff(0);
            Scalar s = abs(m_T.coeff(iu, iu - 1)) + abs(m_T.coeff(iu - 1, iu - 2));
            shift_info.coeffRef(0) = Scalar(0.75) * s;
            shift_info.coeffRef(1) = Scalar(0.75) * s;
            shift_info.coeffRef(2) = Scalar(-0.4375) * s * s;
        }
        // MATLAB's new ad hoc shift
        if (iter == 30)
        {
            Scalar s = (shift_info.coeff(1) - shift_info.coeff(0)) / Scalar(2);
            s = s * s + shift_info.coeff(2);
            if (s > Scalar(0))
            {
                s = sqrt(s);
                if (shift_info.coeff(1) < shift_info.coeff(0))
                    s = -s;
                s = s + (shift_info.coeff(1) - shift_info.coeff(0)) / Scalar(2);
                s = shift_info.coeff(0) - shift_info.coeff(2) / s;
                ex_shift += s;
                for (Index i = 0; i <= iu; ++i)
                    m_T.coeffRef(i, i) -= s;
                shift_info.setConstant(Scalar(0.964));
            }
        }
    }
    // Compute index im at which Francis QR step starts and the first Householder vector.
    // Walks upward from iu-2 looking for the highest row where the implicit
    // double-shift can be started without noticeable perturbation.
    void init_francis_qr_step(Index il, Index iu, const Vector3s& shift_info, Index& im, Vector3s& first_householder_vec) const
    {
        using std::abs;
        const Scalar eps = Eigen::NumTraits<Scalar>::epsilon();
        Vector3s& v = first_householder_vec;  // alias to save typing
        for (im = iu - 2; im >= il; --im)
        {
            const Scalar Tmm = m_T.coeff(im, im);
            const Scalar r = shift_info.coeff(0) - Tmm;
            const Scalar s = shift_info.coeff(1) - Tmm;
            // v is the first column of (T - s1*I)(T - s2*I), scaled
            v.coeffRef(0) = (r * s - shift_info.coeff(2)) / m_T.coeff(im + 1, im) + m_T.coeff(im, im + 1);
            v.coeffRef(1) = m_T.coeff(im + 1, im + 1) - Tmm - r - s;
            v.coeffRef(2) = m_T.coeff(im + 2, im + 1);
            if (im == il)
                break;
            // Stop early if starting at row im barely perturbs the matrix
            const Scalar lhs = m_T.coeff(im, im - 1) * (abs(v.coeff(1)) + abs(v.coeff(2)));
            const Scalar rhs = v.coeff(0) * (abs(m_T.coeff(im - 1, im - 1)) + abs(Tmm) + abs(m_T.coeff(im + 1, im + 1)));
            if (abs(lhs) < eps * rhs)
                break;
        }
    }
    // P = I - tau * v * v' = P'
    // PX = X - tau * v * (v'X), X [3 x c]
    // Applies a 3x3 Householder reflector to 3 rows of a column-major matrix;
    // `stride` is the distance between consecutive columns (the leading dimension).
    static void apply_householder_left(const Vector2s& ess, const Scalar& tau, Scalar* x, Index ncol, Index stride)
    {
        const Scalar v1 = ess.coeff(0), v2 = ess.coeff(1);
        const Scalar* const x_end = x + ncol * stride;
        for (; x < x_end; x += stride)
        {
            const Scalar tvx = tau * (x[0] + v1 * x[1] + v2 * x[2]);
            x[0] -= tvx;
            x[1] -= tvx * v1;
            x[2] -= tvx * v2;
        }
    }
    // P = I - tau * v * v' = P'
    // XP = X - tau * (X * v) * v', X [r x 3]
    // Applies a 3x3 Householder reflector to 3 adjacent columns;
    // x0/x1/x2 point to the three column starts (separated by `stride`).
    static void apply_householder_right(const Vector2s& ess, const Scalar& tau, Scalar* x, Index nrow, Index stride)
    {
        const Scalar v1 = ess.coeff(0), v2 = ess.coeff(1);
        Scalar* x0 = x;
        Scalar* x1 = x + stride;
        Scalar* x2 = x1 + stride;
        for (Index i = 0; i < nrow; i++)
        {
            const Scalar txv = tau * (x0[i] + v1 * x1[i] + v2 * x2[i]);
            x0[i] -= txv;
            x1[i] -= txv * v1;
            x2[i] -= txv * v2;
        }
    }
    // Perform a Francis QR step involving rows il:iu and columns im:iu.
    // Chases the bulge down the matrix with 3x3 Householder reflectors and
    // finishes with a Givens rotation on the last 2-row block.
    void perform_francis_qr_step(Index il, Index im, Index iu, const Vector3s& first_householder_vec, const Scalar& near_0)
    {
        using std::abs;
        for (Index k = im; k <= iu - 2; ++k)
        {
            const bool first_iter = (k == im);
            Vector3s v;
            if (first_iter)
                v = first_householder_vec;
            else
                v = m_T.template block<3, 1>(k, k - 1);
            Scalar tau, beta;
            Vector2s ess;
            v.makeHouseholder(ess, tau, beta);
            if (abs(beta) > near_0)  // if v is not zero
            {
                if (first_iter && k > il)
                    m_T.coeffRef(k, k - 1) = -m_T.coeff(k, k - 1);
                else if (!first_iter)
                    m_T.coeffRef(k, k - 1) = beta;
                // These Householder transformations form the O(n^3) part of the algorithm
                // m_T.block(k, k, 3, m_n - k).applyHouseholderOnTheLeft(ess, tau, workspace);
                // m_T.block(0, k, (std::min)(iu, k + 3) + 1, 3).applyHouseholderOnTheRight(ess, tau, workspace);
                // m_U.block(0, k, m_n, 3).applyHouseholderOnTheRight(ess, tau, workspace);
                apply_householder_left(ess, tau, &m_T.coeffRef(k, k), m_n - k, m_n);
                apply_householder_right(ess, tau, &m_T.coeffRef(0, k), (std::min)(iu, k + 3) + 1, m_n);
                apply_householder_right(ess, tau, &m_U.coeffRef(0, k), m_n, m_n);
            }
        }
        // The last 2-row block
        Eigen::JacobiRotation<Scalar> rot;
        Scalar beta;
        rot.makeGivens(m_T.coeff(iu - 1, iu - 2), m_T.coeff(iu, iu - 2), &beta);
        if (abs(beta) > near_0)  // if v is not zero
        {
            m_T.coeffRef(iu - 1, iu - 2) = beta;
            m_T.rightCols(m_n - iu + 1).applyOnTheLeft(iu - 1, iu, rot.adjoint());
            m_T.topRows(iu + 1).applyOnTheRight(iu - 1, iu, rot);
            m_U.applyOnTheRight(iu - 1, iu, rot);
        }
        // clean up pollution due to round-off errors
        for (Index i = im + 2; i <= iu; ++i)
        {
            m_T.coeffRef(i, i - 2) = Scalar(0);
            if (i > im + 2)
                m_T.coeffRef(i, i - 3) = Scalar(0);
        }
    }
public:
    ///
    /// Default constructor. Computation can be performed later by
    /// calling the compute() member function.
    ///
    UpperHessenbergSchur() :
        m_n(0), m_computed(false)
    {}
    ///
    /// Constructor that computes the Schur decomposition of the
    /// given upper Hessenberg matrix `mat`.
    ///
    /// \param mat An upper Hessenberg matrix. Must be square.
    ///
    UpperHessenbergSchur(ConstGenericMatrix& mat) :
        m_n(mat.rows()), m_computed(false)
    {
        compute(mat);
    }
    ///
    /// Computes the Schur decomposition of an upper Hessenberg matrix.
    ///
    /// \param mat An upper Hessenberg matrix. Must be square.
    ///
    /// Throws `std::invalid_argument` if `mat` is not square, and
    /// `std::runtime_error` if the QR iteration fails to converge
    /// within 40 iterations per row.
    ///
    void compute(ConstGenericMatrix& mat)
    {
        using std::abs;
        using std::sqrt;
        if (mat.rows() != mat.cols())
            throw std::invalid_argument("UpperHessenbergSchur: matrix must be square");
        m_n = mat.rows();
        m_T.resize(m_n, m_n);
        m_U.resize(m_n, m_n);
        constexpr Index max_iter_per_row = 40;
        const Index max_iter = m_n * max_iter_per_row;
        m_T.noalias() = mat;
        m_U.setIdentity();
        // The matrix m_T is divided in three parts.
        // Rows 0,...,il-1 are decoupled from the rest because m_T(il,il-1) is zero.
        // Rows il,...,iu is the part we are working on (the active window).
        // Rows iu+1,...,end are already brought in triangular form.
        Index iu = m_n - 1;
        Index iter = 0;        // iteration count for current eigenvalue
        Index total_iter = 0;  // iteration count for whole matrix
        Scalar ex_shift(0);    // sum of exceptional shifts
        const Scalar norm = upper_hessenberg_l1_norm(m_T);
        // sub-diagonal entries smaller than near_0 will be treated as zero.
        // We use eps^2 to enable more precision in small eigenvalues.
        const Scalar eps = Eigen::NumTraits<Scalar>::epsilon();
        const Scalar near_0 = Eigen::numext::maxi<Scalar>(norm * eps * eps, TypeTraits<Scalar>::min());
        if (norm != Scalar(0))
        {
            while (iu >= 0)
            {
                Index il = find_small_subdiag(iu, near_0);
                // Check for convergence
                if (il == iu)  // One root found
                {
                    m_T.coeffRef(iu, iu) += ex_shift;
                    if (iu > 0)
                        m_T.coeffRef(iu, iu - 1) = Scalar(0);
                    iu--;
                    iter = 0;
                }
                else if (il == iu - 1)  // Two roots found
                {
                    split_off_two_rows(iu, ex_shift);
                    iu -= 2;
                    iter = 0;
                }
                else  // No convergence yet
                {
                    Vector3s first_householder_vec = Vector3s::Zero(), shift_info;
                    compute_shift(iu, iter, ex_shift, shift_info);
                    iter++;
                    total_iter++;
                    if (total_iter > max_iter)
                        break;
                    Index im;
                    init_francis_qr_step(il, iu, shift_info, im, first_householder_vec);
                    perform_francis_qr_step(il, im, iu, first_householder_vec, near_0);
                }
            }
        }
        if (total_iter > max_iter)
            throw std::runtime_error("UpperHessenbergSchur: Schur decomposition failed");
        m_computed = true;
    }
    ///
    /// Returns the quasi-triangular factor \f$T\f$ of the decomposition.
    /// Throws `std::logic_error` if compute() has not been called.
    ///
    const Matrix& matrix_T() const
    {
        if (!m_computed)
            throw std::logic_error("UpperHessenbergSchur: need to call compute() first");
        return m_T;
    }
    ///
    /// Returns the orthogonal factor \f$U\f$ of the decomposition.
    /// Throws `std::logic_error` if compute() has not been called.
    ///
    const Matrix& matrix_U() const
    {
        if (!m_computed)
            throw std::logic_error("UpperHessenbergSchur: need to call compute() first");
        return m_U;
    }
    ///
    /// Swaps the internal \f$T\f$ matrix with `other` (no copy).
    ///
    void swap_T(Matrix& other)
    {
        m_T.swap(other);
    }
    ///
    /// Swaps the internal \f$U\f$ matrix with `other` (no copy).
    ///
    void swap_U(Matrix& other)
    {
        m_U.swap(other);
    }
};
} // namespace Spectra
#endif // SPECTRA_UPPER_HESSENBERG_SCHUR_H
| 13,184 | 35.123288 | 127 | h |
abess | abess-master/include/Spectra/MatOp/DenseCholesky.h | // Copyright (C) 2016-2021 Yixuan Qiu <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_DENSE_CHOLESKY_H
#define SPECTRA_DENSE_CHOLESKY_H
#include <Eigen/Core>
#include <Eigen/Cholesky>
#include <stdexcept>
#include "../Util/CompInfo.h"
namespace Spectra {
///
/// \ingroup MatOp
///
/// This class defines the operations related to Cholesky decomposition on a
/// positive definite matrix, \f$B=LL'\f$, where \f$L\f$ is a lower triangular
/// matrix. It is mainly used in the SymGEigsSolver generalized eigen solver
/// in the Cholesky decomposition mode.
///
/// \tparam Scalar_ The element type of the matrix, for example,
/// `float`, `double`, and `long double`.
/// \tparam Uplo Either `Eigen::Lower` or `Eigen::Upper`, indicating which
/// triangular part of the matrix is used.
/// \tparam Flags Either `Eigen::ColMajor` or `Eigen::RowMajor`, indicating
/// the storage format of the input matrix.
///
template <typename Scalar_, int Uplo = Eigen::Lower, int Flags = Eigen::ColMajor>
class DenseCholesky
{
public:
///
/// Element type of the matrix.
///
using Scalar = Scalar_;
private:
using Index = Eigen::Index;
using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic, Flags>;
using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
using MapConstVec = Eigen::Map<const Vector>;
using MapVec = Eigen::Map<Vector>;
const Index m_n;
Eigen::LLT<Matrix, Uplo> m_decomp;
CompInfo m_info; // status of the decomposition
public:
///
/// Constructor to create the matrix operation object.
///
/// \param mat An **Eigen** matrix object, whose type can be
/// `Eigen::Matrix<Scalar, ...>` (e.g. `Eigen::MatrixXd` and
/// `Eigen::MatrixXf`), or its mapped version
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
///
template <typename Derived>
DenseCholesky(const Eigen::MatrixBase<Derived>& mat) :
m_n(mat.rows()), m_info(CompInfo::NotComputed)
{
static_assert(
static_cast<int>(Derived::PlainObject::IsRowMajor) == static_cast<int>(Matrix::IsRowMajor),
"DenseCholesky: the \"Flags\" template parameter does not match the input matrix (Eigen::ColMajor/Eigen::RowMajor)");
if (m_n != mat.cols())
throw std::invalid_argument("DenseCholesky: matrix must be square");
m_decomp.compute(mat);
m_info = (m_decomp.info() == Eigen::Success) ?
CompInfo::Successful :
CompInfo::NumericalIssue;
}
///
/// Returns the number of rows of the underlying matrix.
///
Index rows() const { return m_n; }
///
/// Returns the number of columns of the underlying matrix.
///
Index cols() const { return m_n; }
///
/// Returns the status of the computation.
/// The full list of enumeration values can be found in \ref Enumerations.
///
CompInfo info() const { return m_info; }
///
/// Performs the lower triangular solving operation \f$y=L^{-1}x\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(L) * x_in
void lower_triangular_solve(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_n);
MapVec y(y_out, m_n);
y.noalias() = m_decomp.matrixL().solve(x);
}
///
/// Performs the upper triangular solving operation \f$y=(L')^{-1}x\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(L') * x_in
void upper_triangular_solve(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_n);
MapVec y(y_out, m_n);
y.noalias() = m_decomp.matrixU().solve(x);
}
};
} // namespace Spectra
#endif // SPECTRA_DENSE_CHOLESKY_H
| 4,101 | 31.555556 | 129 | h |
abess | abess-master/include/Spectra/MatOp/DenseGenMatProd.h | // Copyright (C) 2016-2021 Yixuan Qiu <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_DENSE_GEN_MAT_PROD_H
#define SPECTRA_DENSE_GEN_MAT_PROD_H
#include <Eigen/Core>
namespace Spectra {
///
/// \defgroup MatOp Matrix Operations
///
/// Define matrix operations on existing matrix objects
///
///
/// \ingroup MatOp
///
/// This class defines the matrix-vector multiplication operation on a
/// general real matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector
/// \f$x\f$. It is mainly used in the GenEigsSolver and
/// SymEigsSolver eigen solvers.
///
/// \tparam Scalar_ The element type of the matrix, for example,
/// `float`, `double`, and `long double`.
/// \tparam Flags Either `Eigen::ColMajor` or `Eigen::RowMajor`, indicating
/// the storage format of the input matrix.
///
template <typename Scalar_, int Flags = Eigen::ColMajor>
class DenseGenMatProd
{
public:
///
/// Element type of the matrix.
///
using Scalar = Scalar_;
private:
using Index = Eigen::Index;
using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic, Flags>;
using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
using MapConstVec = Eigen::Map<const Vector>;
using MapVec = Eigen::Map<Vector>;
using ConstGenericMatrix = const Eigen::Ref<const Matrix>;
ConstGenericMatrix m_mat;
public:
///
/// Constructor to create the matrix operation object.
///
/// \param mat An **Eigen** matrix object, whose type can be
/// `Eigen::Matrix<Scalar, ...>` (e.g. `Eigen::MatrixXd` and
/// `Eigen::MatrixXf`), or its mapped version
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
///
template <typename Derived>
DenseGenMatProd(const Eigen::MatrixBase<Derived>& mat) :
m_mat(mat)
{
static_assert(
static_cast<int>(Derived::PlainObject::IsRowMajor) == static_cast<int>(Matrix::IsRowMajor),
"DenseGenMatProd: the \"Flags\" template parameter does not match the input matrix (Eigen::ColMajor/Eigen::RowMajor)");
}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_mat.rows(); }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_mat.cols(); }
///
/// Perform the matrix-vector multiplication operation \f$y=Ax\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = A * x_in
void perform_op(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_mat.cols());
MapVec y(y_out, m_mat.rows());
y.noalias() = m_mat * x;
}
///
/// Perform the matrix-matrix multiplication operation \f$y=Ax\f$.
///
Matrix operator*(const Eigen::Ref<const Matrix>& mat_in) const
{
return m_mat * mat_in;
}
///
/// Extract (i,j) element of the underlying matrix.
///
Scalar operator()(Index i, Index j) const
{
return m_mat(i, j);
}
};
} // namespace Spectra
#endif // SPECTRA_DENSE_GEN_MAT_PROD_H
| 3,352 | 28.672566 | 131 | h |
abess | abess-master/include/Spectra/MatOp/DenseSymMatProd.h | // Copyright (C) 2016-2021 Yixuan Qiu <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_DENSE_SYM_MAT_PROD_H
#define SPECTRA_DENSE_SYM_MAT_PROD_H
#include <Eigen/Core>
namespace Spectra {
///
/// \ingroup MatOp
///
/// This class defines the matrix-vector multiplication operation on a
/// symmetric real matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector
/// \f$x\f$. It is mainly used in the SymEigsSolver eigen solver.
///
/// \tparam Scalar_ The element type of the matrix, for example,
/// `float`, `double`, and `long double`.
/// \tparam Uplo Either `Eigen::Lower` or `Eigen::Upper`, indicating which
/// triangular part of the matrix is used.
/// \tparam Flags Either `Eigen::ColMajor` or `Eigen::RowMajor`, indicating
/// the storage format of the input matrix.
///
template <typename Scalar_, int Uplo = Eigen::Lower, int Flags = Eigen::ColMajor>
class DenseSymMatProd
{
public:
///
/// Element type of the matrix.
///
using Scalar = Scalar_;
private:
using Index = Eigen::Index;
using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic, Flags>;
using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
using MapConstVec = Eigen::Map<const Vector>;
using MapVec = Eigen::Map<Vector>;
using ConstGenericMatrix = const Eigen::Ref<const Matrix>;
ConstGenericMatrix m_mat;
public:
///
/// Constructor to create the matrix operation object.
///
/// \param mat An **Eigen** matrix object, whose type can be
/// `Eigen::Matrix<Scalar, ...>` (e.g. `Eigen::MatrixXd` and
/// `Eigen::MatrixXf`), or its mapped version
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
///
template <typename Derived>
DenseSymMatProd(const Eigen::MatrixBase<Derived>& mat) :
m_mat(mat)
{
static_assert(
static_cast<int>(Derived::PlainObject::IsRowMajor) == static_cast<int>(Matrix::IsRowMajor),
"DenseSymMatProd: the \"Flags\" template parameter does not match the input matrix (Eigen::ColMajor/Eigen::RowMajor)");
}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_mat.rows(); }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_mat.cols(); }
///
/// Perform the matrix-vector multiplication operation \f$y=Ax\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = A * x_in
void perform_op(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_mat.cols());
MapVec y(y_out, m_mat.rows());
y.noalias() = m_mat.template selfadjointView<Uplo>() * x;
}
///
/// Perform the matrix-matrix multiplication operation \f$y=Ax\f$.
///
Matrix operator*(const Eigen::Ref<const Matrix>& mat_in) const
{
return m_mat.template selfadjointView<Uplo>() * mat_in;
}
///
/// Extract (i,j) element of the underlying matrix.
///
Scalar operator()(Index i, Index j) const
{
return m_mat(i, j);
}
};
} // namespace Spectra
#endif // SPECTRA_DENSE_SYM_MAT_PROD_H
| 3,452 | 30.972222 | 131 | h |
abess | abess-master/include/Spectra/MatOp/DenseSymShiftSolve.h | // Copyright (C) 2016-2021 Yixuan Qiu <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_DENSE_SYM_SHIFT_SOLVE_H
#define SPECTRA_DENSE_SYM_SHIFT_SOLVE_H
#include <Eigen/Core>
#include <stdexcept>
#include "../LinAlg/BKLDLT.h"
#include "../Util/CompInfo.h"
namespace Spectra {
///
/// \ingroup MatOp
///
/// This class defines the shift-solve operation on a real symmetric matrix \f$A\f$,
/// i.e., calculating \f$y=(A-\sigma I)^{-1}x\f$ for any real \f$\sigma\f$ and
/// vector \f$x\f$. It is mainly used in the SymEigsShiftSolver eigen solver.
///
/// \tparam Scalar_ The element type of the matrix, for example,
/// `float`, `double`, and `long double`.
/// \tparam Uplo Either `Eigen::Lower` or `Eigen::Upper`, indicating which
/// triangular part of the matrix is used.
/// \tparam Flags Either `Eigen::ColMajor` or `Eigen::RowMajor`, indicating
/// the storage format of the input matrix.
///
template <typename Scalar_, int Uplo = Eigen::Lower, int Flags = Eigen::ColMajor>
class DenseSymShiftSolve
{
public:
///
/// Element type of the matrix.
///
using Scalar = Scalar_;
private:
using Index = Eigen::Index;
using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic, Flags>;
using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
using MapConstVec = Eigen::Map<const Vector>;
using MapVec = Eigen::Map<Vector>;
using ConstGenericMatrix = const Eigen::Ref<const Matrix>;
ConstGenericMatrix m_mat;
const Index m_n;
BKLDLT<Scalar> m_solver;
public:
///
/// Constructor to create the matrix operation object.
///
/// \param mat An **Eigen** matrix object, whose type can be
/// `Eigen::Matrix<Scalar, ...>` (e.g. `Eigen::MatrixXd` and
/// `Eigen::MatrixXf`), or its mapped version
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
///
template <typename Derived>
DenseSymShiftSolve(const Eigen::MatrixBase<Derived>& mat) :
m_mat(mat), m_n(mat.rows())
{
static_assert(
static_cast<int>(Derived::PlainObject::IsRowMajor) == static_cast<int>(Matrix::IsRowMajor),
"DenseSymShiftSolve: the \"Flags\" template parameter does not match the input matrix (Eigen::ColMajor/Eigen::RowMajor)");
if (m_n != mat.cols())
throw std::invalid_argument("DenseSymShiftSolve: matrix must be square");
}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_n; }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_n; }
///
/// Set the real shift \f$\sigma\f$.
///
void set_shift(const Scalar& sigma)
{
m_solver.compute(m_mat, Uplo, sigma);
if (m_solver.info() != CompInfo::Successful)
throw std::invalid_argument("DenseSymShiftSolve: factorization failed with the given shift");
}
///
/// Perform the shift-solve operation \f$y=(A-\sigma I)^{-1}x\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(A - sigma * I) * x_in
void perform_op(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_n);
MapVec y(y_out, m_n);
y.noalias() = m_solver.solve(x);
}
};
} // namespace Spectra
#endif // SPECTRA_DENSE_SYM_SHIFT_SOLVE_H
| 3,643 | 31.828829 | 134 | h |
abess | abess-master/include/Spectra/MatOp/SparseCholesky.h | // Copyright (C) 2016-2021 Yixuan Qiu <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_SPARSE_CHOLESKY_H
#define SPECTRA_SPARSE_CHOLESKY_H
#include <Eigen/Core>
#include <Eigen/SparseCore>
#include <Eigen/SparseCholesky>
#include <stdexcept>
#include "../Util/CompInfo.h"
namespace Spectra {
///
/// \ingroup MatOp
///
/// This class defines the operations related to Cholesky decomposition on a
/// sparse positive definite matrix, \f$B=LL'\f$, where \f$L\f$ is a lower triangular
/// matrix. It is mainly used in the SymGEigsSolver generalized eigen solver
/// in the Cholesky decomposition mode.
///
/// \tparam Scalar_ The element type of the matrix, for example,
/// `float`, `double`, and `long double`.
/// \tparam Uplo Either `Eigen::Lower` or `Eigen::Upper`, indicating which
/// triangular part of the matrix is used.
/// \tparam Flags Either `Eigen::ColMajor` or `Eigen::RowMajor`, indicating
/// the storage format of the input matrix.
/// \tparam StorageIndex The type of the indices for the sparse matrix.
///
template <typename Scalar_, int Uplo = Eigen::Lower, int Flags = Eigen::ColMajor, typename StorageIndex = int>
class SparseCholesky
{
public:
    ///
    /// Element type of the matrix.
    ///
    using Scalar = Scalar_;
private:
    using Index = Eigen::Index;
    using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
    using MapConstVec = Eigen::Map<const Vector>;
    using MapVec = Eigen::Map<Vector>;
    using SparseMatrix = Eigen::SparseMatrix<Scalar, Flags, StorageIndex>;
    const Index m_n;                                  // dimension of the (square) matrix
    Eigen::SimplicialLLT<SparseMatrix, Uplo> m_decomp;  // the Cholesky factorization B = LL'
    CompInfo m_info;                                  // status of the decomposition
public:
    ///
    /// Constructor to create the matrix operation object.
    ///
    /// \param mat An **Eigen** sparse matrix object, whose type can be
    /// `Eigen::SparseMatrix<Scalar, ...>` or its mapped version
    /// `Eigen::Map<Eigen::SparseMatrix<Scalar, ...> >`.
    ///
    template <typename Derived>
    SparseCholesky(const Eigen::SparseMatrixBase<Derived>& mat) :
        // Initialize m_info in the member initializer list so the member is
        // never left indeterminate, consistent with DenseCholesky
        m_n(mat.rows()), m_info(CompInfo::NotComputed)
    {
        static_assert(
            static_cast<int>(Derived::PlainObject::IsRowMajor) == static_cast<int>(SparseMatrix::IsRowMajor),
            "SparseCholesky: the \"Flags\" template parameter does not match the input matrix (Eigen::ColMajor/Eigen::RowMajor)");
        if (mat.rows() != mat.cols())
            throw std::invalid_argument("SparseCholesky: matrix must be square");
        m_decomp.compute(mat);
        m_info = (m_decomp.info() == Eigen::Success) ?
            CompInfo::Successful :
            CompInfo::NumericalIssue;
    }
    ///
    /// Returns the number of rows of the underlying matrix.
    ///
    Index rows() const { return m_n; }
    ///
    /// Returns the number of columns of the underlying matrix.
    ///
    Index cols() const { return m_n; }
    ///
    /// Returns the status of the computation.
    /// The full list of enumeration values can be found in \ref Enumerations.
    ///
    CompInfo info() const { return m_info; }
    ///
    /// Performs the lower triangular solving operation \f$y=L^{-1}x\f$.
    ///
    /// \param x_in  Pointer to the \f$x\f$ vector.
    /// \param y_out Pointer to the \f$y\f$ vector.
    ///
    // y_out = inv(L) * x_in
    void lower_triangular_solve(const Scalar* x_in, Scalar* y_out) const
    {
        MapConstVec x(x_in, m_n);
        MapVec y(y_out, m_n);
        // SimplicialLLT stores P * B * P' = L * L', so apply the fill-reducing
        // permutation before the triangular solve
        y.noalias() = m_decomp.permutationP() * x;
        m_decomp.matrixL().solveInPlace(y);
    }
    ///
    /// Performs the upper triangular solving operation \f$y=(L')^{-1}x\f$.
    ///
    /// \param x_in  Pointer to the \f$x\f$ vector.
    /// \param y_out Pointer to the \f$y\f$ vector.
    ///
    // y_out = inv(L') * x_in
    void upper_triangular_solve(const Scalar* x_in, Scalar* y_out) const
    {
        MapConstVec x(x_in, m_n);
        MapVec y(y_out, m_n);
        y.noalias() = m_decomp.matrixU().solve(x);
        // Undo the fill-reducing permutation applied during factorization
        y = m_decomp.permutationPinv() * y;
    }
};
} // namespace Spectra
#endif // SPECTRA_SPARSE_CHOLESKY_H
| 4,334 | 32.604651 | 130 | h |
abess | abess-master/include/Spectra/MatOp/SparseGenMatProd.h | // Copyright (C) 2016-2021 Yixuan Qiu <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_SPARSE_GEN_MAT_PROD_H
#define SPECTRA_SPARSE_GEN_MAT_PROD_H
#include <Eigen/Core>
#include <Eigen/SparseCore>
namespace Spectra {
///
/// \ingroup MatOp
///
/// This class defines the matrix-vector multiplication operation on a
/// sparse real matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector
/// \f$x\f$. It is mainly used in the GenEigsSolver and SymEigsSolver
/// eigen solvers.
///
/// \tparam Scalar_ The element type of the matrix, for example,
/// `float`, `double`, and `long double`.
/// \tparam Flags Either `Eigen::ColMajor` or `Eigen::RowMajor`, indicating
/// the storage format of the input matrix.
/// \tparam StorageIndex The type of the indices for the sparse matrix.
///
template <typename Scalar_, int Flags = Eigen::ColMajor, typename StorageIndex = int>
class SparseGenMatProd
{
public:
///
/// Element type of the matrix.
///
using Scalar = Scalar_;
private:
using Index = Eigen::Index;
using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
using MapConstVec = Eigen::Map<const Vector>;
using MapVec = Eigen::Map<Vector>;
using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
using SparseMatrix = Eigen::SparseMatrix<Scalar, Flags, StorageIndex>;
using ConstGenericSparseMatrix = const Eigen::Ref<const SparseMatrix>;
ConstGenericSparseMatrix m_mat;
public:
///
/// Constructor to create the matrix operation object.
///
/// \param mat An **Eigen** sparse matrix object, whose type can be
/// `Eigen::SparseMatrix<Scalar, ...>` or its mapped version
/// `Eigen::Map<Eigen::SparseMatrix<Scalar, ...> >`.
///
template <typename Derived>
SparseGenMatProd(const Eigen::SparseMatrixBase<Derived>& mat) :
m_mat(mat)
{
static_assert(
static_cast<int>(Derived::PlainObject::IsRowMajor) == static_cast<int>(SparseMatrix::IsRowMajor),
"SparseGenMatProd: the \"Flags\" template parameter does not match the input matrix (Eigen::ColMajor/Eigen::RowMajor)");
}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_mat.rows(); }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_mat.cols(); }
///
/// Perform the matrix-vector multiplication operation \f$y=Ax\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = A * x_in
void perform_op(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_mat.cols());
MapVec y(y_out, m_mat.rows());
y.noalias() = m_mat * x;
}
///
/// Perform the matrix-matrix multiplication operation \f$y=Ax\f$.
///
Matrix operator*(const Eigen::Ref<const Matrix>& mat_in) const
{
return m_mat * mat_in;
}
///
/// Extract (i,j) element of the underlying matrix.
///
Scalar operator()(Index i, Index j) const
{
return m_mat.coeff(i, j);
}
};
} // namespace Spectra
#endif // SPECTRA_SPARSE_GEN_MAT_PROD_H
| 3,470 | 31.138889 | 132 | h |
abess | abess-master/include/Spectra/MatOp/SparseSymMatProd.h | // Copyright (C) 2016-2021 Yixuan Qiu <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_SPARSE_SYM_MAT_PROD_H
#define SPECTRA_SPARSE_SYM_MAT_PROD_H
#include <Eigen/Core>
#include <Eigen/SparseCore>
namespace Spectra {
///
/// \ingroup MatOp
///
/// This class defines the matrix-vector multiplication operation on a
/// sparse real symmetric matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector
/// \f$x\f$. It is mainly used in the SymEigsSolver eigen solver.
///
/// \tparam Scalar_ The element type of the matrix, for example,
/// `float`, `double`, and `long double`.
/// \tparam Uplo Either `Eigen::Lower` or `Eigen::Upper`, indicating which
/// triangular part of the matrix is used.
/// \tparam Flags Either `Eigen::ColMajor` or `Eigen::RowMajor`, indicating
/// the storage format of the input matrix.
/// \tparam StorageIndex The type of the indices for the sparse matrix.
///
template <typename Scalar_, int Uplo = Eigen::Lower, int Flags = Eigen::ColMajor, typename StorageIndex = int>
class SparseSymMatProd
{
public:
///
/// Element type of the matrix.
///
using Scalar = Scalar_;
private:
using Index = Eigen::Index;
using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
using MapConstVec = Eigen::Map<const Vector>;
using MapVec = Eigen::Map<Vector>;
using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
using SparseMatrix = Eigen::SparseMatrix<Scalar, Flags, StorageIndex>;
using ConstGenericSparseMatrix = const Eigen::Ref<const SparseMatrix>;
ConstGenericSparseMatrix m_mat;
public:
///
/// Constructor to create the matrix operation object.
///
/// \param mat An **Eigen** sparse matrix object, whose type can be
/// `Eigen::SparseMatrix<Scalar, ...>` or its mapped version
/// `Eigen::Map<Eigen::SparseMatrix<Scalar, ...> >`.
///
template <typename Derived>
SparseSymMatProd(const Eigen::SparseMatrixBase<Derived>& mat) :
m_mat(mat)
{
static_assert(
static_cast<int>(Derived::PlainObject::IsRowMajor) == static_cast<int>(SparseMatrix::IsRowMajor),
"SparseSymMatProd: the \"Flags\" template parameter does not match the input matrix (Eigen::ColMajor/Eigen::RowMajor)");
}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_mat.rows(); }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_mat.cols(); }
///
/// Perform the matrix-vector multiplication operation \f$y=Ax\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = A * x_in
void perform_op(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_mat.cols());
MapVec y(y_out, m_mat.rows());
y.noalias() = m_mat.template selfadjointView<Uplo>() * x;
}
///
/// Perform the matrix-matrix multiplication operation \f$y=Ax\f$.
///
Matrix operator*(const Eigen::Ref<const Matrix>& mat_in) const
{
return m_mat.template selfadjointView<Uplo>() * mat_in;
}
///
/// Extract (i,j) element of the underlying matrix.
///
Scalar operator()(Index i, Index j) const
{
return m_mat.coeff(i, j);
}
};
} // namespace Spectra
#endif // SPECTRA_SPARSE_SYM_MAT_PROD_H
| 3,695 | 32.908257 | 132 | h |
abess | abess-master/include/Spectra/MatOp/SymShiftInvert.h | // Copyright (C) 2020-2021 Yixuan Qiu <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_SYM_SHIFT_INVERT_H
#define SPECTRA_SYM_SHIFT_INVERT_H
#include <Eigen/Core>
#include <Eigen/SparseCore>
#include <Eigen/SparseLU>
#include <stdexcept>
#include <type_traits> // std::conditional, std::is_same
#include "../LinAlg/BKLDLT.h"
#include "../Util/CompInfo.h"
namespace Spectra {
/// \cond
// Compute and factorize A-sigma*B without unnecessary copying
// Default case: A is sparse, B is sparse
template <bool AIsSparse, bool BIsSparse, int UploA, int UploB>
class SymShiftInvertHelper
{
public:
    // Factorize A - sigma * B when both A and B are sparse.
    // Returns true on success.
    template <typename Scalar, typename Fac, typename ArgA, typename ArgB>
    static bool factorize(Fac& fac, const ArgA& A, const ArgB& B, const Scalar& sigma)
    {
        using SpMat = typename ArgA::PlainObject;
        // Expand the stored triangular parts into full symmetric matrices,
        // then form the shifted matrix A - sigma * B
        SpMat symA = A.template selfadjointView<UploA>();
        SpMat symB = B.template selfadjointView<UploB>();
        SpMat shifted = symA - sigma * symB;
        // Factorize with SparseLU, letting it exploit symmetry
        fac.isSymmetric(true);
        fac.compute(shifted);
        return fac.info() == Eigen::Success;
    }
};
// A is dense, B is dense or sparse
template <bool BIsSparse, int UploA, int UploB>
class SymShiftInvertHelper<false, BIsSparse, UploA, UploB>
{
public:
// Factorize A - sigma * B with the dense BKLDLT solver. Only the <UploA>
// triangular part of the shifted matrix is assembled, since BKLDLT reads
// just one triangle. Returns true on success.
template <typename Scalar, typename Fac, typename ArgA, typename ArgB>
static bool factorize(Fac& fac, const ArgA& A, const ArgB& B, const Scalar& sigma)
{
using Matrix = typename ArgA::PlainObject;
// Make a copy of the <UploA> triangular part of A
Matrix mat(A.rows(), A.cols());
mat.template triangularView<UploA>() = A;
// Update <UploA> triangular part of mat
if (UploA == UploB)
mat -= (B * sigma).template triangularView<UploA>();
else
// A and B store opposite triangles, so transpose B's triangle first
mat -= (B * sigma).template triangularView<UploB>().transpose();
// BKLDLT solver
fac.compute(mat, UploA);
// Return true if successful
return fac.info() == CompInfo::Successful;
}
};
// A is sparse, B is dense
template <int UploA, int UploB>
class SymShiftInvertHelper<true, false, UploA, UploB>
{
public:
// Factorize A - sigma * B with the dense BKLDLT solver. Here the dense
// result is built from B's triangle (<UploB>), and A's sparse triangle is
// added on top. Returns true on success.
template <typename Scalar, typename Fac, typename ArgA, typename ArgB>
static bool factorize(Fac& fac, const ArgA& A, const ArgB& B, const Scalar& sigma)
{
using Matrix = typename ArgB::PlainObject;
// Construct the <UploB> triangular part of -sigma*B
Matrix mat(B.rows(), B.cols());
mat.template triangularView<UploB>() = -sigma * B;
// Update <UploB> triangular part of mat
if (UploA == UploB)
mat += A.template triangularView<UploB>();
else
// A and B store opposite triangles, so transpose A's triangle first
mat += A.template triangularView<UploA>().transpose();
// BKLDLT solver
fac.compute(mat, UploB);
// Return true if successful
return fac.info() == CompInfo::Successful;
}
};
/// \endcond
///
/// \ingroup MatOp
///
/// This class defines matrix operations required by the generalized eigen solver
/// in the shift-and-invert mode. Given two symmetric matrices \f$A\f$ and \f$B\f$,
/// it solves the linear equation \f$y=(A-\sigma B)^{-1}x\f$, where \f$\sigma\f$ is a real shift.
/// Each of \f$A\f$ and \f$B\f$ can be a dense or sparse matrix.
///
/// This class is intended to be used with the SymGEigsShiftSolver generalized eigen solver.
///
/// \tparam Scalar_ The element type of the matrices.
/// Currently supported types are `float`, `double`, and `long double`.
/// \tparam TypeA The type of the \f$A\f$ matrix, indicating whether \f$A\f$ is
/// dense or sparse. Possible values are `Eigen::Dense` and `Eigen::Sparse`.
/// \tparam TypeB The type of the \f$B\f$ matrix, indicating whether \f$B\f$ is
/// dense or sparse. Possible values are `Eigen::Dense` and `Eigen::Sparse`.
/// \tparam UploA Whether the lower or upper triangular part of \f$A\f$ should be used.
/// Possible values are `Eigen::Lower` and `Eigen::Upper`.
/// \tparam UploB Whether the lower or upper triangular part of \f$B\f$ should be used.
/// Possible values are `Eigen::Lower` and `Eigen::Upper`.
/// \tparam FlagsA Additional flags for the matrix class of \f$A\f$.
/// Possible values are `Eigen::ColMajor` and `Eigen::RowMajor`.
/// \tparam FlagsB Additional flags for the matrix class of \f$B\f$.
/// Possible values are `Eigen::ColMajor` and `Eigen::RowMajor`.
/// \tparam StorageIndexA The storage index type of the \f$A\f$ matrix, only used when \f$A\f$
/// is a sparse matrix.
/// \tparam StorageIndexB The storage index type of the \f$B\f$ matrix, only used when \f$B\f$
/// is a sparse matrix.
///
template <typename Scalar_, typename TypeA = Eigen::Sparse, typename TypeB = Eigen::Sparse,
int UploA = Eigen::Lower, int UploB = Eigen::Lower,
int FlagsA = Eigen::ColMajor, int FlagsB = Eigen::ColMajor,
typename StorageIndexA = int, typename StorageIndexB = int>
class SymShiftInvert
{
public:
///
/// Element type of the matrix.
///
using Scalar = Scalar_;
private:
using Index = Eigen::Index;
// Hypothetical type of the A matrix, either dense or sparse
using DenseTypeA = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic, FlagsA>;
using SparseTypeA = Eigen::SparseMatrix<Scalar, FlagsA, StorageIndexA>;
// Whether A is sparse
using ASparse = std::is_same<TypeA, Eigen::Sparse>;
// Actual type of the A matrix
using MatrixA = typename std::conditional<ASparse::value, SparseTypeA, DenseTypeA>::type;
// Hypothetical type of the B matrix, either dense or sparse
using DenseTypeB = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic, FlagsB>;
using SparseTypeB = Eigen::SparseMatrix<Scalar, FlagsB, StorageIndexB>;
// Whether B is sparse
using BSparse = std::is_same<TypeB, Eigen::Sparse>;
// Actual type of the B matrix
using MatrixB = typename std::conditional<BSparse::value, SparseTypeB, DenseTypeB>::type;
using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
using MapConstVec = Eigen::Map<const Vector>;
using MapVec = Eigen::Map<Vector>;
// The type of A-sigma*B if one of A and B is dense
// DenseType = if (A is dense) MatrixA else MatrixB
using DenseType = typename std::conditional<ASparse::value, MatrixB, MatrixA>::type;
// The type of A-sigma*B
// If both A and B are sparse, the result is MatrixA; otherwise the result is DenseType
using ResType = typename std::conditional<ASparse::value && BSparse::value, MatrixA, DenseType>::type;
// If both A and B are sparse, then the result A-sigma*B is sparse, so we use
// sparseLU for factorization; otherwise A-sigma*B is dense, and we use BKLDLT
using FacType = typename std::conditional<
ASparse::value && BSparse::value,
Eigen::SparseLU<ResType>,
BKLDLT<Scalar>>::type;
using ConstGenericMatrixA = const Eigen::Ref<const MatrixA>;
using ConstGenericMatrixB = const Eigen::Ref<const MatrixB>;
// References to the user-supplied A and B matrices
ConstGenericMatrixA m_matA;
ConstGenericMatrixB m_matB;
// Common dimension of A and B (both are square and of equal size)
const Index m_n;
// Factorization of A - sigma * B, computed in set_shift()
FacType m_solver;
public:
///
/// Constructor to create the matrix operation object.
///
/// \param A A dense or sparse matrix object, whose type can be `Eigen::Matrix<...>`,
/// `Eigen::SparseMatrix<...>`, `Eigen::Map<Eigen::Matrix<...>>`,
/// `Eigen::Map<Eigen::SparseMatrix<...>>`, `Eigen::Ref<Eigen::Matrix<...>>`,
/// `Eigen::Ref<Eigen::SparseMatrix<...>>`, etc.
/// \param B A dense or sparse matrix object.
///
/// \throws std::invalid_argument if A and B are not square matrices of the same size.
///
template <typename DerivedA, typename DerivedB>
SymShiftInvert(const Eigen::EigenBase<DerivedA>& A, const Eigen::EigenBase<DerivedB>& B) :
m_matA(A.derived()), m_matB(B.derived()), m_n(A.rows())
{
static_assert(
static_cast<int>(DerivedA::PlainObject::IsRowMajor) == static_cast<int>(MatrixA::IsRowMajor),
"SymShiftInvert: the \"FlagsA\" template parameter does not match the input matrix (Eigen::ColMajor/Eigen::RowMajor)");
static_assert(
static_cast<int>(DerivedB::PlainObject::IsRowMajor) == static_cast<int>(MatrixB::IsRowMajor),
"SymShiftInvert: the \"FlagsB\" template parameter does not match the input matrix (Eigen::ColMajor/Eigen::RowMajor)");
if (m_n != A.cols() || m_n != B.rows() || m_n != B.cols())
throw std::invalid_argument("SymShiftInvert: A and B must be square matrices of the same size");
}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_n; }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_n; }
///
/// Set the real shift \f$\sigma\f$.
///
/// This computes the factorization of \f$A-\sigma B\f$ that perform_op()
/// relies on, so it must be called before perform_op().
///
/// \throws std::invalid_argument if the factorization fails.
///
void set_shift(const Scalar& sigma)
{
constexpr bool AIsSparse = ASparse::value;
constexpr bool BIsSparse = BSparse::value;
// Dispatch to the sparse/dense-specific factorization routine
using Helper = SymShiftInvertHelper<AIsSparse, BIsSparse, UploA, UploB>;
const bool success = Helper::factorize(m_solver, m_matA, m_matB, sigma);
if (!success)
throw std::invalid_argument("SymShiftInvert: factorization failed with the given shift");
}
///
/// Perform the shift-invert operation \f$y=(A-\sigma B)^{-1}x\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(A - sigma * B) * x_in
void perform_op(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_n);
MapVec y(y_out, m_n);
y.noalias() = m_solver.solve(x);
}
};
} // namespace Spectra
#endif // SPECTRA_SYM_SHIFT_INVERT_H
| 10,177 | 40.373984 | 131 | h |
abess | abess-master/include/Spectra/MatOp/internal/ArnoldiOp.h | // Copyright (C) 2018-2021 Yixuan Qiu <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_ARNOLDI_OP_H
#define SPECTRA_ARNOLDI_OP_H
#include <Eigen/Core>
#include <cmath> // std::sqrt
namespace Spectra {
///
/// \ingroup Internals
/// @{
///
///
/// \defgroup Operators Operators
///
/// Different types of operators.
///
///
/// \ingroup Operators
///
/// Operators used in the Arnoldi factorization.
///
template <typename Scalar, typename OpType, typename BOpType>
class ArnoldiOp
{
private:
    using Index = Eigen::Index;
    using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;

    const OpType& m_op;      // the "A" operator
    const BOpType& m_Bop;    // the "B" operator defining the inner product
    mutable Vector m_cache;  // workspace holding B * v

public:
    ArnoldiOp(const OpType& op, const BOpType& Bop) :
        m_op(op), m_Bop(Bop), m_cache(op.rows())
    {}

    // Move constructor
    ArnoldiOp(ArnoldiOp&& other) :
        m_op(other.m_op), m_Bop(other.m_Bop)
    {
        // Emulate moving the workspace vector with Vector::swap()
        m_cache.swap(other.m_cache);
    }

    inline Index rows() const { return m_op.rows(); }

    // In the generalized eigenvalue problem Ax = lambda*Bx, the inner product
    // is defined as <u, v> = u'Bv; for regular problems it is the usual u'v.
    // Compute <u, v> = u'Bv for two vectors u and v
    template <typename Arg1, typename Arg2>
    Scalar inner_product(const Arg1& u, const Arg2& v) const
    {
        m_Bop.perform_op(v.data(), m_cache.data());
        return u.dot(m_cache);
    }

    // Compute res = <U, v> = U'Bv
    // U is a matrix, v is a vector, res is a vector
    template <typename Arg1, typename Arg2>
    void trans_product(const Arg1& u, const Arg2& v, Eigen::Ref<Vector> res) const
    {
        m_Bop.perform_op(v.data(), m_cache.data());
        res.noalias() = u.transpose() * m_cache;
    }

    // B-norm of a vector, ||v||_B = sqrt(v'Bv)
    template <typename Arg>
    Scalar norm(const Arg& v) const
    {
        using std::sqrt;
        return sqrt(inner_product<Arg, Arg>(v, v));
    }

    // Apply the "A" operator that generates the Krylov subspace
    inline void perform_op(const Scalar* x_in, Scalar* y_out) const
    {
        m_op.perform_op(x_in, y_out);
    }
};
///
/// \ingroup Operators
///
/// Placeholder for the B-operator when \f$B = I\f$ (i.e. a standard, non-generalized
/// eigenvalue problem). It carries no state; the ArnoldiOp specialization below
/// ignores it entirely.
///
class IdentityBOp
{};
///
/// \ingroup Operators
///
/// Partial specialization for the case \f$B = I\f$.
///
template <typename Scalar, typename OpType>
class ArnoldiOp<Scalar, OpType, IdentityBOp>
{
private:
    using Index = Eigen::Index;
    using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;

    const OpType& m_op;  // the "A" operator

public:
    ArnoldiOp(const OpType& op, const IdentityBOp& /*Bop*/) :
        m_op(op)
    {}

    inline Index rows() const { return m_op.rows(); }

    // With B = I the inner product reduces to the ordinary dot product <u, v> = u'v
    template <typename Arg1, typename Arg2>
    Scalar inner_product(const Arg1& u, const Arg2& v) const
    {
        return u.dot(v);
    }

    // res = U'v, where U is a matrix and v a vector
    template <typename Arg1, typename Arg2>
    void trans_product(const Arg1& u, const Arg2& v, Eigen::Ref<Vector> res) const
    {
        res.noalias() = u.transpose() * v;
    }

    // With B = I the B-norm is simply the Euclidean (L2) norm
    template <typename Arg>
    Scalar norm(const Arg& v) const
    {
        return v.norm();
    }

    // Apply the "A" operator that generates the Krylov subspace
    inline void perform_op(const Scalar* x_in, Scalar* y_out) const
    {
        m_op.perform_op(x_in, y_out);
    }
};
///
/// @}
///
} // namespace Spectra
#endif // SPECTRA_ARNOLDI_OP_H
| 3,901 | 23.540881 | 100 | h |
abess | abess-master/include/Spectra/MatOp/internal/SymGEigsBucklingOp.h | // Copyright (C) 2020-2021 Yixuan Qiu <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_SYM_GEIGS_BUCKLING_OP_H
#define SPECTRA_SYM_GEIGS_BUCKLING_OP_H
#include <Eigen/Core>
#include "../SymShiftInvert.h"
#include "../SparseSymMatProd.h"
namespace Spectra {
///
/// \ingroup Operators
///
/// This class defines the matrix operation for generalized eigen solver in the
/// buckling mode. It computes \f$y=(K-\sigma K_G)^{-1}Kx\f$ for any
/// vector \f$x\f$, where \f$K\f$ is positive definite, \f$K_G\f$ is symmetric,
/// and \f$\sigma\f$ is a real shift.
/// This class is intended for internal use.
///
template <typename OpType = SymShiftInvert<double>,
typename BOpType = SparseSymMatProd<double>>
class SymGEigsBucklingOp
{
public:
using Scalar = typename OpType::Scalar;
private:
using Index = Eigen::Index;
using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
OpType& m_op;
const BOpType& m_Bop;
mutable Vector m_cache; // temporary working space
public:
///
/// Constructor to create the matrix operation object.
///
/// \param op The \f$(K-\sigma K_G)^{-1}\f$ matrix operation object.
/// \param Bop The \f$K\f$ matrix operation object.
///
SymGEigsBucklingOp(OpType& op, const BOpType& Bop) :
m_op(op), m_Bop(Bop), m_cache(op.rows())
{}
///
/// Move constructor.
///
SymGEigsBucklingOp(SymGEigsBucklingOp&& other) :
m_op(other.m_op), m_Bop(other.m_Bop)
{
// We emulate the move constructor for Vector using Vector::swap()
m_cache.swap(other.m_cache);
}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_op.rows(); }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_op.rows(); }
///
/// Set the real shift \f$\sigma\f$.
///
void set_shift(const Scalar& sigma)
{
m_op.set_shift(sigma);
}
///
/// Perform the matrix operation \f$y=(K-\sigma K_G)^{-1}Kx\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(K - sigma * K_G) * K * x_in
void perform_op(const Scalar* x_in, Scalar* y_out) const
{
m_Bop.perform_op(x_in, m_cache.data());
m_op.perform_op(m_cache.data(), y_out);
}
};
} // namespace Spectra
#endif // SPECTRA_SYM_GEIGS_BUCKLING_OP_H
| 2,671 | 26.833333 | 79 | h |
abess | abess-master/include/Spectra/MatOp/internal/SymGEigsCayleyOp.h | // Copyright (C) 2020-2021 Yixuan Qiu <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_SYM_GEIGS_CAYLEY_OP_H
#define SPECTRA_SYM_GEIGS_CAYLEY_OP_H
#include <Eigen/Core>
#include "../SymShiftInvert.h"
#include "../SparseSymMatProd.h"
namespace Spectra {
///
/// \ingroup Operators
///
/// This class defines the matrix operation for generalized eigen solver in the
/// Cayley mode. It computes \f$y=(A-\sigma B)^{-1}(A+\sigma B)x\f$ for any
/// vector \f$x\f$, where \f$A\f$ is a symmetric matrix, \f$B\f$ is positive definite,
/// and \f$\sigma\f$ is a real shift.
/// This class is intended for internal use.
///
template <typename OpType = SymShiftInvert<double>,
typename BOpType = SparseSymMatProd<double>>
class SymGEigsCayleyOp
{
public:
using Scalar = typename OpType::Scalar;
private:
using Index = Eigen::Index;
using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
using MapConstVec = Eigen::Map<const Vector>;
using MapVec = Eigen::Map<Vector>;
OpType& m_op;
const BOpType& m_Bop;
mutable Vector m_cache; // temporary working space
Scalar m_sigma;
public:
///
/// Constructor to create the matrix operation object.
///
/// \param op The \f$(A-\sigma B)^{-1}\f$ matrix operation object.
/// \param Bop The \f$B\f$ matrix operation object.
///
SymGEigsCayleyOp(OpType& op, const BOpType& Bop) :
m_op(op), m_Bop(Bop), m_cache(op.rows())
{}
///
/// Move constructor.
///
SymGEigsCayleyOp(SymGEigsCayleyOp&& other) :
m_op(other.m_op), m_Bop(other.m_Bop), m_sigma(other.m_sigma)
{
// We emulate the move constructor for Vector using Vector::swap()
m_cache.swap(other.m_cache);
}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_op.rows(); }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_op.rows(); }
///
/// Set the real shift \f$\sigma\f$.
///
void set_shift(const Scalar& sigma)
{
m_op.set_shift(sigma);
m_sigma = sigma;
}
///
/// Perform the matrix operation \f$y=(A-\sigma B)^{-1}(A+\sigma B)x\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(A - sigma * B) * (A + sigma * B) * x_in
void perform_op(const Scalar* x_in, Scalar* y_out) const
{
// inv(A - sigma * B) * (A + sigma * B) * x
// = inv(A - sigma * B) * (A - sigma * B + 2 * sigma * B) * x
// = x + 2 * sigma * inv(A - sigma * B) * B * x
m_Bop.perform_op(x_in, m_cache.data());
m_op.perform_op(m_cache.data(), y_out);
MapConstVec x(x_in, this->rows());
MapVec y(y_out, this->rows());
y.noalias() = x + (Scalar(2) * m_sigma) * y;
}
};
} // namespace Spectra
#endif // SPECTRA_SYM_GEIGS_CAYLEY_OP_H
| 3,163 | 28.849057 | 86 | h |
abess | abess-master/include/Spectra/MatOp/internal/SymGEigsCholeskyOp.h | // Copyright (C) 2016-2021 Yixuan Qiu <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_SYM_GEIGS_CHOLESKY_OP_H
#define SPECTRA_SYM_GEIGS_CHOLESKY_OP_H
#include <Eigen/Core>
#include "../DenseSymMatProd.h"
#include "../DenseCholesky.h"
namespace Spectra {
///
/// \ingroup Operators
///
/// This class defines the matrix operation for generalized eigen solver in the
/// Cholesky decomposition mode. It calculates \f$y=L^{-1}A(L')^{-1}x\f$ for any
/// vector \f$x\f$, where \f$L\f$ is the Cholesky decomposition of \f$B\f$.
/// This class is intended for internal use.
///
template <typename OpType = DenseSymMatProd<double>,
typename BOpType = DenseCholesky<double>>
class SymGEigsCholeskyOp
{
public:
using Scalar = typename OpType::Scalar;
private:
using Index = Eigen::Index;
using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
const OpType& m_op;
const BOpType& m_Bop;
mutable Vector m_cache; // temporary working space
public:
///
/// Constructor to create the matrix operation object.
///
/// \param op The \f$A\f$ matrix operation object.
/// \param Bop The \f$B\f$ matrix operation object.
///
SymGEigsCholeskyOp(const OpType& op, const BOpType& Bop) :
m_op(op), m_Bop(Bop), m_cache(op.rows())
{}
///
/// Move constructor.
///
SymGEigsCholeskyOp(SymGEigsCholeskyOp&& other) :
m_op(other.m_op), m_Bop(other.m_Bop)
{
// We emulate the move constructor for Vector using Vector::swap()
m_cache.swap(other.m_cache);
}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_Bop.rows(); }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_Bop.rows(); }
///
/// Perform the matrix operation \f$y=L^{-1}A(L')^{-1}x\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(L) * A * inv(L') * x_in
void perform_op(const Scalar* x_in, Scalar* y_out) const
{
m_Bop.upper_triangular_solve(x_in, y_out);
m_op.perform_op(y_out, m_cache.data());
m_Bop.lower_triangular_solve(m_cache.data(), y_out);
}
};
} // namespace Spectra
#endif // SPECTRA_SYM_GEIGS_CHOLESKY_OP_H
| 2,548 | 27.965909 | 80 | h |
abess | abess-master/include/Spectra/MatOp/internal/SymGEigsRegInvOp.h | // Copyright (C) 2017-2021 Yixuan Qiu <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_SYM_GEIGS_REG_INV_OP_H
#define SPECTRA_SYM_GEIGS_REG_INV_OP_H
#include <Eigen/Core>
#include "../SparseSymMatProd.h"
#include "../SparseRegularInverse.h"
namespace Spectra {
///
/// \ingroup Operators
///
/// This class defines the matrix operation for generalized eigen solver in the
/// regular inverse mode. This class is intended for internal use.
///
template <typename OpType = SparseSymMatProd<double>,
typename BOpType = SparseRegularInverse<double>>
class SymGEigsRegInvOp
{
public:
using Scalar = typename OpType::Scalar;
private:
using Index = Eigen::Index;
using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
const OpType& m_op;
const BOpType& m_Bop;
mutable Vector m_cache; // temporary working space
public:
///
/// Constructor to create the matrix operation object.
///
/// \param op The \f$A\f$ matrix operation object.
/// \param Bop The \f$B\f$ matrix operation object.
///
SymGEigsRegInvOp(const OpType& op, const BOpType& Bop) :
m_op(op), m_Bop(Bop), m_cache(op.rows())
{}
///
/// Move constructor.
///
SymGEigsRegInvOp(SymGEigsRegInvOp&& other) :
m_op(other.m_op), m_Bop(other.m_Bop)
{
// We emulate the move constructor for Vector using Vector::swap()
m_cache.swap(other.m_cache);
}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_Bop.rows(); }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_Bop.rows(); }
///
/// Perform the matrix operation \f$y=B^{-1}Ax\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(B) * A * x_in
void perform_op(const Scalar* x_in, Scalar* y_out) const
{
m_op.perform_op(x_in, m_cache.data());
m_Bop.solve(m_cache.data(), y_out);
}
};
} // namespace Spectra
#endif // SPECTRA_SYM_GEIGS_REG_INV_OP_H
| 2,330 | 26.423529 | 79 | h |
abess | abess-master/include/Spectra/MatOp/internal/SymGEigsShiftInvertOp.h | // Copyright (C) 2020-2021 Yixuan Qiu <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_SYM_GEIGS_SHIFT_INVERT_OP_H
#define SPECTRA_SYM_GEIGS_SHIFT_INVERT_OP_H
#include <Eigen/Core>
#include "../SymShiftInvert.h"
#include "../SparseSymMatProd.h"
namespace Spectra {
///
/// \ingroup Operators
///
/// This class defines the matrix operation for generalized eigen solver in the
/// shift-and-invert mode. It computes \f$y=(A-\sigma B)^{-1}Bx\f$ for any
/// vector \f$x\f$, where \f$A\f$ is a symmetric matrix, \f$B\f$ is positive definite,
/// and \f$\sigma\f$ is a real shift.
/// This class is intended for internal use.
///
template <typename OpType = SymShiftInvert<double>,
typename BOpType = SparseSymMatProd<double>>
class SymGEigsShiftInvertOp
{
public:
using Scalar = typename OpType::Scalar;
private:
using Index = Eigen::Index;
using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
OpType& m_op;
const BOpType& m_Bop;
mutable Vector m_cache; // temporary working space
public:
///
/// Constructor to create the matrix operation object.
///
/// \param op The \f$(A-\sigma B)^{-1}\f$ matrix operation object.
/// \param Bop The \f$B\f$ matrix operation object.
///
SymGEigsShiftInvertOp(OpType& op, const BOpType& Bop) :
m_op(op), m_Bop(Bop), m_cache(op.rows())
{}
///
/// Move constructor.
///
SymGEigsShiftInvertOp(SymGEigsShiftInvertOp&& other) :
m_op(other.m_op), m_Bop(other.m_Bop)
{
// We emulate the move constructor for Vector using Vector::swap()
m_cache.swap(other.m_cache);
}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_op.rows(); }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_op.rows(); }
///
/// Set the real shift \f$\sigma\f$.
///
void set_shift(const Scalar& sigma)
{
m_op.set_shift(sigma);
}
///
/// Perform the matrix operation \f$y=(A-\sigma B)^{-1}Bx\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(A - sigma * B) * B * x_in
void perform_op(const Scalar* x_in, Scalar* y_out) const
{
m_Bop.perform_op(x_in, m_cache.data());
m_op.perform_op(m_cache.data(), y_out);
}
};
} // namespace Spectra
#endif // SPECTRA_SYM_GEIGS_SHIFT_INVERT_OP_H
| 2,702 | 27.15625 | 86 | h |
abess | abess-master/include/Spectra/Util/CompInfo.h | // Copyright (C) 2016-2021 Yixuan Qiu <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_COMP_INFO_H
#define SPECTRA_COMP_INFO_H
namespace Spectra {
///
/// \ingroup Enumerations
///
/// The enumeration to report the status of computation, returned by the
/// `info()` member function of solvers and matrix factorization classes.
///
enum class CompInfo
{
Successful, ///< Computation was successful.
NotComputed, ///< Used in eigen solvers, indicating that computation
///< has not been conducted. Users should call
///< the `compute()` member function of solvers.
NotConverging, ///< Used in eigen solvers, indicating that some eigenvalues
///< did not converge. The `compute()`
///< function returns the number of converged eigenvalues.
NumericalIssue ///< Used in various matrix factorization classes, for example in
///< Cholesky decomposition it indicates that the
///< matrix is not positive definite.
};
} // namespace Spectra
#endif // SPECTRA_COMP_INFO_H
| 1,213 | 31.810811 | 85 | h |
abess | abess-master/include/Spectra/Util/GEigsMode.h | // Copyright (C) 2016-2021 Yixuan Qiu <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_GEIGS_MODE_H
#define SPECTRA_GEIGS_MODE_H
namespace Spectra {
///
/// \ingroup Enumerations
///
/// The enumeration to specify the mode of the generalized eigenvalue solver,
/// i.e. how the problem \f$Ax=\lambda Bx\f$ is transformed before iteration.
///
enum class GEigsMode
{
Cholesky, ///< Using Cholesky decomposition to solve generalized eigenvalues.
RegularInverse, ///< Regular inverse mode for generalized eigenvalue solver.
ShiftInvert, ///< Shift-and-invert mode for generalized eigenvalue solver.
Buckling, ///< Buckling mode for generalized eigenvalue solver.
Cayley ///< Cayley transformation mode for generalized eigenvalue solver.
};
} // namespace Spectra
#endif // SPECTRA_GEIGS_MODE_H
| 959 | 32.103448 | 88 | h |
abess | abess-master/include/Spectra/Util/SelectionRule.h | // Copyright (C) 2016-2021 Yixuan Qiu <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_SELECTION_RULE_H
#define SPECTRA_SELECTION_RULE_H
#include <vector> // std::vector
#include <cmath> // std::abs
#include <algorithm> // std::sort
#include <complex> // std::complex
#include <utility> // std::pair
#include <stdexcept> // std::invalid_argument
#include <Eigen/Core>
#include "TypeTraits.h"
namespace Spectra {
///
/// \defgroup Enumerations Enumerations
///
/// Enumeration types for the selection rule of eigenvalues.
///
///
/// \ingroup Enumerations
///
/// The enumeration of selection rules of desired eigenvalues, i.e. which part
/// of the spectrum the solver should compute.
///
enum class SortRule
{
LargestMagn, ///< Select eigenvalues with largest magnitude. Magnitude
///< means the absolute value for real numbers and norm for
///< complex numbers. Applies to both symmetric and general
///< eigen solvers.
LargestReal, ///< Select eigenvalues with largest real part. Only for general eigen solvers.
LargestImag, ///< Select eigenvalues with largest imaginary part (in magnitude). Only for general eigen solvers.
LargestAlge, ///< Select eigenvalues with largest algebraic value, considering
///< any negative sign. Only for symmetric eigen solvers.
SmallestMagn, ///< Select eigenvalues with smallest magnitude. Applies to both symmetric and general
///< eigen solvers.
SmallestReal, ///< Select eigenvalues with smallest real part. Only for general eigen solvers.
SmallestImag, ///< Select eigenvalues with smallest imaginary part (in magnitude). Only for general eigen solvers.
SmallestAlge, ///< Select eigenvalues with smallest algebraic value. Only for symmetric eigen solvers.
BothEnds ///< Select eigenvalues half from each end of the spectrum. When
///< `nev` is odd, compute more from the high end. Only for symmetric eigen solvers.
};
/// \cond
// When comparing eigenvalues, we first calculate the "target" to sort.
// For example, if we want to choose the eigenvalues with
// largest magnitude, the target will be -abs(x).
// The minus sign is due to the fact that std::sort() sorts in ascending order.
// Default target: throw an exception.
// This primary template is only instantiated for scalar/rule combinations
// not covered by any specialization below, i.e. incompatible selections.
template <typename Scalar, SortRule Rule>
class SortingTarget
{
public:
static ElemType<Scalar> get(const Scalar& val)
{
using std::abs;
throw std::invalid_argument("incompatible selection rule");
return -abs(val);  // unreachable; keeps the function's return path well-formed
}
};
// Specialization for SortRule::LargestMagn
// This covers [float, double, complex] x [SortRule::LargestMagn]
template <typename Scalar>
class SortingTarget<Scalar, SortRule::LargestMagn>
{
public:
    // Negated magnitude: ascending sort then puts largest magnitudes first
    static ElemType<Scalar> get(const Scalar& x)
    {
        using std::abs;
        const auto mag = abs(x);
        return -mag;
    }
};
// Specialization for SortRule::LargestReal
// This covers [complex] x [SortRule::LargestReal]
template <typename RealType>
class SortingTarget<std::complex<RealType>, SortRule::LargestReal>
{
public:
    static RealType get(const std::complex<RealType>& value)
    {
        // Negated so that ascending sort yields descending real parts
        return -value.real();
    }
};
// Specialization for SortRule::LargestImag
// This covers [complex] x [SortRule::LargestImag]
template <typename RealType>
class SortingTarget<std::complex<RealType>, SortRule::LargestImag>
{
public:
    static RealType get(const std::complex<RealType>& value)
    {
        using std::abs;
        const RealType imag_magnitude = abs(value.imag());
        return -imag_magnitude;
    }
};
// Specialization for SortRule::LargestAlge
// This covers [float, double] x [SortRule::LargestAlge]
template <typename Scalar>
class SortingTarget<Scalar, SortRule::LargestAlge>
{
public:
    // Negated value: ascending sort then yields descending algebraic order
    static Scalar get(const Scalar& value) { return -value; }
};
// SortRule::BothEnds uses the same target as SortRule::LargestAlge; the
// additional interleaving of small values is done afterwards in
// SymEigsSolver.h => retrieve_ritzpair(), where the smallest values are
// moved to the proper locations.
template <typename Scalar>
class SortingTarget<Scalar, SortRule::BothEnds>
{
public:
    static Scalar get(const Scalar& value) { return -value; }
};
// Specialization for SortRule::SmallestMagn
// This covers [float, double, complex] x [SortRule::SmallestMagn]
template <typename Scalar>
class SortingTarget<Scalar, SortRule::SmallestMagn>
{
public:
    static ElemType<Scalar> get(const Scalar& value)
    {
        using std::abs;
        const ElemType<Scalar> magnitude = abs(value);
        return magnitude;
    }
};
// Specialization for SortRule::SmallestReal
// This covers [complex] x [SortRule::SmallestReal]
template <typename RealType>
class SortingTarget<std::complex<RealType>, SortRule::SmallestReal>
{
public:
    // Real part used directly: ascending sort gives smallest real parts first
    static RealType get(const std::complex<RealType>& value) { return value.real(); }
};
// Specialization for SortRule::SmallestImag
// This covers [complex] x [SortRule::SmallestImag]
template <typename RealType>
class SortingTarget<std::complex<RealType>, SortRule::SmallestImag>
{
public:
    static RealType get(const std::complex<RealType>& value)
    {
        using std::abs;
        const RealType imag_magnitude = abs(value.imag());
        return imag_magnitude;
    }
};
// Specialization for SortRule::SmallestAlge
// This covers [float, double] x [SortRule::SmallestAlge]
template <typename Scalar>
class SortingTarget<Scalar, SortRule::SmallestAlge>
{
public:
    // Identity target: ascending sort directly gives ascending algebraic order
    static Scalar get(const Scalar& value) { return value; }
};
// Sort eigenvalues: builds the permutation of indices [0, size) that orders
// the eigenvalue array according to the targets defined by SortingTarget.
template <typename T, SortRule Rule>
class SortEigenvalue
{
private:
    using Index = Eigen::Index;
    using IndexArray = std::vector<Index>;

    const T* m_evals;    // pointer to the eigenvalue array (not owned)
    IndexArray m_index;  // permutation of [0, size), sorted on construction

public:
    // Comparator: order indices by the sorting target of the values they point to
    inline bool operator()(Index i, Index j) const
    {
        return SortingTarget<T, Rule>::get(m_evals[i]) < SortingTarget<T, Rule>::get(m_evals[j]);
    }

    // \param start Pointer to the first eigenvalue; must stay valid while sorting.
    // \param size  Number of eigenvalues to sort.
    SortEigenvalue(const T* start, Index size) :
        m_evals(start), m_index(size)
    {
        for (Index i = 0; i < size; i++)
        {
            m_index[i] = i;
        }
        // Sort through a lambda rather than passing *this: std::sort takes the
        // comparator by value, and copying *this would also copy the whole
        // m_index vector (an O(n) copy for no benefit)
        std::sort(m_index.begin(), m_index.end(),
                  [this](Index i, Index j) { return (*this)(i, j); });
    }

    // Return a copy of the sorted index array
    inline IndexArray index() const { return m_index; }
    // Swap the sorted indices into \p other without copying
    inline void swap(IndexArray& other) { m_index.swap(other); }
};
// Sort values[:len] according to the selection rule, and return the indices
template <typename Scalar>
std::vector<Eigen::Index> argsort(SortRule selection, const Eigen::Matrix<Scalar, Eigen::Dynamic, 1>& values, Eigen::Index len)
{
    using Index = Eigen::Index;

    // Dispatch on the selection rule; each case instantiates the matching
    // SortEigenvalue and takes over its index permutation
    std::vector<Index> indices;
    switch (selection)
    {
        case SortRule::LargestMagn:
        {
            SortEigenvalue<Scalar, SortRule::LargestMagn> sorting(values.data(), len);
            sorting.swap(indices);
            break;
        }
        case SortRule::BothEnds:
        case SortRule::LargestAlge:
        {
            SortEigenvalue<Scalar, SortRule::LargestAlge> sorting(values.data(), len);
            sorting.swap(indices);
            break;
        }
        case SortRule::SmallestMagn:
        {
            SortEigenvalue<Scalar, SortRule::SmallestMagn> sorting(values.data(), len);
            sorting.swap(indices);
            break;
        }
        case SortRule::SmallestAlge:
        {
            SortEigenvalue<Scalar, SortRule::SmallestAlge> sorting(values.data(), len);
            sorting.swap(indices);
            break;
        }
        default:
            throw std::invalid_argument("unsupported selection rule");
    }

    // For SortRule::BothEnds the values are currently ordered by the
    // SortRule::LargestAlge rule, so interleave the two ends:
    //   Largest => Smallest => 2nd largest => 2nd smallest => ...
    // With this order the first k entries are always the wanted collection,
    // no matter whether k is nev_updated (used in SymEigsBase::restart())
    // or nev (used in SymEigsBase::sort_ritzpair())
    if (selection == SortRule::BothEnds)
    {
        const std::vector<Index> by_alge(indices);
        for (Index i = 0; i < len; i++)
        {
            // Even positions draw from the front (large values),
            // odd positions from the back (small values)
            indices[i] = (i % 2 == 0) ? by_alge[i / 2] : by_alge[len - 1 - i / 2];
        }
    }

    return indices;
}
// Convenience overload: sort the whole vector (len = values.size())
template <typename Scalar>
std::vector<Eigen::Index> argsort(SortRule selection, const Eigen::Matrix<Scalar, Eigen::Dynamic, 1>& values)
{
    const Eigen::Index len = values.size();
    return argsort<Scalar>(selection, values, len);
}
/// \endcond
} // namespace Spectra
#endif // SPECTRA_SELECTION_RULE_H
| 8,908 | 28.598007 | 127 | h |
abess | abess-master/include/Spectra/Util/SimpleRandom.h | // Copyright (C) 2016-2021 Yixuan Qiu <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_SIMPLE_RANDOM_H
#define SPECTRA_SIMPLE_RANDOM_H
#include <Eigen/Core>
/// \cond
namespace Spectra {
// We need a simple pseudo random number generator here:
// 1. It is used to generate initial and restarted residual vector.
// 2. It is not necessary to be so "random" and advanced. All we hope
// is that the residual vector is not in the space spanned by the
// current Krylov space. This should be met almost surely.
// 3. We don't want to call RNG in C++, since we actually want the
// algorithm to be deterministic. Also, calling RNG in C/C++ is not
// allowed in R packages submitted to CRAN.
// 4. The method should be as simple as possible, so an LCG is enough.
// 5. Based on public domain code by Ray Gardner
// http://stjarnhimlen.se/snippets/rg_rand.c
// Minimal deterministic LCG (see notes above; based on Ray Gardner's
// public-domain rg_rand.c). Not cryptographic; only needs to produce
// vectors unlikely to lie in the current Krylov subspace.
template <typename Scalar = double>
class SimpleRandom
{
private:
    using Index = Eigen::Index;
    using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
    static constexpr unsigned int m_a = 16807;          // multiplier
    static constexpr unsigned long m_max = 2147483647L; // modulus, 2^31 - 1
    long m_rand;                                        // RNG state, kept in [1, m_max]
    // Advance the LCG: computes (m_a * seed) mod m_max without overflowing
    // 32-bit arithmetic by splitting seed into 16-bit halves and folding the
    // partial products back below 2^31 - 1.
    inline long next_long_rand(long seed) const
    {
        unsigned long lo, hi;
        // lo/hi hold the products of the low/high 16 bits of seed with m_a
        lo = m_a * (long) (seed & 0xFFFF);
        hi = m_a * (long) ((unsigned long) seed >> 16);
        lo += (hi & 0x7FFF) << 16;
        // Fold back into [0, m_max] (subtracting m_max is done as mask + increment)
        if (lo > m_max)
        {
            lo &= m_max;
            ++lo;
        }
        lo += hi >> 15;
        if (lo > m_max)
        {
            lo &= m_max;
            ++lo;
        }
        return (long) lo;
    }
public:
    // Seed the generator; a zero seed is replaced by 1 so the LCG state
    // never becomes the absorbing value 0
    SimpleRandom(unsigned long init_seed) :
        m_rand(init_seed ? (init_seed & m_max) : 1)
    {}
    // Return a single random number, ranging from -0.5 to 0.5
    Scalar random()
    {
        m_rand = next_long_rand(m_rand);
        return Scalar(m_rand) / Scalar(m_max) - Scalar(0.5);
    }
    // Fill the given vector with random numbers
    // Ranging from -0.5 to 0.5
    void random_vec(Vector& vec)
    {
        const Index len = vec.size();
        for (Index i = 0; i < len; i++)
        {
            m_rand = next_long_rand(m_rand);
            vec[i] = Scalar(m_rand);
        }
        // Scale the raw states into [-0.5, 0.5] in one vectorized pass
        vec.array() = vec.array() / Scalar(m_max) - Scalar(0.5);
    }
    // Return a vector of random numbers
    // Ranging from -0.5 to 0.5
    Vector random_vec(const Index len)
    {
        Vector res(len);
        random_vec(res);
        return res;
    }
};
} // namespace Spectra
/// \endcond
#endif // SPECTRA_SIMPLE_RANDOM_H
| 2,841 | 27.42 | 70 | h |
abess | abess-master/include/Spectra/Util/TypeTraits.h | // Copyright (C) 2018-2021 Yixuan Qiu <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_TYPE_TRAITS_H
#define SPECTRA_TYPE_TRAITS_H
#include <Eigen/Core>
#include <limits>
/// \cond
// Clang-Format will have unintended effects:
// static constexpr Scalar(min)()
// So we turn it off here
//
// clang-format off
namespace Spectra {
// For a real value type "Scalar", we want to know its smallest
// positive value, i.e., std::numeric_limits<Scalar>::min().
// However, we must take non-standard value types into account,
// so we rely on Eigen::NumTraits.
//
// Eigen::NumTraits has defined epsilon() and lowest(), but
// lowest() means negative highest(), which is a very small
// negative value.
//
// Therefore, we manually define this limit, and use eplison()^3
// to mimic it for non-standard types.
// Generic definition
// For non-standard scalar types we rely on Eigen::NumTraits for epsilon(),
// and approximate the smallest positive value as epsilon()^3 (see the
// explanation above). (min) is parenthesized so that a macro named `min`
// (e.g. from <windows.h>) cannot expand here.
template <typename Scalar>
struct TypeTraits
{
    static constexpr Scalar epsilon()
    {
        return Eigen::numext::numeric_limits<Scalar>::epsilon();
    }
    static constexpr Scalar (min)()
    {
        return epsilon() * epsilon() * epsilon();
    }
};
// Full specialization for float: defer to the standard numeric limits.
// (min) stays parenthesized to defend against a `min` macro.
template <>
struct TypeTraits<float>
{
    static constexpr float epsilon() { return std::numeric_limits<float>::epsilon(); }
    static constexpr float (min)() { return (std::numeric_limits<float>::min)(); }
};
// Full specialization for double: defer to the standard numeric limits.
template <>
struct TypeTraits<double>
{
    static constexpr double epsilon() { return std::numeric_limits<double>::epsilon(); }
    static constexpr double (min)() { return (std::numeric_limits<double>::min)(); }
};
// Full specialization for long double: defer to the standard numeric limits.
template <>
struct TypeTraits<long double>
{
    static constexpr long double epsilon() { return std::numeric_limits<long double>::epsilon(); }
    static constexpr long double (min)() { return (std::numeric_limits<long double>::min)(); }
};
// Get the element type of a "scalar" via Eigen::NumTraits:
//   ElemType<double>               => double
//   ElemType<std::complex<double>> => double
template <typename T>
using ElemType = typename Eigen::NumTraits<T>::Real;
} // namespace Spectra
/// \endcond
#endif // SPECTRA_TYPE_TRAITS_H
| 2,365 | 22.66 | 70 | h |
abess | abess-master/include/Spectra/Util/Version.h | // Copyright (C) 2020-2021 Yixuan Qiu <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_VERSION_H
#define SPECTRA_VERSION_H
// Library version components (major.minor.patch)
#define SPECTRA_MAJOR_VERSION 1
#define SPECTRA_MINOR_VERSION 0
#define SPECTRA_PATCH_VERSION 0
// Single-integer encoding of the version, e.g. 1.0.0 -> 10000
#define SPECTRA_VERSION (SPECTRA_MAJOR_VERSION * 10000 + SPECTRA_MINOR_VERSION * 100 + SPECTRA_PATCH_VERSION)
#endif // SPECTRA_VERSION_H
| 556 | 31.764706 | 109 | h |
abess | abess-master/python/include/Eigen/src/Cholesky/LLT_LAPACKE.h | /*
Copyright (c) 2011, Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************************
* Content : Eigen bindings to LAPACKe
* LLt decomposition based on LAPACKE_?potrf function.
********************************************************************************
*/
#ifndef EIGEN_LLT_LAPACKE_H
#define EIGEN_LLT_LAPACKE_H
namespace Eigen {
namespace internal {
// Forward declaration; specialized per scalar type by the macro below.
template<typename Scalar> struct lapacke_llt;
// Generates, for one scalar type EIGTYPE:
//  - lapacke_llt<EIGTYPE>::potrf(): calls LAPACKE_?potrf to compute the
//    Cholesky factor in place, then remaps the LAPACK status
//    (0 -> -1 for success, k > 0 -> k-1 for the failing pivot,
//     negative -> size) to Eigen's llt_inplace convention;
//  - llt_inplace<EIGTYPE, Lower/Upper> specializations forwarding blocked()
//    to potrf('L'/'U'); the Upper rankUpdate works on the transpose so the
//    shared lower-triangular update can be reused.
// NOTE: comments must stay outside the macro body so the line-continuation
// backslashes remain intact.
#define EIGEN_LAPACKE_LLT(EIGTYPE, BLASTYPE, LAPACKE_PREFIX) \
template<> struct lapacke_llt<EIGTYPE> \
{ \
  template<typename MatrixType> \
  static inline Index potrf(MatrixType& m, char uplo) \
  { \
    lapack_int matrix_order; \
    lapack_int size, lda, info, StorageOrder; \
    EIGTYPE* a; \
    eigen_assert(m.rows()==m.cols()); \
    /* Set up parameters for ?potrf */ \
    size = convert_index<lapack_int>(m.rows()); \
    StorageOrder = MatrixType::Flags&RowMajorBit?RowMajor:ColMajor; \
    matrix_order = StorageOrder==RowMajor ? LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \
    a = &(m.coeffRef(0,0)); \
    lda = convert_index<lapack_int>(m.outerStride()); \
\
    info = LAPACKE_##LAPACKE_PREFIX##potrf( matrix_order, uplo, size, (BLASTYPE*)a, lda ); \
    info = (info==0) ? -1 : info>0 ? info-1 : size; \
    return info; \
  } \
}; \
template<> struct llt_inplace<EIGTYPE, Lower> \
{ \
  template<typename MatrixType> \
  static Index blocked(MatrixType& m) \
  { \
    return lapacke_llt<EIGTYPE>::potrf(m, 'L'); \
  } \
  template<typename MatrixType, typename VectorType> \
  static Index rankUpdate(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) \
  { return Eigen::internal::llt_rank_update_lower(mat, vec, sigma); } \
}; \
template<> struct llt_inplace<EIGTYPE, Upper> \
{ \
  template<typename MatrixType> \
  static Index blocked(MatrixType& m) \
  { \
    return lapacke_llt<EIGTYPE>::potrf(m, 'U'); \
  } \
  template<typename MatrixType, typename VectorType> \
  static Index rankUpdate(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) \
  { \
    Transpose<MatrixType> matt(mat); \
    return llt_inplace<EIGTYPE, Lower>::rankUpdate(matt, vec.conjugate(), sigma); \
  } \
};
// Instantiate the bindings for the four LAPACK scalar types
// (d = double, s = float, z = complex double, c = complex float).
EIGEN_LAPACKE_LLT(double, double, d)
EIGEN_LAPACKE_LLT(float, float, s)
EIGEN_LAPACKE_LLT(dcomplex, lapack_complex_double, z)
EIGEN_LAPACKE_LLT(scomplex, lapack_complex_float, c)
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_LLT_LAPACKE_H
| 3,974 | 38.75 | 113 | h |
abess | abess-master/python/include/Eigen/src/Core/ArrayBase.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_ARRAYBASE_H
#define EIGEN_ARRAYBASE_H
namespace Eigen {
template<typename ExpressionType> class MatrixWrapper;
/** \class ArrayBase
* \ingroup Core_Module
*
* \brief Base class for all 1D and 2D array, and related expressions
*
* An array is similar to a dense vector or matrix. While matrices are mathematical
* objects with well defined linear algebra operators, an array is just a collection
* of scalar values arranged in a one or two dimensionnal fashion. As the main consequence,
* all operations applied to an array are performed coefficient wise. Furthermore,
* arrays support scalar math functions of the c++ standard library (e.g., std::sin(x)), and convenient
* constructors allowing to easily write generic code working for both scalar values
* and arrays.
*
* This class is the base that is inherited by all array expression types.
*
* \tparam Derived is the derived type, e.g., an array or an expression type.
*
* This class can be extended with the help of the plugin mechanism described on the page
* \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_ARRAYBASE_PLUGIN.
*
* \sa class MatrixBase, \ref TopicClassHierarchy
*/
template<typename Derived> class ArrayBase
  : public DenseBase<Derived>
{
  public:
#ifndef EIGEN_PARSED_BY_DOXYGEN
    /** The base class for a given storage type. */
    typedef ArrayBase StorageBaseType;
    typedef ArrayBase Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl;
    typedef typename internal::traits<Derived>::StorageKind StorageKind;
    typedef typename internal::traits<Derived>::Scalar Scalar;
    typedef typename internal::packet_traits<Scalar>::type PacketScalar;
    typedef typename NumTraits<Scalar>::Real RealScalar;
    typedef DenseBase<Derived> Base;
    // Re-export the compile-time constants and the assignment operators of
    // DenseBase so they participate in overload resolution here.
    using Base::RowsAtCompileTime;
    using Base::ColsAtCompileTime;
    using Base::SizeAtCompileTime;
    using Base::MaxRowsAtCompileTime;
    using Base::MaxColsAtCompileTime;
    using Base::MaxSizeAtCompileTime;
    using Base::IsVectorAtCompileTime;
    using Base::Flags;
    using Base::derived;
    using Base::const_cast_derived;
    using Base::rows;
    using Base::cols;
    using Base::size;
    using Base::coeff;
    using Base::coeffRef;
    using Base::lazyAssign;
    using Base::operator=;
    using Base::operator+=;
    using Base::operator-=;
    using Base::operator*=;
    using Base::operator/=;
    typedef typename Base::CoeffReturnType CoeffReturnType;
#endif // not EIGEN_PARSED_BY_DOXYGEN
#ifndef EIGEN_PARSED_BY_DOXYGEN
    typedef typename Base::PlainObject PlainObject;
    /** \internal Represents a matrix with all coefficients equal to one another*/
    typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,PlainObject> ConstantReturnType;
#endif // not EIGEN_PARSED_BY_DOXYGEN
// The bulk of the coefficient-wise API is textually included from the
// plugin headers below; EIGEN_CURRENT_STORAGE_BASE_CLASS tells them which
// base class they are being expanded into.
#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::ArrayBase
#define EIGEN_DOC_UNARY_ADDONS(X,Y)
#   include "../plugins/CommonCwiseUnaryOps.h"
#   include "../plugins/MatrixCwiseUnaryOps.h"
#   include "../plugins/ArrayCwiseUnaryOps.h"
#   include "../plugins/CommonCwiseBinaryOps.h"
#   include "../plugins/MatrixCwiseBinaryOps.h"
#   include "../plugins/ArrayCwiseBinaryOps.h"
#   ifdef EIGEN_ARRAYBASE_PLUGIN
#     include EIGEN_ARRAYBASE_PLUGIN
#   endif
#undef EIGEN_CURRENT_STORAGE_BASE_CLASS
#undef EIGEN_DOC_UNARY_ADDONS
    /** Special case of the template operator=, in order to prevent the compiler
      * from generating a default operator= (issue hit with g++ 4.1)
      */
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    Derived& operator=(const ArrayBase& other)
    {
      internal::call_assignment(derived(), other.derived());
      return derived();
    }
    /** Set all the entries to \a value.
      * \sa DenseBase::setConstant(), DenseBase::fill() */
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    Derived& operator=(const Scalar &value)
    { Base::setConstant(value); return derived(); }
    // Scalar and array compound assignments; definitions for the array
    // overloads live at the bottom of this file.
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    Derived& operator+=(const Scalar& scalar);
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    Derived& operator-=(const Scalar& scalar);
    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    Derived& operator+=(const ArrayBase<OtherDerived>& other);
    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    Derived& operator-=(const ArrayBase<OtherDerived>& other);
    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    Derived& operator*=(const ArrayBase<OtherDerived>& other);
    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    Derived& operator/=(const ArrayBase<OtherDerived>& other);
  public:
    // An ArrayBase viewed as an array is itself (no-op views)
    EIGEN_DEVICE_FUNC
    ArrayBase<Derived>& array() { return *this; }
    EIGEN_DEVICE_FUNC
    const ArrayBase<Derived>& array() const { return *this; }
    /** \returns an \link Eigen::MatrixBase Matrix \endlink expression of this array
      * \sa MatrixBase::array() */
    EIGEN_DEVICE_FUNC
    MatrixWrapper<Derived> matrix() { return MatrixWrapper<Derived>(derived()); }
    EIGEN_DEVICE_FUNC
    const MatrixWrapper<const Derived> matrix() const { return MatrixWrapper<const Derived>(derived()); }
//     template<typename Dest>
//     inline void evalTo(Dest& dst) const { dst = matrix(); }
  protected:
    EIGEN_DEVICE_FUNC
    ArrayBase() : Base() {}
  private:
    // Sizing constructors are private and left undefined: an abstract
    // expression base cannot be constructed directly.
    explicit ArrayBase(Index);
    ArrayBase(Index,Index);
    template<typename OtherDerived> explicit ArrayBase(const ArrayBase<OtherDerived>&);
  protected:
    // mixing arrays and matrices is not legal
    template<typename OtherDerived> Derived& operator+=(const MatrixBase<OtherDerived>& )
    {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;}
    // mixing arrays and matrices is not legal
    template<typename OtherDerived> Derived& operator-=(const MatrixBase<OtherDerived>& )
    {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;}
};
/** replaces \c *this by \c *this - \a other.
  *
  * \returns a reference to \c *this
  */
template<typename Derived>
template<typename OtherDerived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived &
ArrayBase<Derived>::operator-=(const ArrayBase<OtherDerived> &other)
{
  // Delegate the coefficient-wise subtraction to the assignment machinery
  typedef internal::sub_assign_op<Scalar,typename OtherDerived::Scalar> SubOp;
  call_assignment(derived(), other.derived(), SubOp());
  return derived();
}
/** replaces \c *this by \c *this + \a other.
  *
  * \returns a reference to \c *this
  */
template<typename Derived>
template<typename OtherDerived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived &
ArrayBase<Derived>::operator+=(const ArrayBase<OtherDerived>& other)
{
  // Delegate the coefficient-wise addition to the assignment machinery
  typedef internal::add_assign_op<Scalar,typename OtherDerived::Scalar> AddOp;
  call_assignment(derived(), other.derived(), AddOp());
  return derived();
}
/** replaces \c *this by \c *this * \a other coefficient wise.
  *
  * \returns a reference to \c *this
  */
template<typename Derived>
template<typename OtherDerived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived &
ArrayBase<Derived>::operator*=(const ArrayBase<OtherDerived>& other)
{
  // Delegate the coefficient-wise product to the assignment machinery
  typedef internal::mul_assign_op<Scalar,typename OtherDerived::Scalar> MulOp;
  call_assignment(derived(), other.derived(), MulOp());
  return derived();
}
/** replaces \c *this by \c *this / \a other coefficient wise.
  *
  * \returns a reference to \c *this
  */
template<typename Derived>
template<typename OtherDerived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived &
ArrayBase<Derived>::operator/=(const ArrayBase<OtherDerived>& other)
{
  // Delegate the coefficient-wise quotient to the assignment machinery
  typedef internal::div_assign_op<Scalar,typename OtherDerived::Scalar> DivOp;
  call_assignment(derived(), other.derived(), DivOp());
  return derived();
}
} // end namespace Eigen
#endif // EIGEN_ARRAYBASE_H
| 8,179 | 35.035242 | 134 | h |
abess | abess-master/python/include/Eigen/src/Core/ArrayWrapper.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2010 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_ARRAYWRAPPER_H
#define EIGEN_ARRAYWRAPPER_H
namespace Eigen {
/** \class ArrayWrapper
* \ingroup Core_Module
*
* \brief Expression of a mathematical vector or matrix as an array object
*
* This class is the return type of MatrixBase::array(), and most of the time
* this is the only way it is use.
*
* \sa MatrixBase::array(), class MatrixWrapper
*/
namespace internal {
// Traits mirror those of the wrapped expression, but report the array
// expression kind so coefficient-wise semantics apply.
template<typename ExpressionType>
struct traits<ArrayWrapper<ExpressionType> >
  : public traits<typename remove_all<typename ExpressionType::Nested>::type >
{
  typedef ArrayXpr XprKind;
  // Let's remove NestByRefBit
  enum {
    Flags0 = traits<typename remove_all<typename ExpressionType::Nested>::type >::Flags,
    // Keep LvalueBit only if the wrapped expression is itself writable
    LvalueBitFlag = is_lvalue<ExpressionType>::value ? LvalueBit : 0,
    Flags = (Flags0 & ~(NestByRefBit | LvalueBit)) | LvalueBitFlag
  };
};
}
template<typename ExpressionType>
class ArrayWrapper : public ArrayBase<ArrayWrapper<ExpressionType> >
{
  public:
    typedef ArrayBase<ArrayWrapper> Base;
    EIGEN_DENSE_PUBLIC_INTERFACE(ArrayWrapper)
    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(ArrayWrapper)
    typedef typename internal::remove_all<ExpressionType>::type NestedExpression;

    // Writable scalar access is only exposed when the wrapped expression is an lvalue
    typedef typename internal::conditional<
                       internal::is_lvalue<ExpressionType>::value,
                       Scalar,
                       const Scalar
                     >::type ScalarWithConstIfNotLvalue;

    typedef typename internal::ref_selector<ExpressionType>::non_const_type NestedExpressionType;

    using Base::coeffRef;

    /** Wraps the given expression \a matrix as an array. */
    EIGEN_DEVICE_FUNC
    explicit EIGEN_STRONG_INLINE ArrayWrapper(ExpressionType& matrix) : m_expression(matrix) {}

    // Size and stride queries all forward to the wrapped expression
    EIGEN_DEVICE_FUNC
    inline Index rows() const { return m_expression.rows(); }
    EIGEN_DEVICE_FUNC
    inline Index cols() const { return m_expression.cols(); }
    EIGEN_DEVICE_FUNC
    inline Index outerStride() const { return m_expression.outerStride(); }
    EIGEN_DEVICE_FUNC
    inline Index innerStride() const { return m_expression.innerStride(); }

    EIGEN_DEVICE_FUNC
    inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); }
    EIGEN_DEVICE_FUNC
    inline const Scalar* data() const { return m_expression.data(); }

    EIGEN_DEVICE_FUNC
    inline const Scalar& coeffRef(Index rowId, Index colId) const
    {
      return m_expression.coeffRef(rowId, colId);
    }

    EIGEN_DEVICE_FUNC
    inline const Scalar& coeffRef(Index index) const
    {
      return m_expression.coeffRef(index);
    }

    /** \internal Evaluates the wrapped expression into \a dst. */
    template<typename Dest>
    EIGEN_DEVICE_FUNC
    inline void evalTo(Dest& dst) const { dst = m_expression; }

    /** \returns the wrapped expression.
      * Fix: EIGEN_DEVICE_FUNC now precedes the return type (it was placed
      * between return type and function name), matching its placement on
      * every other member and on MatrixWrapper::nestedExpression(). */
    EIGEN_DEVICE_FUNC
    const typename internal::remove_all<NestedExpressionType>::type&
    nestedExpression() const
    {
      return m_expression;
    }

    /** Forwards the resizing request to the nested expression
      * \sa DenseBase::resize(Index) */
    EIGEN_DEVICE_FUNC
    void resize(Index newSize) { m_expression.resize(newSize); }
    /** Forwards the resizing request to the nested expression
      * \sa DenseBase::resize(Index,Index)*/
    EIGEN_DEVICE_FUNC
    void resize(Index rows, Index cols) { m_expression.resize(rows,cols); }

  protected:
    NestedExpressionType m_expression;
};
/** \class MatrixWrapper
* \ingroup Core_Module
*
* \brief Expression of an array as a mathematical vector or matrix
*
* This class is the return type of ArrayBase::matrix(), and most of the time
* this is the only way it is use.
*
* \sa MatrixBase::matrix(), class ArrayWrapper
*/
namespace internal {
// Traits mirror those of the wrapped expression, but report the matrix
// expression kind so linear-algebra semantics apply.
template<typename ExpressionType>
struct traits<MatrixWrapper<ExpressionType> >
 : public traits<typename remove_all<typename ExpressionType::Nested>::type >
{
  typedef MatrixXpr XprKind;
  // Let's remove NestByRefBit
  enum {
    Flags0 = traits<typename remove_all<typename ExpressionType::Nested>::type >::Flags,
    // Keep LvalueBit only if the wrapped expression is itself writable
    LvalueBitFlag = is_lvalue<ExpressionType>::value ? LvalueBit : 0,
    Flags = (Flags0 & ~(NestByRefBit | LvalueBit)) | LvalueBitFlag
  };
};
}
template<typename ExpressionType>
class MatrixWrapper : public MatrixBase<MatrixWrapper<ExpressionType> >
{
  public:
    typedef MatrixBase<MatrixWrapper<ExpressionType> > Base;
    EIGEN_DENSE_PUBLIC_INTERFACE(MatrixWrapper)
    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(MatrixWrapper)
    typedef typename internal::remove_all<ExpressionType>::type NestedExpression;
    // Writable scalar access is only exposed when the wrapped expression is an lvalue
    typedef typename internal::conditional<
                       internal::is_lvalue<ExpressionType>::value,
                       Scalar,
                       const Scalar
                     >::type ScalarWithConstIfNotLvalue;
    typedef typename internal::ref_selector<ExpressionType>::non_const_type NestedExpressionType;
    using Base::coeffRef;
    /** Wraps the given expression \a matrix as a matrix. */
    EIGEN_DEVICE_FUNC
    explicit inline MatrixWrapper(ExpressionType& matrix) : m_expression(matrix) {}
    // Size and stride queries all forward to the wrapped expression
    EIGEN_DEVICE_FUNC
    inline Index rows() const { return m_expression.rows(); }
    EIGEN_DEVICE_FUNC
    inline Index cols() const { return m_expression.cols(); }
    EIGEN_DEVICE_FUNC
    inline Index outerStride() const { return m_expression.outerStride(); }
    EIGEN_DEVICE_FUNC
    inline Index innerStride() const { return m_expression.innerStride(); }
    EIGEN_DEVICE_FUNC
    inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); }
    EIGEN_DEVICE_FUNC
    inline const Scalar* data() const { return m_expression.data(); }
    EIGEN_DEVICE_FUNC
    inline const Scalar& coeffRef(Index rowId, Index colId) const
    {
      return m_expression.derived().coeffRef(rowId, colId);
    }
    EIGEN_DEVICE_FUNC
    inline const Scalar& coeffRef(Index index) const
    {
      return m_expression.coeffRef(index);
    }
    /** \returns the wrapped expression. */
    EIGEN_DEVICE_FUNC
    const typename internal::remove_all<NestedExpressionType>::type&
    nestedExpression() const
    {
      return m_expression;
    }
    /** Forwards the resizing request to the nested expression
      * \sa DenseBase::resize(Index) */
    EIGEN_DEVICE_FUNC
    void resize(Index newSize) { m_expression.resize(newSize); }
    /** Forwards the resizing request to the nested expression
      * \sa DenseBase::resize(Index,Index)*/
    EIGEN_DEVICE_FUNC
    void resize(Index rows, Index cols) { m_expression.resize(rows,cols); }
  protected:
    NestedExpressionType m_expression;
};
} // end namespace Eigen
#endif // EIGEN_ARRAYWRAPPER_H
| 6,775 | 31.266667 | 97 | h |
abess | abess-master/python/include/Eigen/src/Core/BooleanRedux.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_ALLANDANY_H
#define EIGEN_ALLANDANY_H
namespace Eigen {
namespace internal {
// Compile-time unrolled AND over all coefficients: each recursion level
// peels off coefficient (row, col), walking the matrix in column-major order.
template<typename Derived, int UnrollCount>
struct all_unroller
{
  typedef typename Derived::ExpressionTraits Traits;
  enum {
    // Map the linear count (UnrollCount-1) to a (row, col) pair
    col = (UnrollCount-1) / Traits::RowsAtCompileTime,
    row = (UnrollCount-1) % Traits::RowsAtCompileTime
  };
  static inline bool run(const Derived &mat)
  {
    return all_unroller<Derived, UnrollCount-1>::run(mat) && mat.coeff(row, col);
  }
};
// Terminal case: an empty conjunction is true
template<typename Derived>
struct all_unroller<Derived, 0>
{
  static inline bool run(const Derived &/*mat*/) { return true; }
};
// Dynamic-size case: instantiated only for the non-unrolled branch of
// DenseBase::all(); never executed at runtime, so the value is irrelevant
template<typename Derived>
struct all_unroller<Derived, Dynamic>
{
  static inline bool run(const Derived &) { return false; }
};
// Compile-time unrolled OR over all coefficients: each recursion level
// peels off coefficient (row, col), walking the matrix in column-major order.
template<typename Derived, int UnrollCount>
struct any_unroller
{
  typedef typename Derived::ExpressionTraits Traits;
  enum {
    // Map the linear count (UnrollCount-1) to a (row, col) pair
    col = (UnrollCount-1) / Traits::RowsAtCompileTime,
    row = (UnrollCount-1) % Traits::RowsAtCompileTime
  };
  static inline bool run(const Derived &mat)
  {
    return any_unroller<Derived, UnrollCount-1>::run(mat) || mat.coeff(row, col);
  }
};
// Terminal case: an empty disjunction is false
template<typename Derived>
struct any_unroller<Derived, 0>
{
  static inline bool run(const Derived & /*mat*/) { return false; }
};
// Dynamic-size case: instantiated only for the non-unrolled branch of
// DenseBase::any(); never executed at runtime, so the value is irrelevant
template<typename Derived>
struct any_unroller<Derived, Dynamic>
{
  static inline bool run(const Derived &) { return false; }
};
} // end namespace internal
/** \returns true if all coefficients are true
  *
  * Example: \include MatrixBase_all.cpp
  * Output: \verbinclude MatrixBase_all.out
  *
  * \sa any(), Cwise::operator<()
  */
template<typename Derived>
inline bool DenseBase<Derived>::all() const
{
  typedef internal::evaluator<Derived> Evaluator;
  // Unroll only for fixed sizes whose total evaluation cost is small enough
  enum {
    unroll = SizeAtCompileTime != Dynamic
          && SizeAtCompileTime * (Evaluator::CoeffReadCost + NumTraits<Scalar>::AddCost) <= EIGEN_UNROLLING_LIMIT
  };
  Evaluator eval(derived());
  if(unroll)
    return internal::all_unroller<Evaluator, unroll ? int(SizeAtCompileTime) : Dynamic>::run(eval);
  // Runtime path: scan column-major and stop at the first false coefficient
  for(Index j = 0; j < cols(); ++j)
    for(Index i = 0; i < rows(); ++i)
      if (!eval.coeff(i, j))
        return false;
  return true;
}
/** \returns true if at least one coefficient is true
  *
  * \sa all()
  */
template<typename Derived>
inline bool DenseBase<Derived>::any() const
{
  typedef internal::evaluator<Derived> Evaluator;
  // Unroll only for fixed sizes whose total evaluation cost is small enough
  enum {
    unroll = SizeAtCompileTime != Dynamic
          && SizeAtCompileTime * (Evaluator::CoeffReadCost + NumTraits<Scalar>::AddCost) <= EIGEN_UNROLLING_LIMIT
  };
  Evaluator eval(derived());
  if(unroll)
    return internal::any_unroller<Evaluator, unroll ? int(SizeAtCompileTime) : Dynamic>::run(eval);
  // Runtime path: scan column-major and stop at the first true coefficient
  for(Index j = 0; j < cols(); ++j)
    for(Index i = 0; i < rows(); ++i)
      if (eval.coeff(i, j))
        return true;
  return false;
}
/** \returns the number of coefficients which evaluate to true
  *
  * \sa all(), any()
  */
template<typename Derived>
inline Eigen::Index DenseBase<Derived>::count() const
{
  // Map each coefficient to bool, then to Index (0 or 1), and sum the result
  return derived().template cast<bool>().template cast<Index>().sum();
}
/** \returns true if \c *this contains at least one Not A Number (NaN).
  *
  * \sa allFinite()
  */
template<typename Derived>
inline bool DenseBase<Derived>::hasNaN() const
{
#if EIGEN_COMP_MSVC || (defined __FAST_MATH__)
  // NOTE(review): MSVC and fast-math builds take the explicit isNaN() path
  // instead of the self-comparison below — presumably because those modes
  // do not preserve IEEE NaN comparison semantics; confirm before changing.
  return derived().array().isNaN().any();
#else
  // IEEE rule: NaN != NaN, so any NaN makes (x == x) false somewhere
  return !((derived().array()==derived().array()).all());
#endif
}
/** \returns true if \c *this contains only finite numbers, i.e., no NaN and no +/-INF values.
*
* \sa hasNaN()
*/
template<typename Derived>
inline bool DenseBase<Derived>::allFinite() const
{
#if EIGEN_COMP_MSVC || (defined __FAST_MATH__)
  // MSVC and fast-math builds may optimize away the arithmetic trick below.
  return derived().array().isFinite().all();
#else
  // x - x yields NaN whenever x is NaN or +/-Inf, so finiteness of every
  // coefficient reduces to a NaN check on the difference.
  return !((derived()-derived()).hasNaN());
#endif
}
} // end namespace Eigen
#endif // EIGEN_ALLANDANY_H
| 4,249 | 24.757576 | 113 | h |
abess | abess-master/python/include/Eigen/src/Core/CommaInitializer.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <[email protected]>
// Copyright (C) 2006-2008 Benoit Jacob <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_COMMAINITIALIZER_H
#define EIGEN_COMMAINITIALIZER_H
namespace Eigen {
/** \class CommaInitializer
* \ingroup Core_Module
*
* \brief Helper class used by the comma initializer operator
*
* This class is internally used to implement the comma initializer feature. It is
* the return type of MatrixBase::operator<<, and most of the time this is the only
* way it is used.
*
* \sa \blank \ref MatrixBaseCommaInitRef "MatrixBase::operator<<", CommaInitializer::finished()
*/
template<typename XprType>
struct CommaInitializer
{
  typedef typename XprType::Scalar Scalar;
  /** Starts the initialization: scalar \a s becomes coefficient (0,0),
    * and the cursor moves to column 1 of row 0. */
  EIGEN_DEVICE_FUNC
  inline CommaInitializer(XprType& xpr, const Scalar& s)
    : m_xpr(xpr), m_row(0), m_col(1), m_currentBlockRows(1)
  {
    m_xpr.coeffRef(0,0) = s;
  }
  /** Starts the initialization with the expression \a other copied into the
    * top-left block; the cursor moves past its columns. */
  template<typename OtherDerived>
  EIGEN_DEVICE_FUNC
  inline CommaInitializer(XprType& xpr, const DenseBase<OtherDerived>& other)
    : m_xpr(xpr), m_row(0), m_col(other.cols()), m_currentBlockRows(other.rows())
  {
    m_xpr.block(0, 0, other.rows(), other.cols()) = other;
  }
  /* Copy/Move constructor which transfers ownership. This is crucial in
   * absence of return value optimization to avoid assertions during destruction. */
  // FIXME in C++11 mode this could be replaced by a proper RValue constructor
  EIGEN_DEVICE_FUNC
  inline CommaInitializer(const CommaInitializer& o)
  : m_xpr(o.m_xpr), m_row(o.m_row), m_col(o.m_col), m_currentBlockRows(o.m_currentBlockRows) {
    // Mark original object as finished. In absence of R-value references we need to const_cast:
    // this keeps o's destructor (which calls finished()) from asserting.
    const_cast<CommaInitializer&>(o).m_row = m_xpr.rows();
    const_cast<CommaInitializer&>(o).m_col = m_xpr.cols();
    const_cast<CommaInitializer&>(o).m_currentBlockRows = 0;
  }
  /* inserts a scalar value in the target matrix */
  EIGEN_DEVICE_FUNC
  CommaInitializer& operator,(const Scalar& s)
  {
    if (m_col==m_xpr.cols())
    {
      // Current row (block) is full: move down and start a new 1-row block.
      m_row+=m_currentBlockRows;
      m_col = 0;
      m_currentBlockRows = 1;
      eigen_assert(m_row<m_xpr.rows()
        && "Too many rows passed to comma initializer (operator<<)");
    }
    eigen_assert(m_col<m_xpr.cols()
      && "Too many coefficients passed to comma initializer (operator<<)");
    eigen_assert(m_currentBlockRows==1);
    m_xpr.coeffRef(m_row, m_col++) = s;
    return *this;
  }
  /* inserts a matrix expression in the target matrix */
  template<typename OtherDerived>
  EIGEN_DEVICE_FUNC
  CommaInitializer& operator,(const DenseBase<OtherDerived>& other)
  {
    if (m_col==m_xpr.cols() && (other.cols()!=0 || other.rows()!=m_currentBlockRows))
    {
      // Start a new block-row whose height is given by 'other'.
      m_row+=m_currentBlockRows;
      m_col = 0;
      m_currentBlockRows = other.rows();
      eigen_assert(m_row+m_currentBlockRows<=m_xpr.rows()
        && "Too many rows passed to comma initializer (operator<<)");
    }
    eigen_assert((m_col + other.cols() <= m_xpr.cols())
      && "Too many coefficients passed to comma initializer (operator<<)");
    eigen_assert(m_currentBlockRows==other.rows());
    m_xpr.template block<OtherDerived::RowsAtCompileTime, OtherDerived::ColsAtCompileTime>
                    (m_row, m_col, other.rows(), other.cols()) = other;
    m_col += other.cols();
    return *this;
  }
  // The destructor checks (via finished()) that the matrix was fully filled.
  EIGEN_DEVICE_FUNC
  inline ~CommaInitializer()
#if defined VERIFY_RAISES_ASSERT && (!defined EIGEN_NO_ASSERTION_CHECKING) && defined EIGEN_EXCEPTIONS
  EIGEN_EXCEPTION_SPEC(Eigen::eigen_assert_exception)
#endif
  {
    finished();
  }
  /** \returns the built matrix once all its coefficients have been set.
    * Calling finished is 100% optional. Its purpose is to write expressions
    * like this:
    * \code
    * quaternion.fromRotationMatrix((Matrix3f() << axis0, axis1, axis2).finished());
    * \endcode
    */
  EIGEN_DEVICE_FUNC
  inline XprType& finished() {
    eigen_assert(((m_row+m_currentBlockRows) == m_xpr.rows() || m_xpr.cols() == 0)
         && m_col == m_xpr.cols()
         && "Too few coefficients passed to comma initializer (operator<<)");
    return m_xpr;
  }
  XprType& m_xpr;           // target expression
  Index m_row;              // current row id
  Index m_col;              // current col id
  Index m_currentBlockRows; // current block height
};
/** \anchor MatrixBaseCommaInitRef
* Convenient operator to set the coefficients of a matrix.
*
* The coefficients must be provided in a row major order and exactly match
* the size of the matrix. Otherwise an assertion is raised.
*
* Example: \include MatrixBase_set.cpp
* Output: \verbinclude MatrixBase_set.out
*
* \note According the c++ standard, the argument expressions of this comma initializer are evaluated in arbitrary order.
*
* \sa CommaInitializer::finished(), class CommaInitializer
*/
template<typename Derived>
inline CommaInitializer<Derived> DenseBase<Derived>::operator<< (const Scalar& s)
{
  // Seed the comma initializer with the top-left coefficient; subsequent
  // values are appended through CommaInitializer::operator,().
  Derived& target = *static_cast<Derived*>(this);
  return CommaInitializer<Derived>(target, s);
}
/** \sa operator<<(const Scalar&) */
/** \sa operator<<(const Scalar&) */
template<typename Derived>
template<typename OtherDerived>
inline CommaInitializer<Derived>
DenseBase<Derived>::operator<<(const DenseBase<OtherDerived>& other)
{
  // Seed the comma initializer with a whole sub-expression instead of a scalar.
  Derived& target = *static_cast<Derived *>(this);
  return CommaInitializer<Derived>(target, other);
}
} // end namespace Eigen
#endif // EIGEN_COMMAINITIALIZER_H
| 5,689 | 34.341615 | 122 | h |
abess | abess-master/python/include/Eigen/src/Core/ConditionEstimator.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Rasmus Munk Larsen ([email protected])
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CONDITIONESTIMATOR_H
#define EIGEN_CONDITIONESTIMATOR_H
namespace Eigen {
namespace internal {
template <typename Vector, typename RealVector, bool IsComplex>
struct rcond_compute_sign {
  static inline Vector run(const Vector& v) {
    typedef typename Vector::RealScalar RealScalar;
    const RealVector abs_values = v.cwiseAbs();
    // Entries with zero modulus get sign 1; every other entry is mapped to
    // v / |v|, i.e. a unit-modulus value with the same phase.
    return (abs_values.array() == static_cast<RealScalar>(0))
               .select(Vector::Ones(v.size()), v.cwiseQuotient(abs_values));
  }
};
// Partial specialization to avoid elementwise division for real vectors.
template <typename Vector>
struct rcond_compute_sign<Vector, Vector, false> {
  static inline Vector run(const Vector& v) {
    // For real vectors the sign is simply +/-1. Note that the '<' comparison
    // is deliberate: entries for which it is false (including NaN) map to +1.
    return (v.array() < static_cast<typename Vector::RealScalar>(0))
        .select(-Vector::Ones(v.size()), Vector::Ones(v.size()));
  }
};
/**
* \returns an estimate of ||inv(matrix)||_1 given a decomposition of
* \a matrix that implements .solve() and .adjoint().solve() methods.
*
* This function implements Algorithms 4.1 and 5.1 from
* http://www.maths.manchester.ac.uk/~higham/narep/narep135.pdf
* which also forms the basis for the condition number estimators in
* LAPACK. Since at most 10 calls to the solve method of dec are
* performed, the total cost is O(dims^2), as opposed to O(dims^3)
* needed to compute the inverse matrix explicitly.
*
* The most common usage is in estimating the condition number
* ||matrix||_1 * ||inv(matrix)||_1. The first term ||matrix||_1 can be
* computed directly in O(n^2) operations.
*
* Supports the following decompositions: FullPivLU, PartialPivLU, LDLT, and
* LLT.
*
* \sa FullPivLU, PartialPivLU, LDLT, LLT.
*/
template <typename Decomposition>
typename Decomposition::RealScalar rcond_invmatrix_L1_norm_estimate(const Decomposition& dec)
{
  typedef typename Decomposition::MatrixType MatrixType;
  typedef typename Decomposition::Scalar Scalar;
  typedef typename Decomposition::RealScalar RealScalar;
  typedef typename internal::plain_col_type<MatrixType>::type Vector;
  typedef typename internal::plain_col_type<MatrixType, RealScalar>::type RealVector;
  const bool is_complex = (NumTraits<Scalar>::IsComplex != 0);
  eigen_assert(dec.rows() == dec.cols());
  const Index n = dec.rows();
  if (n == 0)
    return 0;
  // Disable Index to float conversion warning
#ifdef __INTEL_COMPILER
  #pragma warning push
  #pragma warning ( disable : 2259 )
#endif
  // Initial probe: v = inv(matrix) * (uniform vector of 1/n entries).
  Vector v = dec.solve(Vector::Ones(n) / Scalar(n));
#ifdef __INTEL_COMPILER
  #pragma warning pop
#endif
  // lower_bound is a lower bound on
  //   ||inv(matrix)||_1  = sup_v ||inv(matrix) v||_1 / ||v||_1
  // and is the objective maximized by the ("super-") gradient ascent
  // algorithm below.
  RealScalar lower_bound = v.template lpNorm<1>();
  if (n == 1)
    return lower_bound;
  // Gradient ascent algorithm follows: We know that the optimum is achieved at
  // one of the simplices v = e_i, so in each iteration we follow a
  // super-gradient to move towards the optimal one.
  RealScalar old_lower_bound = lower_bound;
  Vector sign_vector(n);
  Vector old_sign_vector;
  Index v_max_abs_index = -1;
  Index old_v_max_abs_index = v_max_abs_index;
  // At most 4 ascent steps, each performing up to two solves, which keeps the
  // total number of solve() calls small (O(n^2) overall work).
  for (int k = 0; k < 4; ++k)
  {
    sign_vector = internal::rcond_compute_sign<Vector, RealVector, is_complex>::run(v);
    if (k > 0 && !is_complex && sign_vector == old_sign_vector) {
      // Break if the solution stagnated.
      break;
    }
    // v_max_abs_index = argmax |real( inv(matrix)^T * sign_vector )|
    v = dec.adjoint().solve(sign_vector);
    v.real().cwiseAbs().maxCoeff(&v_max_abs_index);
    if (v_max_abs_index == old_v_max_abs_index) {
      // Break if the solution stagnated.
      break;
    }
    // Move to the new simplex e_j, where j = v_max_abs_index.
    v = dec.solve(Vector::Unit(n, v_max_abs_index));  // v = inv(matrix) * e_j.
    lower_bound = v.template lpNorm<1>();
    if (lower_bound <= old_lower_bound) {
      // Break if the gradient step did not increase the lower_bound.
      break;
    }
    if (!is_complex) {
      old_sign_vector = sign_vector;
    }
    old_v_max_abs_index = v_max_abs_index;
    old_lower_bound = lower_bound;
  }
  // The following calculates an independent estimate of ||matrix||_1 by
  // multiplying matrix by a vector with entries of slowly increasing
  // magnitude and alternating sign:
  //   v_i = (-1)^{i} (1 + (i / (dim-1))), i = 0,...,dim-1.
  // This improvement to Hager's algorithm above is due to Higham. It was
  // added to make the algorithm more robust in certain corner cases where
  // large elements in the matrix might otherwise escape detection due to
  // exact cancellation (especially when op and op_adjoint correspond to a
  // sequence of backsubstitutions and permutations), which could cause
  // Hager's algorithm to vastly underestimate ||matrix||_1.
  Scalar alternating_sign(RealScalar(1));
  for (Index i = 0; i < n; ++i) {
    // The static_cast is needed when Scalar is a complex and RealScalar implements expression templates
    v[i] = alternating_sign * static_cast<RealScalar>(RealScalar(1) + (RealScalar(i) / (RealScalar(n - 1))));
    alternating_sign = -alternating_sign;
  }
  v = dec.solve(v);
  const RealScalar alternate_lower_bound = (2 * v.template lpNorm<1>()) / (3 * RealScalar(n));
  // Return the better (larger) of the two lower bounds.
  return numext::maxi(lower_bound, alternate_lower_bound);
}
/** \brief Reciprocal condition number estimator.
*
* Computing a decomposition of a dense matrix takes O(n^3) operations, while
* this method estimates the condition number quickly and reliably in O(n^2)
* operations.
*
* \returns an estimate of the reciprocal condition number
* (1 / (||matrix||_1 * ||inv(matrix)||_1)) of matrix, given ||matrix||_1 and
* its decomposition. Supports the following decompositions: FullPivLU,
* PartialPivLU, LDLT, and LLT.
*
* \sa FullPivLU, PartialPivLU, LDLT, LLT.
*/
template <typename Decomposition>
typename Decomposition::RealScalar
rcond_estimate_helper(typename Decomposition::RealScalar matrix_norm, const Decomposition& dec)
{
  typedef typename Decomposition::RealScalar RealScalar;
  eigen_assert(dec.rows() == dec.cols());
  // Degenerate cases first: an empty matrix is treated as perfectly
  // conditioned, a zero norm as singular, and a 1x1 matrix has rcond == 1.
  if (dec.rows() == 0)
    return RealScalar(1);
  if (matrix_norm == RealScalar(0))
    return RealScalar(0);
  if (dec.rows() == 1)
    return RealScalar(1);
  // rcond = 1 / (||matrix||_1 * ||inv(matrix)||_1), with the inverse norm
  // obtained from the O(n^2) estimator above.
  const RealScalar inverse_matrix_norm = rcond_invmatrix_L1_norm_estimate(dec);
  if (inverse_matrix_norm == RealScalar(0))
    return RealScalar(0);
  return (RealScalar(1) / inverse_matrix_norm) / matrix_norm;
}
} // namespace internal
} // namespace Eigen
#endif
| 6,970 | 38.607955 | 109 | h |
abess | abess-master/python/include/Eigen/src/Core/CoreIterators.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_COREITERATORS_H
#define EIGEN_COREITERATORS_H
namespace Eigen {
/* This file contains the respective InnerIterator definition of the expressions defined in Eigen/Core
*/
namespace internal {
template<typename XprType, typename EvaluatorKind>
class inner_iterator_selector;
}
/** \class InnerIterator
* \brief An InnerIterator allows to loop over the element of any matrix expression.
*
* \warning To be used with care because an evaluator is constructed every time an InnerIterator iterator is constructed.
*
* TODO: add a usage example
*/
template<typename XprType>
class InnerIterator
{
protected:
  typedef internal::inner_iterator_selector<XprType, typename internal::evaluator_traits<XprType>::Kind> IteratorType;
  typedef internal::evaluator<XprType> EvaluatorType;
  typedef typename internal::traits<XprType>::Scalar Scalar;
public:
  /** Construct an iterator over the \a outerId -th row or column of \a xpr */
  InnerIterator(const XprType &xpr, const Index &outerId)
    : m_eval(xpr), m_iter(m_eval, outerId, xpr.innerSize())
  {}
  /// \returns the value of the current coefficient.
  EIGEN_STRONG_INLINE Scalar value() const          { return m_iter.value(); }
  /** Increment the iterator \c *this to the next non-zero coefficient.
    * Explicit zeros are not skipped over. To skip explicit zeros, see class SparseView
    */
  EIGEN_STRONG_INLINE InnerIterator& operator++()   { m_iter.operator++(); return *this; }
  /// \returns the column or row index of the current coefficient.
  EIGEN_STRONG_INLINE Index index() const           { return m_iter.index(); }
  /// \returns the row index of the current coefficient.
  EIGEN_STRONG_INLINE Index row() const             { return m_iter.row(); }
  /// \returns the column index of the current coefficient.
  EIGEN_STRONG_INLINE Index col() const             { return m_iter.col(); }
  /// \returns \c true if the iterator \c *this still references a valid coefficient.
  EIGEN_STRONG_INLINE operator bool() const         { return m_iter; }
protected:
  // m_eval must be declared before m_iter: the iterator is constructed from
  // the evaluator in the constructor's initializer list.
  EvaluatorType m_eval;
  IteratorType m_iter;
private:
  // If you get here, then you're not using the right InnerIterator type, e.g.:
  //   SparseMatrix<double,RowMajor> A;
  //   SparseMatrix<double>::InnerIterator it(A,0);
  template<typename T> InnerIterator(const EigenBase<T>&,Index outer);
};
namespace internal {
// Generic inner iterator implementation for dense objects
template<typename XprType>
class inner_iterator_selector<XprType, IndexBased>
{
protected:
  typedef evaluator<XprType> EvaluatorType;
  typedef typename traits<XprType>::Scalar Scalar;
  enum { IsRowMajor = (XprType::Flags&RowMajorBit)==RowMajorBit };
public:
  EIGEN_STRONG_INLINE inner_iterator_selector(const EvaluatorType &eval, const Index &outerId, const Index &innerSize)
    : m_eval(eval), m_inner(0), m_outer(outerId), m_end(innerSize)
  {}
  // Fetch the current coefficient; the inner index runs along rows for
  // column-major expressions and along columns for row-major ones.
  EIGEN_STRONG_INLINE Scalar value() const
  {
    if(IsRowMajor)
      return m_eval.coeff(m_outer, m_inner);
    return m_eval.coeff(m_inner, m_outer);
  }
  EIGEN_STRONG_INLINE inner_iterator_selector& operator++() { ++m_inner; return *this; }
  EIGEN_STRONG_INLINE Index index() const { return m_inner; }
  inline Index row() const { return IsRowMajor ? m_outer : index(); }
  inline Index col() const { return IsRowMajor ? index() : m_outer; }
  // Valid as long as the inner cursor stays inside [0, m_end).
  EIGEN_STRONG_INLINE operator bool() const { return m_inner >= 0 && m_inner < m_end; }
protected:
  const EvaluatorType& m_eval; // evaluator of the visited expression
  Index m_inner;               // running inner index
  const Index m_outer;         // fixed outer index (row or column)
  const Index m_end;           // one past the last inner index
};
// For iterator-based evaluator, inner-iterator is already implemented as
// evaluator<>::InnerIterator
template<typename XprType>
class inner_iterator_selector<XprType, IteratorBased>
 : public evaluator<XprType>::InnerIterator
{
protected:
  typedef typename evaluator<XprType>::InnerIterator Base;
  typedef evaluator<XprType> EvaluatorType;
public:
  // Simply forwards to the evaluator's own InnerIterator; the innerSize
  // argument is unused because the evaluator already knows its extents.
  EIGEN_STRONG_INLINE inner_iterator_selector(const EvaluatorType &eval, const Index &outerId, const Index &/*innerSize*/)
    : Base(eval, outerId)
  {}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_COREITERATORS_H
| 4,525 | 34.359375 | 122 | h |
abess | abess-master/python/include/Eigen/src/Core/CwiseTernaryOp.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <[email protected]>
// Copyright (C) 2006-2008 Benoit Jacob <[email protected]>
// Copyright (C) 2016 Eugene Brevdo <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CWISE_TERNARY_OP_H
#define EIGEN_CWISE_TERNARY_OP_H
namespace Eigen {
namespace internal {
template <typename TernaryOp, typename Arg1, typename Arg2, typename Arg3>
struct traits<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> > {
  // we must not inherit from traits<Arg1> since it has
  // the potential to cause problems with MSVC
  typedef typename remove_all<Arg1>::type Ancestor;
  typedef typename traits<Ancestor>::XprKind XprKind;
  // Compile-time sizes are taken from the first argument only.
  enum {
    RowsAtCompileTime = traits<Ancestor>::RowsAtCompileTime,
    ColsAtCompileTime = traits<Ancestor>::ColsAtCompileTime,
    MaxRowsAtCompileTime = traits<Ancestor>::MaxRowsAtCompileTime,
    MaxColsAtCompileTime = traits<Ancestor>::MaxColsAtCompileTime
  };
  // even though we require Arg1, Arg2, and Arg3 to have the same scalar type
  // (see CwiseTernaryOp constructor),
  // we still want to handle the case when the result type is different.
  typedef typename result_of<TernaryOp(
      const typename Arg1::Scalar&, const typename Arg2::Scalar&,
      const typename Arg3::Scalar&)>::type Scalar;
  typedef typename internal::traits<Arg1>::StorageKind StorageKind;
  typedef typename internal::traits<Arg1>::StorageIndex StorageIndex;
  typedef typename Arg1::Nested Arg1Nested;
  typedef typename Arg2::Nested Arg2Nested;
  typedef typename Arg3::Nested Arg3Nested;
  typedef typename remove_reference<Arg1Nested>::type _Arg1Nested;
  typedef typename remove_reference<Arg2Nested>::type _Arg2Nested;
  typedef typename remove_reference<Arg3Nested>::type _Arg3Nested;
  // Only the storage-order bit of the first argument is propagated.
  enum { Flags = _Arg1Nested::Flags & RowMajorBit };
};
} // end namespace internal
template <typename TernaryOp, typename Arg1, typename Arg2, typename Arg3,
typename StorageKind>
class CwiseTernaryOpImpl;
/** \class CwiseTernaryOp
* \ingroup Core_Module
*
* \brief Generic expression where a coefficient-wise ternary operator is
* applied to two expressions
*
* \tparam TernaryOp template functor implementing the operator
* \tparam Arg1Type the type of the first argument
* \tparam Arg2Type the type of the second argument
* \tparam Arg3Type the type of the third argument
*
* This class represents an expression where a coefficient-wise ternary
* operator is applied to three expressions.
* It is the return type of ternary operators, by which we mean only those
* ternary operators where
* all three arguments are Eigen expressions.
* For example, the return type of betainc(matrix1, matrix2, matrix3) is a
* CwiseTernaryOp.
*
* Most of the time, this is the only way that it is used, so you typically
* don't have to name
* CwiseTernaryOp types explicitly.
*
* \sa MatrixBase::ternaryExpr(const MatrixBase<Argument2> &, const
* MatrixBase<Argument3> &, const CustomTernaryOp &) const, class CwiseBinaryOp,
* class CwiseUnaryOp, class CwiseNullaryOp
*/
template <typename TernaryOp, typename Arg1Type, typename Arg2Type,
          typename Arg3Type>
class CwiseTernaryOp : public CwiseTernaryOpImpl<
                           TernaryOp, Arg1Type, Arg2Type, Arg3Type,
                           typename internal::traits<Arg1Type>::StorageKind>,
                       internal::no_assignment_operator
{
 public:
  typedef typename internal::remove_all<Arg1Type>::type Arg1;
  typedef typename internal::remove_all<Arg2Type>::type Arg2;
  typedef typename internal::remove_all<Arg3Type>::type Arg3;
  typedef typename CwiseTernaryOpImpl<
      TernaryOp, Arg1Type, Arg2Type, Arg3Type,
      typename internal::traits<Arg1Type>::StorageKind>::Base Base;
  EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseTernaryOp)
  typedef typename internal::ref_selector<Arg1Type>::type Arg1Nested;
  typedef typename internal::ref_selector<Arg2Type>::type Arg2Nested;
  typedef typename internal::ref_selector<Arg3Type>::type Arg3Nested;
  typedef typename internal::remove_reference<Arg1Nested>::type _Arg1Nested;
  typedef typename internal::remove_reference<Arg2Nested>::type _Arg2Nested;
  typedef typename internal::remove_reference<Arg3Nested>::type _Arg3Nested;
  /** Builds the expression; all three arguments must have matching sizes
    * (checked statically where possible, at runtime otherwise) and matching
    * storage kinds. */
  EIGEN_DEVICE_FUNC
  EIGEN_STRONG_INLINE CwiseTernaryOp(const Arg1& a1, const Arg2& a2,
                                     const Arg3& a3,
                                     const TernaryOp& func = TernaryOp())
      : m_arg1(a1), m_arg2(a2), m_arg3(a3), m_functor(func) {
    // require the sizes to match
    EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Arg1, Arg2)
    EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Arg1, Arg3)
    // The index types should match
    EIGEN_STATIC_ASSERT((internal::is_same<
                         typename internal::traits<Arg1Type>::StorageKind,
                         typename internal::traits<Arg2Type>::StorageKind>::value),
                        STORAGE_KIND_MUST_MATCH)
    EIGEN_STATIC_ASSERT((internal::is_same<
                         typename internal::traits<Arg1Type>::StorageKind,
                         typename internal::traits<Arg3Type>::StorageKind>::value),
                        STORAGE_KIND_MUST_MATCH)
    eigen_assert(a1.rows() == a2.rows() && a1.cols() == a2.cols() &&
                 a1.rows() == a3.rows() && a1.cols() == a3.cols());
  }
  EIGEN_DEVICE_FUNC
  EIGEN_STRONG_INLINE Index rows() const {
    // return the fixed size type if available to enable compile time
    // optimizations
    if (internal::traits<typename internal::remove_all<Arg1Nested>::type>::
                RowsAtCompileTime == Dynamic &&
        internal::traits<typename internal::remove_all<Arg2Nested>::type>::
                RowsAtCompileTime == Dynamic)
      return m_arg3.rows();
    else if (internal::traits<typename internal::remove_all<Arg1Nested>::type>::
                     RowsAtCompileTime == Dynamic &&
             internal::traits<typename internal::remove_all<Arg3Nested>::type>::
                     RowsAtCompileTime == Dynamic)
      return m_arg2.rows();
    else
      return m_arg1.rows();
  }
  EIGEN_DEVICE_FUNC
  EIGEN_STRONG_INLINE Index cols() const {
    // return the fixed size type if available to enable compile time
    // optimizations
    if (internal::traits<typename internal::remove_all<Arg1Nested>::type>::
                ColsAtCompileTime == Dynamic &&
        internal::traits<typename internal::remove_all<Arg2Nested>::type>::
                ColsAtCompileTime == Dynamic)
      return m_arg3.cols();
    else if (internal::traits<typename internal::remove_all<Arg1Nested>::type>::
                     ColsAtCompileTime == Dynamic &&
             internal::traits<typename internal::remove_all<Arg3Nested>::type>::
                     ColsAtCompileTime == Dynamic)
      return m_arg2.cols();
    else
      return m_arg1.cols();
  }
  /** \returns the first argument nested expression */
  EIGEN_DEVICE_FUNC
  const _Arg1Nested& arg1() const { return m_arg1; }
  /** \returns the second argument nested expression */
  EIGEN_DEVICE_FUNC
  const _Arg2Nested& arg2() const { return m_arg2; }
  /** \returns the third argument nested expression */
  EIGEN_DEVICE_FUNC
  const _Arg3Nested& arg3() const { return m_arg3; }
  /** \returns the functor representing the ternary operation */
  EIGEN_DEVICE_FUNC
  const TernaryOp& functor() const { return m_functor; }
 protected:
  Arg1Nested m_arg1;
  Arg2Nested m_arg2;
  Arg3Nested m_arg3;
  const TernaryOp m_functor;
};
// Generic API dispatcher
template <typename TernaryOp, typename Arg1, typename Arg2, typename Arg3,
          typename StorageKind>
class CwiseTernaryOpImpl
    : public internal::generic_xpr_base<
          CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >::type {
 public:
  // Default dispatcher: expose the storage-kind specific base class selected
  // by generic_xpr_base; specializations may override this for other kinds.
  typedef typename internal::generic_xpr_base<
      CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >::type Base;
};
} // end namespace Eigen
#endif // EIGEN_CWISE_TERNARY_OP_H
| 8,256 | 40.70202 | 83 | h |
abess | abess-master/python/include/Eigen/src/Core/DiagonalProduct.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <[email protected]>
// Copyright (C) 2007-2009 Benoit Jacob <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_DIAGONALPRODUCT_H
#define EIGEN_DIAGONALPRODUCT_H
namespace Eigen {
/** \returns the diagonal matrix product of \c *this by the diagonal matrix \a diagonal.
*/
template<typename Derived>
template<typename DiagonalDerived>
inline const Product<Derived, DiagonalDerived, LazyProduct>
MatrixBase<Derived>::operator*(const DiagonalBase<DiagonalDerived> &a_diagonal) const
{
  // Build a lazy product expression node; nothing is computed until the
  // expression is evaluated.
  typedef Product<Derived, DiagonalDerived, LazyProduct> ResultType;
  return ResultType(derived(), a_diagonal.derived());
}
} // end namespace Eigen
#endif // EIGEN_DIAGONALPRODUCT_H
| 970 | 32.482759 | 88 | h |
abess | abess-master/python/include/Eigen/src/Core/EigenBase.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Benoit Jacob <[email protected]>
// Copyright (C) 2009 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_EIGENBASE_H
#define EIGEN_EIGENBASE_H
namespace Eigen {
/** \class EigenBase
* \ingroup Core_Module
*
* Common base class for all classes T such that MatrixBase has an operator=(T) and a constructor MatrixBase(T).
*
* In other words, an EigenBase object is an object that can be copied into a MatrixBase.
*
* Besides MatrixBase-derived classes, this also includes special matrix classes such as diagonal matrices, etc.
*
* Notice that this class is trivial, it is only used to disambiguate overloaded functions.
*
* \sa \blank \ref TopicClassHierarchy
*/
template<typename Derived> struct EigenBase
{
//  typedef typename internal::plain_matrix_type<Derived>::type PlainObject;
  /** \brief The interface type of indices
    * \details To change this, \c \#define the preprocessor symbol \c EIGEN_DEFAULT_DENSE_INDEX_TYPE.
    * \deprecated Since Eigen 3.3, its usage is deprecated. Use Eigen::Index instead.
    * \sa StorageIndex, \ref TopicPreprocessorDirectives.
    */
  typedef Eigen::Index Index;
  // FIXME is it needed?
  typedef typename internal::traits<Derived>::StorageKind StorageKind;
  /** \returns a reference to the derived object (CRTP downcast) */
  EIGEN_DEVICE_FUNC
  Derived& derived() { return *static_cast<Derived*>(this); }
  /** \returns a const reference to the derived object */
  EIGEN_DEVICE_FUNC
  const Derived& derived() const { return *static_cast<const Derived*>(this); }
  // Internal escape hatch: obtain a mutable derived reference from a const *this.
  EIGEN_DEVICE_FUNC
  inline Derived& const_cast_derived() const
  { return *static_cast<Derived*>(const_cast<EigenBase*>(this)); }
  EIGEN_DEVICE_FUNC
  inline const Derived& const_derived() const
  { return *static_cast<const Derived*>(this); }
  /** \returns the number of rows. \sa cols(), RowsAtCompileTime */
  EIGEN_DEVICE_FUNC
  inline Index rows() const { return derived().rows(); }
  /** \returns the number of columns. \sa rows(), ColsAtCompileTime*/
  EIGEN_DEVICE_FUNC
  inline Index cols() const { return derived().cols(); }
  /** \returns the number of coefficients, which is rows()*cols().
    * \sa rows(), cols(), SizeAtCompileTime. */
  EIGEN_DEVICE_FUNC
  inline Index size() const { return rows() * cols(); }
  /** \internal Don't use it, but do the equivalent: \code dst = *this; \endcode */
  template<typename Dest>
  EIGEN_DEVICE_FUNC
  inline void evalTo(Dest& dst) const
  { derived().evalTo(dst); }
  /** \internal Don't use it, but do the equivalent: \code dst += *this; \endcode */
  template<typename Dest>
  EIGEN_DEVICE_FUNC
  inline void addTo(Dest& dst) const
  {
    // This is the default implementation,
    // derived class can reimplement it in a more optimized way.
    // Evaluate into a temporary, then accumulate.
    typename Dest::PlainObject res(rows(),cols());
    evalTo(res);
    dst += res;
  }
  /** \internal Don't use it, but do the equivalent: \code dst -= *this; \endcode */
  template<typename Dest>
  EIGEN_DEVICE_FUNC
  inline void subTo(Dest& dst) const
  {
    // This is the default implementation,
    // derived class can reimplement it in a more optimized way.
    // Evaluate into a temporary, then subtract.
    typename Dest::PlainObject res(rows(),cols());
    evalTo(res);
    dst -= res;
  }
  /** \internal Don't use it, but do the equivalent: \code dst.applyOnTheRight(*this); \endcode */
  template<typename Dest>
  EIGEN_DEVICE_FUNC inline void applyThisOnTheRight(Dest& dst) const
  {
    // This is the default implementation,
    // derived class can reimplement it in a more optimized way.
    dst = dst * this->derived();
  }
  /** \internal Don't use it, but do the equivalent: \code dst.applyOnTheLeft(*this); \endcode */
  template<typename Dest>
  EIGEN_DEVICE_FUNC inline void applyThisOnTheLeft(Dest& dst) const
  {
    // This is the default implementation,
    // derived class can reimplement it in a more optimized way.
    dst = this->derived() * dst;
  }
};
/***************************************************************************
* Implementation of matrix base methods
***************************************************************************/
/** \brief Copies the generic expression \a other into *this.
*
* \details The expression must provide a (templated) evalTo(Derived& dst) const
* function which does the actual job. In practice, this allows any user to write
* its own special matrix without having to modify MatrixBase
*
* \returns a reference to *this.
*/
template<typename Derived>
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
Derived& DenseBase<Derived>::operator=(const EigenBase<OtherDerived> &other)
{
  // Evaluate 'other' into *this through the internal assignment machinery.
  call_assignment(derived(), other.derived());
  return derived();
}
template<typename Derived>
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
Derived& DenseBase<Derived>::operator+=(const EigenBase<OtherDerived> &other)
{
  // Accumulate 'other' into *this via the add-assign functor (dst += src).
  call_assignment(derived(), other.derived(), internal::add_assign_op<Scalar,typename OtherDerived::Scalar>());
  return derived();
}
template<typename Derived>
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
Derived& DenseBase<Derived>::operator-=(const EigenBase<OtherDerived> &other)
{
  // Subtract 'other' from *this via the sub-assign functor (dst -= src).
  call_assignment(derived(), other.derived(), internal::sub_assign_op<Scalar,typename OtherDerived::Scalar>());
  return derived();
}
} // end namespace Eigen
#endif // EIGEN_EIGENBASE_H
| 5,619 | 34.125 | 113 | h |
abess | abess-master/python/include/Eigen/src/Core/GenericPacketMath.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <[email protected]>
// Copyright (C) 2006-2008 Benoit Jacob <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_GENERIC_PACKET_MATH_H
#define EIGEN_GENERIC_PACKET_MATH_H
namespace Eigen {
namespace internal {
/** \internal
* \file GenericPacketMath.h
*
* Default implementation for types not supported by the vectorization.
* In practice these functions are provided to make easier the writing
* of generic vectorized code.
*/
#ifndef EIGEN_DEBUG_ALIGNED_LOAD
#define EIGEN_DEBUG_ALIGNED_LOAD
#endif
#ifndef EIGEN_DEBUG_UNALIGNED_LOAD
#define EIGEN_DEBUG_UNALIGNED_LOAD
#endif
#ifndef EIGEN_DEBUG_ALIGNED_STORE
#define EIGEN_DEBUG_ALIGNED_STORE
#endif
#ifndef EIGEN_DEBUG_UNALIGNED_STORE
#define EIGEN_DEBUG_UNALIGNED_STORE
#endif
// Feature flags advertised by packet_traits specializations: a flag set to 1
// means the corresponding packet primitive (padd, psub, psqrt, ...) is
// available for the type. Architecture-specific specializations override
// these defaults.
struct default_packet_traits
{
  enum {
    HasHalfPacket = 0,
    HasAdd    = 1,
    HasSub    = 1,
    HasMul    = 1,
    HasNegate = 1,
    HasAbs    = 1,
    HasArg    = 0,
    HasAbs2   = 1,
    HasMin    = 1,
    HasMax    = 1,
    HasConj   = 1,
    HasSetLinear = 1,
    HasBlend  = 0,
    HasDiv    = 0,
    HasSqrt   = 0,
    HasRsqrt  = 0,
    HasExp    = 0,
    HasLog    = 0,
    HasLog1p  = 0,
    HasLog10  = 0,
    HasPow    = 0,
    HasSin    = 0,
    HasCos    = 0,
    HasTan    = 0,
    HasASin   = 0,
    HasACos   = 0,
    HasATan   = 0,
    HasSinh   = 0,
    HasCosh   = 0,
    HasTanh   = 0,
    HasLGamma = 0,
    HasDiGamma = 0,
    HasZeta = 0,
    HasPolygamma = 0,
    HasErf = 0,
    HasErfc = 0,
    HasIGamma = 0,
    HasIGammac = 0,
    HasBetaInc = 0,
    HasRound  = 0,
    HasFloor  = 0,
    HasCeil   = 0,
    HasSign   = 0
  };
};
template<typename T> struct packet_traits : default_packet_traits
{
typedef T type;
typedef T half;
enum {
Vectorizable = 0,
size = 1,
AlignedOnScalar = 0,
HasHalfPacket = 0
};
enum {
HasAdd = 0,
HasSub = 0,
HasMul = 0,
HasNegate = 0,
HasAbs = 0,
HasAbs2 = 0,
HasMin = 0,
HasMax = 0,
HasConj = 0,
HasSetLinear = 0
};
};
template<typename T> struct packet_traits<const T> : packet_traits<T> { };
template <typename Src, typename Tgt> struct type_casting_traits {
enum {
VectorizedCast = 0,
SrcCoeffRatio = 1,
TgtCoeffRatio = 1
};
};
/** \internal \returns static_cast<TgtType>(a) (coeff-wise) */
// The multi-argument overloads exist for conversions where several source
// packets are needed to fill one target packet (see type_casting_traits);
// the generic scalar fallbacks only use the first argument.
template <typename SrcPacket, typename TgtPacket>
EIGEN_DEVICE_FUNC inline TgtPacket
pcast(const SrcPacket& a) {
  return static_cast<TgtPacket>(a);
}
template <typename SrcPacket, typename TgtPacket>
EIGEN_DEVICE_FUNC inline TgtPacket
pcast(const SrcPacket& a, const SrcPacket& /*b*/) {
  return static_cast<TgtPacket>(a);
}
template <typename SrcPacket, typename TgtPacket>
EIGEN_DEVICE_FUNC inline TgtPacket
pcast(const SrcPacket& a, const SrcPacket& /*b*/, const SrcPacket& /*c*/, const SrcPacket& /*d*/) {
  return static_cast<TgtPacket>(a);
}
// The functions below are the scalar fallbacks of the packet API; SIMD
// back-ends overload them for their native packet types.
/** \internal \returns a + b (coeff-wise) */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
padd(const Packet& a,
        const Packet& b) { return a+b; }
/** \internal \returns a - b (coeff-wise) */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
psub(const Packet& a,
        const Packet& b) { return a-b; }
/** \internal \returns -a (coeff-wise) */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
pnegate(const Packet& a) { return -a; }
/** \internal \returns conj(a) (coeff-wise) */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
pconj(const Packet& a) { return numext::conj(a); }
/** \internal \returns a * b (coeff-wise) */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
pmul(const Packet& a,
        const Packet& b) { return a*b; }
/** \internal \returns a / b (coeff-wise) */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
pdiv(const Packet& a,
        const Packet& b) { return a/b; }
/** \internal \returns the min of \a a and \a b (coeff-wise) */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
pmin(const Packet& a,
        const Packet& b) { return numext::mini(a, b); }
/** \internal \returns the max of \a a and \a b (coeff-wise) */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
pmax(const Packet& a,
        const Packet& b) { return numext::maxi(a, b); }
/** \internal \returns the absolute value of \a a */
// Unqualified call after `using std::abs` enables ADL for user scalar types.
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
pabs(const Packet& a) { using std::abs; return abs(a); }
/** \internal \returns the phase angle of \a a */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
parg(const Packet& a) { using numext::arg; return arg(a); }
/** \internal \returns the bitwise and of \a a and \a b */
// The bitwise fallbacks are only instantiated for types providing the
// corresponding operators (i.e. integral scalars).
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
pand(const Packet& a, const Packet& b) { return a & b; }
/** \internal \returns the bitwise or of \a a and \a b */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
por(const Packet& a, const Packet& b) { return a | b; }
/** \internal \returns the bitwise xor of \a a and \a b */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
pxor(const Packet& a, const Packet& b) { return a ^ b; }
/** \internal \returns the bitwise andnot of \a a and \a b (i.e. a & ~b, coeff-wise) */
// Fixed: the generic fallback used logical negation (`a & (!b)`), which
// collapses b to 0 or 1 and therefore keeps at most the lowest bit of a.
// Bitwise `~` is required to match the per-bit and-not semantics implemented
// by the SIMD specializations (later Eigen defines this as pand(a, pnot(b))).
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
pandnot(const Packet& a, const Packet& b) { return a & (~b); }
/** \internal \returns a packet version of \a *from, from must be 16 bytes aligned */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
pload(const typename unpacket_traits<Packet>::type* from) { return *from; }
/** \internal \returns a packet version of \a *from, (un-aligned load) */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
ploadu(const typename unpacket_traits<Packet>::type* from) { return *from; }
/** \internal \returns a packet with constant coefficients \a a, e.g.: (a,a,a,a) */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
pset1(const typename unpacket_traits<Packet>::type& a) { return a; }
/** \internal \returns a packet with constant coefficients \a a[0], e.g.: (a[0],a[0],a[0],a[0]) */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
pload1(const typename unpacket_traits<Packet>::type *a) { return pset1<Packet>(*a); }
/** \internal \returns a packet with elements of \a *from duplicated.
  * For instance, for a packet of 8 elements, 4 scalars will be read from \a *from and
  * duplicated to form: {from[0],from[0],from[1],from[1],from[2],from[2],from[3],from[3]}
  * Currently, this function is only used for scalar * complex products.
  */
template<typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet
ploaddup(const typename unpacket_traits<Packet>::type* from) { return *from; }
/** \internal \returns a packet with elements of \a *from quadrupled.
  * For instance, for a packet of 8 elements, 2 scalars will be read from \a *from and
  * replicated to form: {from[0],from[0],from[0],from[0],from[1],from[1],from[1],from[1]}
  * Currently, this function is only used in matrix products.
  * For packet-size smaller or equal to 4, this function is equivalent to pload1
  */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
ploadquad(const typename unpacket_traits<Packet>::type* from)
{ return pload1<Packet>(from); }
/** \internal equivalent to
  * \code
  * a0 = pload1(a+0);
  * a1 = pload1(a+1);
  * a2 = pload1(a+2);
  * a3 = pload1(a+3);
  * \endcode
  * \sa pset1, pload1, ploaddup, pbroadcast2
  */
template<typename Packet> EIGEN_DEVICE_FUNC
inline void pbroadcast4(const typename unpacket_traits<Packet>::type *a,
                        Packet& a0, Packet& a1, Packet& a2, Packet& a3)
{
  a0 = pload1<Packet>(a+0);
  a1 = pload1<Packet>(a+1);
  a2 = pload1<Packet>(a+2);
  a3 = pload1<Packet>(a+3);
}
/** \internal equivalent to
  * \code
  * a0 = pload1(a+0);
  * a1 = pload1(a+1);
  * \endcode
  * \sa pset1, pload1, ploaddup, pbroadcast4
  */
template<typename Packet> EIGEN_DEVICE_FUNC
inline void pbroadcast2(const typename unpacket_traits<Packet>::type *a,
                        Packet& a0, Packet& a1)
{
  a0 = pload1<Packet>(a+0);
  a1 = pload1<Packet>(a+1);
}
/** \internal \brief Returns a packet with coefficients (a,a+1,...,a+packet_size-1). */
template<typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet
plset(const typename unpacket_traits<Packet>::type& a) { return a; }
/** \internal copy the packet \a from to \a *to, \a to must be 16 bytes aligned */
template<typename Scalar, typename Packet> EIGEN_DEVICE_FUNC inline void pstore(Scalar* to, const Packet& from)
{ (*to) = from; }
/** \internal copy the packet \a from to \a *to, (un-aligned store) */
template<typename Scalar, typename Packet> EIGEN_DEVICE_FUNC inline void pstoreu(Scalar* to, const Packet& from)
{ (*to) = from; }
// Scalar fallback of a strided gather: the stride is irrelevant when a
// "packet" holds a single element.
template<typename Scalar, typename Packet> EIGEN_DEVICE_FUNC inline Packet pgather(const Scalar* from, Index /*stride*/)
{ return ploadu<Packet>(from); }
// Scalar fallback of a strided scatter: likewise ignores the stride.
template<typename Scalar, typename Packet> EIGEN_DEVICE_FUNC inline void pscatter(Scalar* to, const Packet& from, Index /*stride*/)
{ pstore(to, from); }
/** \internal tries to do cache prefetching of \a addr */
// No-op on compilers without __builtin_prefetch; on CUDA devices it emits a
// PTX prefetch.L1 instruction (the "=l"/"=r" output constraint keeps addr live).
template<typename Scalar> EIGEN_DEVICE_FUNC inline void prefetch(const Scalar* addr)
{
#ifdef __CUDA_ARCH__
#if defined(__LP64__)
  // 64-bit pointer operand constraint for inlined asm
  asm(" prefetch.L1 [ %1 ];" : "=l"(addr) : "l"(addr));
#else
  // 32-bit pointer operand constraint for inlined asm
  asm(" prefetch.L1 [ %1 ];" : "=r"(addr) : "r"(addr));
#endif
#elif (!EIGEN_COMP_MSVC) && (EIGEN_COMP_GNUC || EIGEN_COMP_CLANG || EIGEN_COMP_ICC)
  __builtin_prefetch(addr);
#endif
}
/** \internal \returns the first element of a packet */
// For the scalar fallbacks below, a "packet" is a single value, so all
// reductions are the identity.
template<typename Packet> EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type pfirst(const Packet& a)
{ return a; }
/** \internal \returns a packet where the element i contains the sum of the packet of \a vec[i] */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
preduxp(const Packet* vecs) { return vecs[0]; }
/** \internal \returns the sum of the elements of \a a*/
template<typename Packet> EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux(const Packet& a)
{ return a; }
/** \internal \returns the sum of the elements of \a a by block of 4 elements.
  * For a packet {a0, a1, a2, a3, a4, a5, a6, a7}, it returns a half packet {a0+a4, a1+a5, a2+a6, a3+a7}
  * For packet-size smaller or equal to 4, this boils down to a noop.
  */
// The return type is the half packet only when the size is a multiple of 8,
// otherwise the full packet is returned unchanged.
template<typename Packet> EIGEN_DEVICE_FUNC inline
typename conditional<(unpacket_traits<Packet>::size%8)==0,typename unpacket_traits<Packet>::half,Packet>::type
predux_downto4(const Packet& a)
{ return a; }
/** \internal \returns the product of the elements of \a a*/
template<typename Packet> EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_mul(const Packet& a)
{ return a; }
/** \internal \returns the min of the elements of \a a*/
template<typename Packet> EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_min(const Packet& a)
{ return a; }
/** \internal \returns the max of the elements of \a a*/
template<typename Packet> EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_max(const Packet& a)
{ return a; }
/** \internal \returns the reversed elements of \a a*/
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet preverse(const Packet& a)
{ return a; }
/** \internal \returns \a a with real and imaginary part flipped (for complex type only) */
// imag()/real() are found by argument-dependent lookup on the packet type.
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet pcplxflip(const Packet& a)
{
  // FIXME: uncomment the following in case we drop the internal imag and real functions.
  // using std::imag;
  // using std::real;
  return Packet(imag(a),real(a));
}
/**************************
* Special math functions
***************************/
// Scalar fallbacks for the transcendental functions. The `using std::foo;`
// followed by an unqualified call enables argument-dependent lookup, so
// user-defined scalar types with their own overloads are supported.
/** \internal \returns the sine of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet psin(const Packet& a) { using std::sin; return sin(a); }
/** \internal \returns the cosine of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet pcos(const Packet& a) { using std::cos; return cos(a); }
/** \internal \returns the tan of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet ptan(const Packet& a) { using std::tan; return tan(a); }
/** \internal \returns the arc sine of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet pasin(const Packet& a) { using std::asin; return asin(a); }
/** \internal \returns the arc cosine of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet pacos(const Packet& a) { using std::acos; return acos(a); }
/** \internal \returns the arc tangent of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet patan(const Packet& a) { using std::atan; return atan(a); }
/** \internal \returns the hyperbolic sine of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet psinh(const Packet& a) { using std::sinh; return sinh(a); }
/** \internal \returns the hyperbolic cosine of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet pcosh(const Packet& a) { using std::cosh; return cosh(a); }
/** \internal \returns the hyperbolic tan of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet ptanh(const Packet& a) { using std::tanh; return tanh(a); }
/** \internal \returns the exp of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet pexp(const Packet& a) { using std::exp; return exp(a); }
/** \internal \returns the log of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet plog(const Packet& a) { using std::log; return log(a); }
/** \internal \returns the log1p of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet plog1p(const Packet& a) { return numext::log1p(a); }
/** \internal \returns the log10 of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet plog10(const Packet& a) { using std::log10; return log10(a); }
/** \internal \returns the square-root of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet psqrt(const Packet& a) { using std::sqrt; return sqrt(a); }
/** \internal \returns the reciprocal square-root of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet prsqrt(const Packet& a) {
  // Generic fallback: 1 / sqrt(a). Vectorized back-ends may override this
  // with a native (approximate) rsqrt instruction.
  const Packet one = pset1<Packet>(1);
  return pdiv(one, psqrt(a));
}
/** \internal \returns the rounded value of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet pround(const Packet& a) { using numext::round; return round(a); }
/** \internal \returns the floor of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet pfloor(const Packet& a) { using numext::floor; return floor(a); }
/** \internal \returns the ceil of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet pceil(const Packet& a) { using numext::ceil; return ceil(a); }
/***************************************************************************
* The following functions might not have to be overwritten for vectorized types
***************************************************************************/
/** \internal copy a packet with constant coefficient \a a (e.g., [a,a,a,a]) to \a *to. \a to must be 16 bytes aligned */
// NOTE: this function must really be templated on the packet type (think about different packet types for the same scalar type)
template<typename Packet>
inline void pstore1(typename unpacket_traits<Packet>::type* to, const typename unpacket_traits<Packet>::type& a)
{
  pstore(to, pset1<Packet>(a));
}
/** \internal \returns a * b + c (coeff-wise) */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
pmadd(const Packet& a,
      const Packet& b,
      const Packet& c)
{
  // Generic fallback: separate multiply then add; FMA-capable back-ends
  // provide a fused specialization.
  const Packet ab = pmul(a, b);
  return padd(ab, c);
}
/** \internal \returns a packet version of \a *from.
  * The pointer \a from must be aligned on a \a Alignment bytes boundary. */
template<typename Packet, int Alignment>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet ploadt(const typename unpacket_traits<Packet>::type* from)
{
  // Compile-time dispatch: take the aligned path only when the guaranteed
  // alignment is at least what the packet type requires.
  return (Alignment >= unpacket_traits<Packet>::alignment)
       ? pload<Packet>(from)
       : ploadu<Packet>(from);
}
/** \internal copy the packet \a from to \a *to.
  * The pointer \a from must be aligned on a \a Alignment bytes boundary. */
// Alignment is a compile-time constant, so the branch below is resolved at
// compile time.
template<typename Scalar, typename Packet, int Alignment>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void pstoret(Scalar* to, const Packet& from)
{
  if(Alignment >= unpacket_traits<Packet>::alignment)
    pstore(to, from);
  else
    pstoreu(to, from);
}
/** \internal \returns a packet version of \a *from.
  * Unlike ploadt, ploadt_ro takes advantage of the read-only memory path on the
  * hardware if available to speedup the loading of data that won't be modified
  * by the current computation.
  */
// Generic fallback: no read-only path, simply forward to ploadt.
template<typename Packet, int LoadMode>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet ploadt_ro(const typename unpacket_traits<Packet>::type* from)
{
  return ploadt<Packet, LoadMode>(from);
}
/** \internal default implementation of palign() allowing partial specialization */
template<int Offset,typename PacketType>
struct palign_impl
{
  // by default data are aligned, so there is nothing to be done :)
  static inline void run(PacketType&, const PacketType&) {}
};
/** \internal update \a first using the concatenation of the packet_size minus \a Offset last elements
  * of \a first and \a Offset first elements of \a second.
  *
  * This function is currently only used to optimize matrix-vector products on unaligned matrices.
  * It takes 2 packets that represent a contiguous memory array, and returns a packet starting
  * at the position \a Offset. For instance, for packets of 4 elements, we have:
  *  Input:
  *  - first = {f0,f1,f2,f3}
  *  - second = {s0,s1,s2,s3}
  * Output:
  *   - if Offset==0 then {f0,f1,f2,f3}
  *   - if Offset==1 then {f1,f2,f3,s0}
  *   - if Offset==2 then {f2,f3,s0,s1}
  *   - if Offset==3 then {f3,s0,s1,s3}
  */
template<int Offset,typename PacketType>
inline void palign(PacketType& first, const PacketType& second)
{
  palign_impl<Offset,PacketType>::run(first,second);
}
/***************************************************************************
* Fast complex products (GCC generates a function call which is very slow)
***************************************************************************/
// Eigen+CUDA does not support complexes.
#ifndef __CUDACC__
// Explicit specializations expanding (a.re*b.re - a.im*b.im,
// a.im*b.re + a.re*b.im) inline instead of calling libstdc++'s operator*.
template<> inline std::complex<float> pmul(const std::complex<float>& a, const std::complex<float>& b)
{ return std::complex<float>(real(a)*real(b) - imag(a)*imag(b), imag(a)*real(b) + real(a)*imag(b)); }
template<> inline std::complex<double> pmul(const std::complex<double>& a, const std::complex<double>& b)
{ return std::complex<double>(real(a)*real(b) - imag(a)*imag(b), imag(a)*real(b) + real(a)*imag(b)); }
#endif
/***************************************************************************
* PacketBlock, that is a collection of N packets where the number of words
* in the packet is a multiple of N.
***************************************************************************/
template <typename Packet,int N=unpacket_traits<Packet>::size> struct PacketBlock {
  Packet packet[N];
};
template<typename Packet> EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet,1>& /*kernel*/) {
  // Nothing to do in the scalar case, i.e. a 1x1 matrix.
}
/***************************************************************************
* Selector, i.e. vector of N boolean values used to select (i.e. blend)
* words from 2 packets.
***************************************************************************/
template <size_t N> struct Selector {
  bool select[N];
};
// Scalar fallback of a blend: only the first selector entry is meaningful.
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
pblend(const Selector<unpacket_traits<Packet>::size>& ifPacket, const Packet& thenPacket, const Packet& elsePacket) {
  return ifPacket.select[0] ? thenPacket : elsePacket;
}
/** \internal \returns \a a with the first coefficient replaced by the scalar b */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
pinsertfirst(const Packet& a, typename unpacket_traits<Packet>::type b)
{
  // Generic implementation via pblend: the mask selects b for lane 0 and
  // keeps a everywhere else. Specialize for higher performance.
  Selector<unpacket_traits<Packet>::size> mask;
  // The packet size is a compile-time constant, so this loop is optimized away.
  for(Index lane=0; lane<unpacket_traits<Packet>::size; ++lane)
    mask.select[lane] = (lane == 0);
  return pblend(mask, pset1<Packet>(b), a);
}
/** \internal \returns \a a with the last coefficient replaced by the scalar b */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
pinsertlast(const Packet& a, typename unpacket_traits<Packet>::type b)
{
  // Generic implementation via pblend: the mask selects b for the last lane
  // and keeps a everywhere else. Specialize for higher performance.
  Selector<unpacket_traits<Packet>::size> mask;
  // The packet size is a compile-time constant, so this loop is optimized away.
  for(Index lane=0; lane<unpacket_traits<Packet>::size; ++lane)
    mask.select[lane] = (lane == unpacket_traits<Packet>::size-1);
  return pblend(mask, pset1<Packet>(b), a);
}
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_GENERIC_PACKET_MATH_H
| 22,185 | 36.350168 | 132 | h |
abess | abess-master/python/include/Eigen/src/Core/Inverse.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_INVERSE_H
#define EIGEN_INVERSE_H
namespace Eigen {
template<typename XprType,typename StorageKind> class InverseImpl;
namespace internal {
// Traits of an Inverse expression: same as the plain-object result of the
// nested expression, except that only the storage-order bit of the flags is kept.
template<typename XprType>
struct traits<Inverse<XprType> >
  : traits<typename XprType::PlainObject>
{
  typedef typename XprType::PlainObject PlainObject;
  typedef traits<PlainObject> BaseTraits;
  enum {
    Flags = BaseTraits::Flags & RowMajorBit
  };
};
} // end namespace internal
/** \class Inverse
  *
  * \brief Expression of the inverse of another expression
  *
  * \tparam XprType the type of the expression we are taking the inverse
  *
  * This class represents an abstract expression of A.inverse()
  * and most of the time this is the only way it is used.
  *
  */
template<typename XprType>
class Inverse : public InverseImpl<XprType,typename internal::traits<XprType>::StorageKind>
{
public:
  typedef typename XprType::StorageIndex StorageIndex;
  typedef typename XprType::PlainObject                       PlainObject;
  typedef typename XprType::Scalar                            Scalar;
  typedef typename internal::ref_selector<XprType>::type      XprTypeNested;
  typedef typename internal::remove_all<XprTypeNested>::type  XprTypeNestedCleaned;
  typedef typename internal::ref_selector<Inverse>::type Nested;
  typedef typename internal::remove_all<XprType>::type NestedExpression;
  // Stores (a reference or copy of, per ref_selector) the expression to invert.
  explicit EIGEN_DEVICE_FUNC Inverse(const XprType &xpr)
    : m_xpr(xpr)
  {}
  // The inverse has the same dimensions as the nested expression.
  EIGEN_DEVICE_FUNC Index rows() const { return m_xpr.rows(); }
  EIGEN_DEVICE_FUNC Index cols() const { return m_xpr.cols(); }
  // Read access to the wrapped expression.
  EIGEN_DEVICE_FUNC const XprTypeNestedCleaned& nestedExpression() const { return m_xpr; }
protected:
  XprTypeNested m_xpr;
};
// Generic API dispatcher
template<typename XprType, typename StorageKind>
class InverseImpl
  : public internal::generic_xpr_base<Inverse<XprType> >::type
{
public:
  typedef typename internal::generic_xpr_base<Inverse<XprType> >::type Base;
  typedef typename XprType::Scalar Scalar;
private:
  // Declared private and left undefined: direct coefficient-wise access to an
  // Inverse expression is intentionally not available.
  Scalar coeff(Index row, Index col) const;
  Scalar coeff(Index i) const;
};
namespace internal {
/** \internal
  * \brief Default evaluator for Inverse expression.
  *
  * This default evaluator for Inverse expression simply evaluates the inverse into a temporary
  * by a call to internal::call_assignment_no_alias.
  * Therefore, inverse implementers only have to specialize Assignment<Dst,Inverse<...>, ...> for
  * their own nested expression.
  *
  * \sa class Inverse
  */
template<typename ArgType>
struct unary_evaluator<Inverse<ArgType> >
  : public evaluator<typename Inverse<ArgType>::PlainObject>
{
  typedef Inverse<ArgType> InverseType;
  typedef typename InverseType::PlainObject PlainObject;
  typedef evaluator<PlainObject> Base;
  enum { Flags = Base::Flags | EvalBeforeNestingBit };
  unary_evaluator(const InverseType& inv_xpr)
    : m_result(inv_xpr.rows(), inv_xpr.cols())
  {
    // m_result is sized first; the placement-new then re-initializes the
    // evaluator base so that it points at m_result before the inverse is
    // computed into it.
    ::new (static_cast<Base*>(this)) Base(m_result);
    internal::call_assignment_no_alias(m_result, inv_xpr);
  }
protected:
  PlainObject m_result;
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_INVERSE_H
| 3,519 | 28.579832 | 97 | h |
abess | abess-master/python/include/Eigen/src/Core/MathFunctionsImpl.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Pedro Gonnet ([email protected])
// Copyright (C) 2016 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_MATHFUNCTIONSIMPL_H
#define EIGEN_MATHFUNCTIONSIMPL_H
namespace Eigen {
namespace internal {
/** \internal \returns the hyperbolic tan of \a a (coeff-wise)
    Rational approximation of tanh: a degree-13 odd numerator polynomial
    divided by a degree-6 even denominator polynomial. Accurate up to a
    couple of ulp on [-9, 9]; inputs are clamped to that interval since
    tanh saturates to +/-1 outside of it.
    This implementation works on both scalars and packets.
*/
template<typename T>
T generic_fast_tanh_float(const T& a_x)
{
  // Saturation bounds: |x| >= 9 already yields +/-1 in single precision.
  const T plus_9  = pset1<T>(9.f);
  const T minus_9 = pset1<T>(-9.f);
  // NOTE GCC prior to 6.3 might improperly optimize this max/min
  // step such that if a_x is nan, the clamped value will be either 9 or -9,
  // and tanh will return 1 or -1 instead of nan.
  // This is supposed to be fixed in gcc6.3,
  // see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
  const T clamped = pmax(minus_9,pmin(plus_9,a_x));
  // Coefficients of the numerator polynomial (odd monomials only).
  const T alpha_1 = pset1<T>(4.89352455891786e-03f);
  const T alpha_3 = pset1<T>(6.37261928875436e-04f);
  const T alpha_5 = pset1<T>(1.48572235717979e-05f);
  const T alpha_7 = pset1<T>(5.12229709037114e-08f);
  const T alpha_9 = pset1<T>(-8.60467152213735e-11f);
  const T alpha_11 = pset1<T>(2.00018790482477e-13f);
  const T alpha_13 = pset1<T>(-2.76076847742355e-16f);
  // Coefficients of the denominator polynomial (even monomials only).
  const T beta_0 = pset1<T>(4.89352518554385e-03f);
  const T beta_2 = pset1<T>(2.26843463243900e-03f);
  const T beta_4 = pset1<T>(1.18534705686654e-04f);
  const T beta_6 = pset1<T>(1.19825839466702e-06f);
  // Both polynomials are evaluated in x^2 via Horner's scheme below.
  const T x_sq = pmul(clamped, clamped);
  // Numerator p(x) = x * P(x^2).
  T numer = pmadd(x_sq, alpha_13, alpha_11);
  numer = pmadd(x_sq, numer, alpha_9);
  numer = pmadd(x_sq, numer, alpha_7);
  numer = pmadd(x_sq, numer, alpha_5);
  numer = pmadd(x_sq, numer, alpha_3);
  numer = pmadd(x_sq, numer, alpha_1);
  numer = pmul(clamped, numer);
  // Denominator q(x) = Q(x^2).
  T denom = pmadd(x_sq, beta_6, beta_4);
  denom = pmadd(x_sq, denom, beta_2);
  denom = pmadd(x_sq, denom, beta_0);
  // tanh(x) ~= p(x) / q(x).
  return pdiv(numer, denom);
}
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_MATHFUNCTIONSIMPL_H
| 2,776 | 34.151899 | 76 | h |
abess | abess-master/python/include/Eigen/src/Core/NestByValue.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <[email protected]>
// Copyright (C) 2006-2008 Benoit Jacob <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_NESTBYVALUE_H
#define EIGEN_NESTBYVALUE_H
namespace Eigen {
namespace internal {
// NestByValue is a transparent wrapper: it exposes exactly the traits of the
// wrapped expression.
template<typename ExpressionType>
struct traits<NestByValue<ExpressionType> > : public traits<ExpressionType>
{};
}
/** \class NestByValue
  * \ingroup Core_Module
  *
  * \brief Expression which must be nested by value
  *
  * \tparam ExpressionType the type of the object of which we are requiring nesting-by-value
  *
  * This class is the return type of MatrixBase::nestByValue()
  * and most of the time this is the only way it is used.
  *
  * \sa MatrixBase::nestByValue()
  */
template<typename ExpressionType> class NestByValue
  : public internal::dense_xpr_base< NestByValue<ExpressionType> >::type
{
  public:
    typedef typename internal::dense_xpr_base<NestByValue>::type Base;
    EIGEN_DENSE_PUBLIC_INTERFACE(NestByValue)
    // The wrapped expression is stored by value (copied), see m_expression below.
    EIGEN_DEVICE_FUNC explicit inline NestByValue(const ExpressionType& matrix) : m_expression(matrix) {}
    // All accessors simply forward to the stored copy of the expression.
    EIGEN_DEVICE_FUNC inline Index rows() const { return m_expression.rows(); }
    EIGEN_DEVICE_FUNC inline Index cols() const { return m_expression.cols(); }
    EIGEN_DEVICE_FUNC inline Index outerStride() const { return m_expression.outerStride(); }
    EIGEN_DEVICE_FUNC inline Index innerStride() const { return m_expression.innerStride(); }
    EIGEN_DEVICE_FUNC inline const CoeffReturnType coeff(Index row, Index col) const
    {
      return m_expression.coeff(row, col);
    }
    // Write access casts away the constness of the stored copy.
    EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index row, Index col)
    {
      return m_expression.const_cast_derived().coeffRef(row, col);
    }
    EIGEN_DEVICE_FUNC inline const CoeffReturnType coeff(Index index) const
    {
      return m_expression.coeff(index);
    }
    EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index index)
    {
      return m_expression.const_cast_derived().coeffRef(index);
    }
    // Packet-level access, likewise forwarded to the stored expression.
    template<int LoadMode>
    inline const PacketScalar packet(Index row, Index col) const
    {
      return m_expression.template packet<LoadMode>(row, col);
    }
    template<int LoadMode>
    inline void writePacket(Index row, Index col, const PacketScalar& x)
    {
      m_expression.const_cast_derived().template writePacket<LoadMode>(row, col, x);
    }
    template<int LoadMode>
    inline const PacketScalar packet(Index index) const
    {
      return m_expression.template packet<LoadMode>(index);
    }
    template<int LoadMode>
    inline void writePacket(Index index, const PacketScalar& x)
    {
      m_expression.const_cast_derived().template writePacket<LoadMode>(index, x);
    }
    // Implicit conversion back to the wrapped expression type.
    EIGEN_DEVICE_FUNC operator const ExpressionType&() const { return m_expression; }
  protected:
    // Stored by value: this is the whole point of the wrapper.
    const ExpressionType m_expression;
};
/** \returns an expression of the temporary version of *this.
  */
template<typename Derived>
inline const NestByValue<Derived>
DenseBase<Derived>::nestByValue() const
{
  // Wrap the derived expression in a by-value nesting proxy.
  typedef NestByValue<Derived> ByValueWrapper;
  return ByValueWrapper(derived());
}
} // end namespace Eigen
#endif // EIGEN_NESTBYVALUE_H
| 3,400 | 29.63964 | 105 | h |
abess | abess-master/python/include/Eigen/src/Core/NoAlias.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_NOALIAS_H
#define EIGEN_NOALIAS_H
namespace Eigen {
/** \class NoAlias
* \ingroup Core_Module
*
* \brief Pseudo expression providing an operator = assuming no aliasing
*
* \tparam ExpressionType the type of the object on which to do the lazy assignment
*
* This class represents an expression with special assignment operators
* assuming no aliasing between the target expression and the source expression.
  * More precisely, it allows one to bypass the EvalBeforeAssignBit flag of the source expression.
* It is the return type of MatrixBase::noalias()
* and most of the time this is the only way it is used.
*
* \sa MatrixBase::noalias()
*/
template<typename ExpressionType, template <typename> class StorageBase>
class NoAlias
{
  public:
    typedef typename ExpressionType::Scalar Scalar;

    /** Wraps \a expression so that assignments through this proxy skip the aliasing check. */
    explicit NoAlias(ExpressionType& expression) : m_expression(expression) {}

    /** Plain assignment, assuming no aliasing between source and destination. */
    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE ExpressionType& operator=(const StorageBase<OtherDerived>& other)
    {
      typedef internal::assign_op<Scalar,typename OtherDerived::Scalar> AssignOp;
      call_assignment_no_alias(m_expression, other.derived(), AssignOp());
      return m_expression;
    }

    /** Compound addition, assuming no aliasing between source and destination. */
    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE ExpressionType& operator+=(const StorageBase<OtherDerived>& other)
    {
      typedef internal::add_assign_op<Scalar,typename OtherDerived::Scalar> AddOp;
      call_assignment_no_alias(m_expression, other.derived(), AddOp());
      return m_expression;
    }

    /** Compound subtraction, assuming no aliasing between source and destination. */
    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE ExpressionType& operator-=(const StorageBase<OtherDerived>& other)
    {
      typedef internal::sub_assign_op<Scalar,typename OtherDerived::Scalar> SubOp;
      call_assignment_no_alias(m_expression, other.derived(), SubOp());
      return m_expression;
    }

    /** \returns the wrapped expression. */
    EIGEN_DEVICE_FUNC
    ExpressionType& expression() const
    {
      return m_expression;
    }

  protected:
    ExpressionType& m_expression;
};
/** \returns a pseudo expression of \c *this with an operator= assuming
* no aliasing between \c *this and the source expression.
*
* More precisely, noalias() allows to bypass the EvalBeforeAssignBit flag.
* Currently, even though several expressions may alias, only product
  * expressions have this flag. Therefore, noalias() is only useful when
* the source expression contains a matrix product.
*
  * Here are some examples where noalias is useful:
* \code
* D.noalias() = A * B;
* D.noalias() += A.transpose() * B;
* D.noalias() -= 2 * A * B.adjoint();
* \endcode
*
* On the other hand the following example will lead to a \b wrong result:
* \code
* A.noalias() = A * B;
* \endcode
* because the result matrix A is also an operand of the matrix product. Therefore,
* there is no alternative than evaluating A * B in a temporary, that is the default
* behavior when you write:
* \code
* A = A * B;
* \endcode
*
* \sa class NoAlias
*/
template<typename Derived>
NoAlias<Derived,MatrixBase> MatrixBase<Derived>::noalias()
{
  // Build the no-aliasing assignment proxy around this expression; the proxy
  // only stores a reference, so *this must outlive it.
  typedef NoAlias<Derived, Eigen::MatrixBase> ProxyType;
  return ProxyType(derived());
}
} // end namespace Eigen
#endif // EIGEN_NOALIAS_H
| 3,582 | 31.87156 | 127 | h |
abess | abess-master/python/include/Eigen/src/Core/Product.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2011 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_PRODUCT_H
#define EIGEN_PRODUCT_H
namespace Eigen {
template<typename Lhs, typename Rhs, int Option, typename StorageKind> class ProductImpl;
namespace internal {
// Expression traits for Product: derives the result scalar type, storage
// kind, index type and compile-time sizes of a product from its operands.
template<typename Lhs, typename Rhs, int Option>
struct traits<Product<Lhs, Rhs, Option> >
{
  // Operand types stripped of const/reference qualifiers.
  typedef typename remove_all<Lhs>::type LhsCleaned;
  typedef typename remove_all<Rhs>::type RhsCleaned;
  typedef traits<LhsCleaned> LhsTraits;
  typedef traits<RhsCleaned> RhsTraits;
  typedef MatrixXpr XprKind;
  // Result scalar follows the mixed-scalar promotion rules of ScalarBinaryOpTraits.
  typedef typename ScalarBinaryOpTraits<typename traits<LhsCleaned>::Scalar, typename traits<RhsCleaned>::Scalar>::ReturnType Scalar;
  // Storage kind (e.g. Dense) promoted from both operands for this product category.
  typedef typename product_promote_storage_type<typename LhsTraits::StorageKind,
                                                typename RhsTraits::StorageKind,
                                                internal::product_type<Lhs,Rhs>::ret>::ret StorageKind;
  typedef typename promote_index_type<typename LhsTraits::StorageIndex,
                                      typename RhsTraits::StorageIndex>::type StorageIndex;
  enum {
    // A product has the rows of its lhs and the columns of its rhs.
    RowsAtCompileTime = LhsTraits::RowsAtCompileTime,
    ColsAtCompileTime = RhsTraits::ColsAtCompileTime,
    MaxRowsAtCompileTime = LhsTraits::MaxRowsAtCompileTime,
    MaxColsAtCompileTime = RhsTraits::MaxColsAtCompileTime,
    // FIXME: only needed by GeneralMatrixMatrixTriangular
    InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(LhsTraits::ColsAtCompileTime, RhsTraits::RowsAtCompileTime),
    // The storage order is somewhat arbitrary here. The correct one will be determined through the evaluator.
    Flags = (MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1) ? RowMajorBit
          : (MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1) ? 0
          : ( ((LhsTraits::Flags&NoPreferredStorageOrderBit) && (RhsTraits::Flags&RowMajorBit))
           || ((RhsTraits::Flags&NoPreferredStorageOrderBit) && (LhsTraits::Flags&RowMajorBit)) ) ? RowMajorBit
          : NoPreferredStorageOrderBit
  };
};
} // end namespace internal
/** \class Product
* \ingroup Core_Module
*
* \brief Expression of the product of two arbitrary matrices or vectors
*
* \tparam _Lhs the type of the left-hand side expression
* \tparam _Rhs the type of the right-hand side expression
*
* This class represents an expression of the product of two arbitrary matrices.
*
* The other template parameters are:
* \tparam Option can be DefaultProduct, AliasFreeProduct, or LazyProduct
*
*/
template<typename _Lhs, typename _Rhs, int Option>
class Product : public ProductImpl<_Lhs,_Rhs,Option,
                                   typename internal::product_promote_storage_type<typename internal::traits<_Lhs>::StorageKind,
                                                                                   typename internal::traits<_Rhs>::StorageKind,
                                                                                   internal::product_type<_Lhs,_Rhs>::ret>::ret>
{
  public:
    typedef _Lhs Lhs;
    typedef _Rhs Rhs;
    typedef typename ProductImpl<
        Lhs, Rhs, Option,
        typename internal::product_promote_storage_type<typename internal::traits<Lhs>::StorageKind,
                                                        typename internal::traits<Rhs>::StorageKind,
                                                        internal::product_type<Lhs,Rhs>::ret>::ret>::Base Base;
    EIGEN_GENERIC_PUBLIC_INTERFACE(Product)
    // Operands are nested by value or by reference as decided by ref_selector.
    typedef typename internal::ref_selector<Lhs>::type LhsNested;
    typedef typename internal::ref_selector<Rhs>::type RhsNested;
    typedef typename internal::remove_all<LhsNested>::type LhsNestedCleaned;
    typedef typename internal::remove_all<RhsNested>::type RhsNestedCleaned;
    // Stores (references to) both operands; checks at run time that the inner
    // dimensions agree.
    EIGEN_DEVICE_FUNC Product(const Lhs& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs)
    {
      eigen_assert(lhs.cols() == rhs.rows()
        && "invalid matrix product"
        && "if you wanted a coeff-wise or a dot product use the respective explicit functions");
    }
    // Result dimensions: rows of lhs by cols of rhs.
    EIGEN_DEVICE_FUNC inline Index rows() const { return m_lhs.rows(); }
    EIGEN_DEVICE_FUNC inline Index cols() const { return m_rhs.cols(); }
    // Read-only access to the (cleaned) nested operands.
    EIGEN_DEVICE_FUNC const LhsNestedCleaned& lhs() const { return m_lhs; }
    EIGEN_DEVICE_FUNC const RhsNestedCleaned& rhs() const { return m_rhs; }
  protected:
    LhsNested m_lhs;
    RhsNested m_rhs;
};
namespace internal {
// Default dense base for Product expressions: simply inherit the usual dense
// expression base. Specialized below for inner products.
template<typename Lhs, typename Rhs, int Option, int ProductTag = internal::product_type<Lhs,Rhs>::ret>
class dense_product_base
 : public internal::dense_xpr_base<Product<Lhs,Rhs,Option> >::type
{};
/** Conversion to scalar for inner-products */
template<typename Lhs, typename Rhs, int Option>
class dense_product_base<Lhs, Rhs, Option, InnerProduct>
 : public internal::dense_xpr_base<Product<Lhs,Rhs,Option> >::type
{
  typedef Product<Lhs,Rhs,Option> ProductXpr;
  typedef typename internal::dense_xpr_base<ProductXpr>::type Base;
public:
  using Base::derived;
  typedef typename Base::Scalar Scalar;
  // An inner product is a 1x1 expression; allow implicit conversion to its
  // unique coefficient by evaluating entry (0,0).
  operator const Scalar() const
  {
    return internal::evaluator<ProductXpr>(derived()).coeff(0,0);
  }
};
} // namespace internal
// Generic API dispatcher: for storage kinds without a dedicated
// specialization, Product inherits the generic expression base of that kind.
template<typename Lhs, typename Rhs, int Option, typename StorageKind>
class ProductImpl : public internal::generic_xpr_base<Product<Lhs,Rhs,Option>, MatrixXpr, StorageKind>::type
{
  public:
    typedef typename internal::generic_xpr_base<Product<Lhs,Rhs,Option>, MatrixXpr, StorageKind>::type Base;
};
template<typename Lhs, typename Rhs, int Option>
class ProductImpl<Lhs,Rhs,Option,Dense>
  : public internal::dense_product_base<Lhs,Rhs,Option>
{
    typedef Product<Lhs, Rhs, Option> Derived;
  public:
    typedef typename internal::dense_product_base<Lhs, Rhs, Option> Base;
    EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
  protected:
    enum {
      // Coefficient access is only meaningful for 1x1 (inner-product-like)
      // results or for lazy products, which evaluate coefficients on demand.
      IsOneByOne = (RowsAtCompileTime == 1 || RowsAtCompileTime == Dynamic) &&
                   (ColsAtCompileTime == 1 || ColsAtCompileTime == Dynamic),
      EnableCoeff = IsOneByOne || Option==LazyProduct
    };
  public:
    // 2D coefficient access; compile-time restricted to inner/lazy products,
    // and run-time checked to be 1x1 unless the product is lazy.
    EIGEN_DEVICE_FUNC Scalar coeff(Index row, Index col) const
    {
      EIGEN_STATIC_ASSERT(EnableCoeff, THIS_METHOD_IS_ONLY_FOR_INNER_OR_LAZY_PRODUCTS);
      eigen_assert( (Option==LazyProduct) || (this->rows() == 1 && this->cols() == 1) );
      return internal::evaluator<Derived>(derived()).coeff(row,col);
    }
    // Linear coefficient access, same restrictions as above.
    EIGEN_DEVICE_FUNC Scalar coeff(Index i) const
    {
      EIGEN_STATIC_ASSERT(EnableCoeff, THIS_METHOD_IS_ONLY_FOR_INNER_OR_LAZY_PRODUCTS);
      eigen_assert( (Option==LazyProduct) || (this->rows() == 1 && this->cols() == 1) );
      return internal::evaluator<Derived>(derived()).coeff(i);
    }
};
} // end namespace Eigen
#endif // EIGEN_PRODUCT_H
| 7,149 | 37.235294 | 133 | h |
abess | abess-master/python/include/Eigen/src/Core/Random.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_RANDOM_H
#define EIGEN_RANDOM_H
namespace Eigen {
namespace internal {
// Nullary functor producing one random value per call via Eigen's random<>().
// Not reentrant: relies on the global random generator state.
template<typename Scalar> struct scalar_random_op {
  EIGEN_EMPTY_STRUCT_CTOR(scalar_random_op)
  inline const Scalar operator() () const { return random<Scalar>(); }
};
// Cost model for the random functor; not vectorizable, and each call returns
// a different value (IsRepeatable = false), so results must not be re-evaluated.
template<typename Scalar>
struct functor_traits<scalar_random_op<Scalar> >
{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false, IsRepeatable = false }; };
} // end namespace internal
/** \returns a random matrix expression
*
* Numbers are uniformly spread through their whole definition range for integer types,
* and in the [-1:1] range for floating point scalar types.
*
* The parameters \a rows and \a cols are the number of rows and of columns of
* the returned matrix. Must be compatible with this MatrixBase type.
*
* \not_reentrant
*
* This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
* it is redundant to pass \a rows and \a cols as arguments, so Random() should be used
* instead.
*
*
* Example: \include MatrixBase_random_int_int.cpp
* Output: \verbinclude MatrixBase_random_int_int.out
*
* This expression has the "evaluate before nesting" flag so that it will be evaluated into
* a temporary matrix whenever it is nested in a larger expression. This prevents unexpected
* behavior with expressions involving random matrices.
*
* See DenseBase::NullaryExpr(Index, const CustomNullaryOp&) for an example using C++11 random generators.
*
* \sa DenseBase::setRandom(), DenseBase::Random(Index), DenseBase::Random()
*/
template<typename Derived>
inline const typename DenseBase<Derived>::RandomReturnType
DenseBase<Derived>::Random(Index rows, Index cols)
{
  // Dynamic-size variant: wrap the random functor in a nullary expression of
  // the requested dimensions. One fresh random value per coefficient.
  const internal::scalar_random_op<Scalar> randomOp;
  return NullaryExpr(rows, cols, randomOp);
}
/** \returns a random vector expression
*
* Numbers are uniformly spread through their whole definition range for integer types,
* and in the [-1:1] range for floating point scalar types.
*
* The parameter \a size is the size of the returned vector.
* Must be compatible with this MatrixBase type.
*
* \only_for_vectors
* \not_reentrant
*
* This variant is meant to be used for dynamic-size vector types. For fixed-size types,
* it is redundant to pass \a size as argument, so Random() should be used
* instead.
*
* Example: \include MatrixBase_random_int.cpp
* Output: \verbinclude MatrixBase_random_int.out
*
* This expression has the "evaluate before nesting" flag so that it will be evaluated into
* a temporary vector whenever it is nested in a larger expression. This prevents unexpected
* behavior with expressions involving random matrices.
*
* \sa DenseBase::setRandom(), DenseBase::Random(Index,Index), DenseBase::Random()
*/
template<typename Derived>
inline const typename DenseBase<Derived>::RandomReturnType
DenseBase<Derived>::Random(Index size)
{
  // Vector variant: a nullary expression of length \a size filled by the
  // random functor.
  const internal::scalar_random_op<Scalar> randomOp;
  return NullaryExpr(size, randomOp);
}
/** \returns a fixed-size random matrix or vector expression
*
* Numbers are uniformly spread through their whole definition range for integer types,
* and in the [-1:1] range for floating point scalar types.
*
* This variant is only for fixed-size MatrixBase types. For dynamic-size types, you
* need to use the variants taking size arguments.
*
* Example: \include MatrixBase_random.cpp
* Output: \verbinclude MatrixBase_random.out
*
* This expression has the "evaluate before nesting" flag so that it will be evaluated into
* a temporary matrix whenever it is nested in a larger expression. This prevents unexpected
* behavior with expressions involving random matrices.
*
* \not_reentrant
*
* \sa DenseBase::setRandom(), DenseBase::Random(Index,Index), DenseBase::Random(Index)
*/
template<typename Derived>
inline const typename DenseBase<Derived>::RandomReturnType
DenseBase<Derived>::Random()
{
  // Fixed-size variant: dimensions come from the compile-time constants.
  const internal::scalar_random_op<Scalar> randomOp;
  return NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, randomOp);
}
/** Sets all coefficients in this expression to random values.
*
* Numbers are uniformly spread through their whole definition range for integer types,
* and in the [-1:1] range for floating point scalar types.
*
* \not_reentrant
*
* Example: \include MatrixBase_setRandom.cpp
* Output: \verbinclude MatrixBase_setRandom.out
*
* \sa class CwiseNullaryOp, setRandom(Index), setRandom(Index,Index)
*/
template<typename Derived>
inline Derived& DenseBase<Derived>::setRandom()
{
  // Overwrite every coefficient with a fresh random value; size is unchanged.
  *this = Random(rows(), cols());
  return derived();
}
/** Resizes to the given \a newSize, and sets all coefficients in this expression to random values.
*
* Numbers are uniformly spread through their whole definition range for integer types,
* and in the [-1:1] range for floating point scalar types.
*
* \only_for_vectors
* \not_reentrant
*
* Example: \include Matrix_setRandom_int.cpp
* Output: \verbinclude Matrix_setRandom_int.out
*
* \sa DenseBase::setRandom(), setRandom(Index,Index), class CwiseNullaryOp, DenseBase::Random()
*/
template<typename Derived>
EIGEN_STRONG_INLINE Derived&
PlainObjectBase<Derived>::setRandom(Index newSize)
{
  // Resize the vector first, then delegate the filling to setRandom().
  this->resize(newSize);
  return this->setRandom();
}
/** Resizes to the given size, and sets all coefficients in this expression to random values.
*
* Numbers are uniformly spread through their whole definition range for integer types,
* and in the [-1:1] range for floating point scalar types.
*
* \not_reentrant
*
* \param rows the new number of rows
* \param cols the new number of columns
*
* Example: \include Matrix_setRandom_int_int.cpp
* Output: \verbinclude Matrix_setRandom_int_int.out
*
* \sa DenseBase::setRandom(), setRandom(Index), class CwiseNullaryOp, DenseBase::Random()
*/
template<typename Derived>
EIGEN_STRONG_INLINE Derived&
PlainObjectBase<Derived>::setRandom(Index rows, Index cols)
{
  // Resize to the requested shape first, then delegate to setRandom().
  this->resize(rows, cols);
  return this->setRandom();
}
} // end namespace Eigen
#endif // EIGEN_RANDOM_H
| 6,379 | 33.863388 | 107 | h |
abess | abess-master/python/include/Eigen/src/Core/Replicate.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2010 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_REPLICATE_H
#define EIGEN_REPLICATE_H
namespace Eigen {
namespace internal {
// Expression traits for Replicate: sizes are the nested sizes scaled by the
// replication factors, or Dynamic when either factor/size is Dynamic.
template<typename MatrixType,int RowFactor,int ColFactor>
struct traits<Replicate<MatrixType,RowFactor,ColFactor> >
 : traits<MatrixType>
{
  typedef typename MatrixType::Scalar Scalar;
  typedef typename traits<MatrixType>::StorageKind StorageKind;
  typedef typename traits<MatrixType>::XprKind XprKind;
  typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
  enum {
    RowsAtCompileTime = RowFactor==Dynamic || int(MatrixType::RowsAtCompileTime)==Dynamic
                      ? Dynamic
                      : RowFactor * MatrixType::RowsAtCompileTime,
    ColsAtCompileTime = ColFactor==Dynamic || int(MatrixType::ColsAtCompileTime)==Dynamic
                      ? Dynamic
                      : ColFactor * MatrixType::ColsAtCompileTime,
   //FIXME we don't propagate the max sizes !!!
    MaxRowsAtCompileTime = RowsAtCompileTime,
    MaxColsAtCompileTime = ColsAtCompileTime,
    // Keep the nested storage order; degenerate vector shapes force it.
    IsRowMajor = MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1 ? 1
               : MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1 ? 0
               : (MatrixType::Flags & RowMajorBit) ? 1 : 0,
    // FIXME enable DirectAccess with negative strides?
    Flags = IsRowMajor ? RowMajorBit : 0
  };
};
}
/**
* \class Replicate
* \ingroup Core_Module
*
* \brief Expression of the multiple replication of a matrix or vector
*
* \tparam MatrixType the type of the object we are replicating
* \tparam RowFactor number of repetitions at compile time along the vertical direction, can be Dynamic.
* \tparam ColFactor number of repetitions at compile time along the horizontal direction, can be Dynamic.
*
* This class represents an expression of the multiple replication of a matrix or vector.
* It is the return type of DenseBase::replicate() and most of the time
* this is the only way it is used.
*
* \sa DenseBase::replicate()
*/
template<typename MatrixType,int RowFactor,int ColFactor> class Replicate
  : public internal::dense_xpr_base< Replicate<MatrixType,RowFactor,ColFactor> >::type
{
    typedef typename internal::traits<Replicate>::MatrixTypeNested MatrixTypeNested;
    typedef typename internal::traits<Replicate>::_MatrixTypeNested _MatrixTypeNested;
  public:
    typedef typename internal::dense_xpr_base<Replicate>::type Base;
    EIGEN_DENSE_PUBLIC_INTERFACE(Replicate)
    typedef typename internal::remove_all<MatrixType>::type NestedExpression;
    // Fixed-factor constructor: both factors must be compile-time constants.
    template<typename OriginalMatrixType>
    EIGEN_DEVICE_FUNC
    inline explicit Replicate(const OriginalMatrixType& matrix)
      : m_matrix(matrix), m_rowFactor(RowFactor), m_colFactor(ColFactor)
    {
      EIGEN_STATIC_ASSERT((internal::is_same<typename internal::remove_const<MatrixType>::type,OriginalMatrixType>::value),
                          THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE)
      eigen_assert(RowFactor!=Dynamic && ColFactor!=Dynamic);
    }
    // Run-time factor constructor (used with Dynamic template factors).
    template<typename OriginalMatrixType>
    EIGEN_DEVICE_FUNC
    inline Replicate(const OriginalMatrixType& matrix, Index rowFactor, Index colFactor)
      : m_matrix(matrix), m_rowFactor(rowFactor), m_colFactor(colFactor)
    {
      EIGEN_STATIC_ASSERT((internal::is_same<typename internal::remove_const<MatrixType>::type,OriginalMatrixType>::value),
                          THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE)
    }
    // Replicated dimensions: nested size times the replication factor.
    EIGEN_DEVICE_FUNC
    inline Index rows() const { return m_matrix.rows() * m_rowFactor.value(); }
    EIGEN_DEVICE_FUNC
    inline Index cols() const { return m_matrix.cols() * m_colFactor.value(); }
    EIGEN_DEVICE_FUNC
    const _MatrixTypeNested& nestedExpression() const
    {
      return m_matrix;
    }
  protected:
    MatrixTypeNested m_matrix;
    // Factors are stored only when Dynamic (variable_if_dynamic).
    const internal::variable_if_dynamic<Index, RowFactor> m_rowFactor;
    const internal::variable_if_dynamic<Index, ColFactor> m_colFactor;
};
/**
* \return an expression of the replication of \c *this
*
* Example: \include MatrixBase_replicate.cpp
* Output: \verbinclude MatrixBase_replicate.out
*
* \sa VectorwiseOp::replicate(), DenseBase::replicate(Index,Index), class Replicate
*/
template<typename Derived>
template<int RowFactor, int ColFactor>
const Replicate<Derived,RowFactor,ColFactor>
DenseBase<Derived>::replicate() const
{
  // Tile this expression RowFactor x ColFactor times; both factors are fixed
  // at compile time.
  typedef Replicate<Derived,RowFactor,ColFactor> ReplicateType;
  return ReplicateType(derived());
}
/**
* \return an expression of the replication of each column (or row) of \c *this
*
* Example: \include DirectionWise_replicate_int.cpp
* Output: \verbinclude DirectionWise_replicate_int.out
*
* \sa VectorwiseOp::replicate(), DenseBase::replicate(), class Replicate
*/
template<typename ExpressionType, int Direction>
const typename VectorwiseOp<ExpressionType,Direction>::ReplicateReturnType
VectorwiseOp<ExpressionType,Direction>::replicate(Index factor) const
{
  // Vertical replication repeats the rows \a factor times, horizontal
  // replication repeats the columns; the orthogonal factor is always 1.
  const Index rowFactor = (Direction==Vertical)   ? factor : Index(1);
  const Index colFactor = (Direction==Horizontal) ? factor : Index(1);
  return ReplicateReturnType(_expression(), rowFactor, colFactor);
}
} // end namespace Eigen
#endif // EIGEN_REPLICATE_H
| 5,595 | 38.132867 | 123 | h |
abess | abess-master/python/include/Eigen/src/Core/Reverse.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2006-2008 Benoit Jacob <[email protected]>
// Copyright (C) 2009 Ricard Marxer <[email protected]>
// Copyright (C) 2009-2010 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_REVERSE_H
#define EIGEN_REVERSE_H
namespace Eigen {
namespace internal {
// Expression traits for Reverse: same sizes as the nested expression; only
// the RowMajorBit and LvalueBit flags are forwarded.
template<typename MatrixType, int Direction>
struct traits<Reverse<MatrixType, Direction> >
 : traits<MatrixType>
{
  typedef typename MatrixType::Scalar Scalar;
  typedef typename traits<MatrixType>::StorageKind StorageKind;
  typedef typename traits<MatrixType>::XprKind XprKind;
  typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
  enum {
    RowsAtCompileTime = MatrixType::RowsAtCompileTime,
    ColsAtCompileTime = MatrixType::ColsAtCompileTime,
    MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
    MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
    Flags = _MatrixTypeNested::Flags & (RowMajorBit | LvalueBit)
  };
};
// Helper that conditionally reverses a SIMD packet: when ReversePacket is
// true the lanes are flipped with preverse().
template<typename PacketType, bool ReversePacket> struct reverse_packet_cond
{
  static inline PacketType run(const PacketType& x) { return preverse(x); }
};
// Specialization for ReversePacket == false: the packet passes through as-is.
template<typename PacketType> struct reverse_packet_cond<PacketType,false>
{
  static inline PacketType run(const PacketType& x) { return x; }
};
} // end namespace internal
/** \class Reverse
* \ingroup Core_Module
*
* \brief Expression of the reverse of a vector or matrix
*
* \tparam MatrixType the type of the object of which we are taking the reverse
* \tparam Direction defines the direction of the reverse operation, can be Vertical, Horizontal, or BothDirections
*
* This class represents an expression of the reverse of a vector.
* It is the return type of MatrixBase::reverse() and VectorwiseOp::reverse()
* and most of the time this is the only way it is used.
*
* \sa MatrixBase::reverse(), VectorwiseOp::reverse()
*/
template<typename MatrixType, int Direction> class Reverse
  : public internal::dense_xpr_base< Reverse<MatrixType, Direction> >::type
{
  public:
    typedef typename internal::dense_xpr_base<Reverse>::type Base;
    EIGEN_DENSE_PUBLIC_INTERFACE(Reverse)
    typedef typename internal::remove_all<MatrixType>::type NestedExpression;
    using Base::IsRowMajor;
  protected:
    enum {
      PacketSize = internal::packet_traits<Scalar>::size,
      IsColMajor = !IsRowMajor,
      // Which dimension(s) this Direction actually reverses.
      ReverseRow = (Direction == Vertical) || (Direction == BothDirections),
      ReverseCol = (Direction == Horizontal) || (Direction == BothDirections),
      OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1,
      OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1,
      // True when the reversed dimension is the inner (contiguous) one, in
      // which case the lanes inside each packet must be flipped too.
      ReversePacket = (Direction == BothDirections)
                    || ((Direction == Vertical) && IsColMajor)
                    || ((Direction == Horizontal) && IsRowMajor)
    };
    typedef internal::reverse_packet_cond<PacketScalar,ReversePacket> reverse_packet;
  public:
    EIGEN_DEVICE_FUNC explicit inline Reverse(const MatrixType& matrix) : m_matrix(matrix) { }
    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Reverse)
    EIGEN_DEVICE_FUNC inline Index rows() const { return m_matrix.rows(); }
    EIGEN_DEVICE_FUNC inline Index cols() const { return m_matrix.cols(); }
    // A reversed view walks the underlying storage backwards.
    EIGEN_DEVICE_FUNC inline Index innerStride() const
    {
      return -m_matrix.innerStride();
    }
    EIGEN_DEVICE_FUNC const typename internal::remove_all<typename MatrixType::Nested>::type&
    nestedExpression() const
    {
      return m_matrix;
    }
  protected:
    typename MatrixType::Nested m_matrix;
};
/** \returns an expression of the reverse of *this.
*
* Example: \include MatrixBase_reverse.cpp
* Output: \verbinclude MatrixBase_reverse.out
*
*/
template<typename Derived>
inline typename DenseBase<Derived>::ReverseReturnType
DenseBase<Derived>::reverse()
{
  // Lazily view this expression with both dimensions flipped.
  ReverseReturnType reversed(derived());
  return reversed;
}
//reverse const overload moved DenseBase.h due to a CUDA compiler bug
/** This is the "in place" version of reverse: it reverses \c *this.
*
* In most cases it is probably better to simply use the reversed expression
* of a matrix. However, when reversing the matrix data itself is really needed,
* then this "in-place" version is probably the right choice because it provides
* the following additional benefits:
* - less error prone: doing the same operation with .reverse() requires special care:
* \code m = m.reverse().eval(); \endcode
* - this API enables reverse operations without the need for a temporary
* - it allows future optimizations (cache friendliness, etc.)
*
* \sa VectorwiseOp::reverseInPlace(), reverse() */
template<typename Derived>
inline void DenseBase<Derived>::reverseInPlace()
{
  // Swap mirrored halves along the longer dimension first; when that
  // dimension has odd length, the middle column/row must additionally be
  // reversed onto itself by swapping its own two halves.
  if(cols()>rows())
  {
    const Index h = cols()/2;
    leftCols(h).swap(rightCols(h).reverse());
    if((cols()%2)==1)
    {
      const Index v = rows()/2;
      col(h).head(v).swap(col(h).tail(v).reverse());
    }
  }
  else
  {
    const Index h = rows()/2;
    topRows(h).swap(bottomRows(h).reverse());
    if((rows()%2)==1)
    {
      const Index v = cols()/2;
      row(h).head(v).swap(row(h).tail(v).reverse());
    }
  }
}
namespace internal {
template<int Direction>
struct vectorwise_reverse_inplace_impl;
template<>
struct vectorwise_reverse_inplace_impl<Vertical>
{
  // Reverse each column in place: swap the top half of the rows with the
  // colwise-reversed bottom half. A middle row (odd row count) stays put.
  template<typename ExpressionType>
  static void run(ExpressionType &xpr)
  {
    Index half = xpr.rows()/2;
    xpr.topRows(half).swap(xpr.bottomRows(half).colwise().reverse());
  }
};
template<>
struct vectorwise_reverse_inplace_impl<Horizontal>
{
  // Reverse each row in place: swap the left half of the columns with the
  // rowwise-reversed right half. A middle column (odd col count) stays put.
  template<typename ExpressionType>
  static void run(ExpressionType &xpr)
  {
    Index half = xpr.cols()/2;
    xpr.leftCols(half).swap(xpr.rightCols(half).rowwise().reverse());
  }
};
} // end namespace internal
/** This is the "in place" version of VectorwiseOp::reverse: it reverses each column or row of \c *this.
*
* In most cases it is probably better to simply use the reversed expression
* of a matrix. However, when reversing the matrix data itself is really needed,
* then this "in-place" version is probably the right choice because it provides
* the following additional benefits:
* - less error prone: doing the same operation with .reverse() requires special care:
* \code m = m.reverse().eval(); \endcode
* - this API enables reverse operations without the need for a temporary
*
* \sa DenseBase::reverseInPlace(), reverse() */
template<typename ExpressionType, int Direction>
void VectorwiseOp<ExpressionType,Direction>::reverseInPlace()
{
  // Dispatch to the direction-specific implementation; const_cast_derived()
  // grants write access to the nested expression.
  internal::vectorwise_reverse_inplace_impl<Direction>::run(_expression().const_cast_derived());
}
} // end namespace Eigen
#endif // EIGEN_REVERSE_H
| 7,073 | 32.367925 | 116 | h |
abess | abess-master/python/include/Eigen/src/Core/Select.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2010 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SELECT_H
#define EIGEN_SELECT_H
namespace Eigen {
/** \class Select
* \ingroup Core_Module
*
* \brief Expression of a coefficient wise version of the C++ ternary operator ?:
*
  * \tparam ConditionMatrixType the type of the \em condition expression which must be a boolean matrix
  * \tparam ThenMatrixType the type of the \em then expression
  * \tparam ElseMatrixType the type of the \em else expression
*
* This class represents an expression of a coefficient wise version of the C++ ternary operator ?:.
* It is the return type of DenseBase::select() and most of the time this is the only way it is used.
*
* \sa DenseBase::select(const DenseBase<ThenDerived>&, const DenseBase<ElseDerived>&) const
*/
namespace internal {
// Expression traits for Select: dimensions follow the condition matrix, the
// scalar type follows the "then" operand.
template<typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType>
struct traits<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >
 : traits<ThenMatrixType>
{
  typedef typename traits<ThenMatrixType>::Scalar Scalar;
  typedef Dense StorageKind;
  typedef typename traits<ThenMatrixType>::XprKind XprKind;
  typedef typename ConditionMatrixType::Nested ConditionMatrixNested;
  typedef typename ThenMatrixType::Nested ThenMatrixNested;
  typedef typename ElseMatrixType::Nested ElseMatrixNested;
  enum {
    RowsAtCompileTime = ConditionMatrixType::RowsAtCompileTime,
    ColsAtCompileTime = ConditionMatrixType::ColsAtCompileTime,
    MaxRowsAtCompileTime = ConditionMatrixType::MaxRowsAtCompileTime,
    MaxColsAtCompileTime = ConditionMatrixType::MaxColsAtCompileTime,
    // Row-major only when both value operands are row-major.
    Flags = (unsigned int)ThenMatrixType::Flags & ElseMatrixType::Flags & RowMajorBit
  };
};
template<typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType>
class Select : public internal::dense_xpr_base< Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >::type,
               internal::no_assignment_operator
{
  public:
    typedef typename internal::dense_xpr_base<Select>::type Base;
    EIGEN_DENSE_PUBLIC_INTERFACE(Select)
    // All three operands must share the same dimensions (checked at run time).
    inline EIGEN_DEVICE_FUNC
    Select(const ConditionMatrixType& a_conditionMatrix,
           const ThenMatrixType& a_thenMatrix,
           const ElseMatrixType& a_elseMatrix)
      : m_condition(a_conditionMatrix), m_then(a_thenMatrix), m_else(a_elseMatrix)
    {
      eigen_assert(m_condition.rows() == m_then.rows() && m_condition.rows() == m_else.rows());
      eigen_assert(m_condition.cols() == m_then.cols() && m_condition.cols() == m_else.cols());
    }
    inline EIGEN_DEVICE_FUNC Index rows() const { return m_condition.rows(); }
    inline EIGEN_DEVICE_FUNC Index cols() const { return m_condition.cols(); }
    // 2D coefficient access: pick from "then" where the condition is true,
    // from "else" otherwise.
    inline EIGEN_DEVICE_FUNC
    const Scalar coeff(Index i, Index j) const
    {
      if (m_condition.coeff(i,j))
        return m_then.coeff(i,j);
      else
        return m_else.coeff(i,j);
    }
    // Linear-index coefficient access, same selection rule.
    inline EIGEN_DEVICE_FUNC
    const Scalar coeff(Index i) const
    {
      if (m_condition.coeff(i))
        return m_then.coeff(i);
      else
        return m_else.coeff(i);
    }
    inline EIGEN_DEVICE_FUNC const ConditionMatrixType& conditionMatrix() const
    {
      return m_condition;
    }
    inline EIGEN_DEVICE_FUNC const ThenMatrixType& thenMatrix() const
    {
      return m_then;
    }
    inline EIGEN_DEVICE_FUNC const ElseMatrixType& elseMatrix() const
    {
      return m_else;
    }
  protected:
    typename ConditionMatrixType::Nested m_condition;
    typename ThenMatrixType::Nested m_then;
    typename ElseMatrixType::Nested m_else;
};
/** \returns a matrix where each coefficient (i,j) is equal to \a thenMatrix(i,j)
* if \c *this(i,j), and \a elseMatrix(i,j) otherwise.
*
* Example: \include MatrixBase_select.cpp
* Output: \verbinclude MatrixBase_select.out
*
* \sa class Select
*/
template<typename Derived>
template<typename ThenDerived,typename ElseDerived>
inline const Select<Derived,ThenDerived,ElseDerived>
DenseBase<Derived>::select(const DenseBase<ThenDerived>& thenMatrix,
                           const DenseBase<ElseDerived>& elseMatrix) const
{
  // Coefficient-wise ?: — pick thenMatrix where *this is true, elseMatrix
  // elsewhere.
  typedef Select<Derived,ThenDerived,ElseDerived> SelectType;
  return SelectType(derived(), thenMatrix.derived(), elseMatrix.derived());
}
/** Version of DenseBase::select(const DenseBase&, const DenseBase&) with
* the \em else expression being a scalar value.
*
* \sa DenseBase::select(const DenseBase<ThenDerived>&, const DenseBase<ElseDerived>&) const, class Select
*/
template<typename Derived>
template<typename ThenDerived>
inline const Select<Derived,ThenDerived, typename ThenDerived::ConstantReturnType>
DenseBase<Derived>::select(const DenseBase<ThenDerived>& thenMatrix,
                           const typename ThenDerived::Scalar& elseScalar) const
{
  // Materialize the scalar "else" branch as a constant expression of
  // matching size, then defer to the generic Select expression.
  typedef typename ThenDerived::ConstantReturnType ElseConstantType;
  return Select<Derived,ThenDerived,ElseConstantType>(
    derived(), thenMatrix.derived(), ThenDerived::Constant(rows(),cols(),elseScalar));
}
/** Version of DenseBase::select(const DenseBase&, const DenseBase&) with
* the \em then expression being a scalar value.
*
* \sa DenseBase::select(const DenseBase<ThenDerived>&, const DenseBase<ElseDerived>&) const, class Select
*/
template<typename Derived>
template<typename ElseDerived>
inline const Select<Derived, typename ElseDerived::ConstantReturnType, ElseDerived >
DenseBase<Derived>::select(const typename ElseDerived::Scalar& thenScalar,
                           const DenseBase<ElseDerived>& elseMatrix) const
{
  // Materialize the scalar "then" branch as a constant expression of
  // matching size, then defer to the generic Select expression.
  typedef typename ElseDerived::ConstantReturnType ThenConstantType;
  return Select<Derived,ThenConstantType,ElseDerived>(
    derived(), ElseDerived::Constant(rows(),cols(),thenScalar), elseMatrix.derived());
}
} // end namespace Eigen
#endif // EIGEN_SELECT_H
| 6,020 | 35.93865 | 116 | h |
abess | abess-master/python/include/Eigen/src/Core/SelfCwiseBinaryOp.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2010 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SELFCWISEBINARYOP_H
#define EIGEN_SELFCWISEBINARYOP_H
namespace Eigen {
// TODO generalize the scalar type of 'other'
template<typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator*=(const Scalar& other)
{
  // In-place coefficient-wise scaling: broadcast `other` as a constant
  // expression and route the assignment through the mul-assign functor.
  typedef typename Derived::PlainObject Plain;
  internal::call_assignment(this->derived(), Plain::Constant(rows(),cols(),other), internal::mul_assign_op<Scalar,Scalar>());
  return derived();
}
template<typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& ArrayBase<Derived>::operator+=(const Scalar& other)
{
  // In-place coefficient-wise offset: broadcast `other` as a constant
  // expression and route the assignment through the add-assign functor.
  typedef typename Derived::PlainObject Plain;
  internal::call_assignment(this->derived(), Plain::Constant(rows(),cols(),other), internal::add_assign_op<Scalar,Scalar>());
  return derived();
}
template<typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& ArrayBase<Derived>::operator-=(const Scalar& other)
{
  // In-place coefficient-wise offset: broadcast `other` as a constant
  // expression and route the assignment through the sub-assign functor.
  typedef typename Derived::PlainObject Plain;
  internal::call_assignment(this->derived(), Plain::Constant(rows(),cols(),other), internal::sub_assign_op<Scalar,Scalar>());
  return derived();
}
template<typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator/=(const Scalar& other)
{
  // In-place coefficient-wise division: broadcast `other` as a constant
  // expression and route the assignment through the div-assign functor.
  typedef typename Derived::PlainObject Plain;
  internal::call_assignment(this->derived(), Plain::Constant(rows(),cols(),other), internal::div_assign_op<Scalar,Scalar>());
  return derived();
}
} // end namespace Eigen
#endif // EIGEN_SELFCWISEBINARYOP_H
| 1,909 | 35.730769 | 131 | h |
abess | abess-master/python/include/Eigen/src/Core/SolverBase.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SOLVERBASE_H
#define EIGEN_SOLVERBASE_H
namespace Eigen {
namespace internal {
} // end namespace internal
/** \class SolverBase
* \brief A base class for matrix decomposition and solvers
*
* \tparam Derived the actual type of the decomposition/solver.
*
* Any matrix decomposition inheriting this base class provide the following API:
*
* \code
* MatrixType A, b, x;
* DecompositionType dec(A);
* x = dec.solve(b); // solve A * x = b
* x = dec.transpose().solve(b); // solve A^T * x = b
* x = dec.adjoint().solve(b); // solve A' * x = b
* \endcode
*
* \warning Currently, any other usage of transpose() and adjoint() are not supported and will produce compilation errors.
*
* \sa class PartialPivLU, class FullPivLU
*/
template<typename Derived>
class SolverBase : public EigenBase<Derived>
{
public:
typedef EigenBase<Derived> Base;
typedef typename internal::traits<Derived>::Scalar Scalar;
typedef Scalar CoeffReturnType;
// Compile-time shape constants, forwarded from the traits of the concrete
// decomposition type `Derived`.
enum {
RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
SizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::RowsAtCompileTime,
internal::traits<Derived>::ColsAtCompileTime>::ret),
MaxRowsAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime,
MaxColsAtCompileTime = internal::traits<Derived>::MaxColsAtCompileTime,
MaxSizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::MaxRowsAtCompileTime,
internal::traits<Derived>::MaxColsAtCompileTime>::ret),
IsVectorAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime == 1
|| internal::traits<Derived>::MaxColsAtCompileTime == 1
};
/** Default constructor */
SolverBase()
{}
~SolverBase()
{}
using Base::derived;
/** \returns an expression of the solution x of \f$ A x = b \f$ using the current decomposition of A.
*/
template<typename Rhs>
inline const Solve<Derived, Rhs>
solve(const MatrixBase<Rhs>& b) const
{
// The returned Solve object is a lazy expression: the actual solving work
// happens when it is assigned to a destination.
eigen_assert(derived().rows()==b.rows() && "solve(): invalid number of rows of the right hand side matrix b");
return Solve<Derived, Rhs>(derived(), b.derived());
}
/** \internal the return type of transpose() */
typedef typename internal::add_const<Transpose<const Derived> >::type ConstTransposeReturnType;
/** \returns an expression of the transposed of the factored matrix.
*
* A typical usage is to solve for the transposed problem A^T x = b:
* \code x = dec.transpose().solve(b); \endcode
*
* \sa adjoint(), solve()
*/
inline ConstTransposeReturnType transpose() const
{
return ConstTransposeReturnType(derived());
}
/** \internal the return type of adjoint() */
// For complex scalars the adjoint wraps the transpose in a conjugation
// expression; for real scalars it is the plain transpose type.
typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, ConstTransposeReturnType>,
ConstTransposeReturnType
>::type AdjointReturnType;
/** \returns an expression of the adjoint of the factored matrix
*
* A typical usage is to solve for the adjoint problem A' x = b:
* \code x = dec.adjoint().solve(b); \endcode
*
* For real scalar types, this function is equivalent to transpose().
*
* \sa transpose(), solve()
*/
inline AdjointReturnType adjoint() const
{
return AdjointReturnType(derived().transpose());
}
protected:
};
namespace internal {
// Expressions with solver storage kind use SolverBase as their base class.
template<typename Derived>
struct generic_xpr_base<Derived, MatrixXpr, SolverStorage>
{
typedef SolverBase<Derived> type;
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SOLVERBASE_H
| 4,365 | 32.328244 | 123 | h |
abess | abess-master/python/include/Eigen/src/Core/Stride.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Benoit Jacob <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_STRIDE_H
#define EIGEN_STRIDE_H
namespace Eigen {
/** \class Stride
* \ingroup Core_Module
*
* \brief Holds strides information for Map
*
* This class holds the strides information for mapping arrays with strides with class Map.
*
* It holds two values: the inner stride and the outer stride.
*
* The inner stride is the pointer increment between two consecutive entries within a given row of a
* row-major matrix or within a given column of a column-major matrix.
*
* The outer stride is the pointer increment between two consecutive rows of a row-major matrix or
* between two consecutive columns of a column-major matrix.
*
* These two values can be passed either at compile-time as template parameters, or at runtime as
* arguments to the constructor.
*
* Indeed, this class takes two template parameters:
* \tparam _OuterStrideAtCompileTime the outer stride, or Dynamic if you want to specify it at runtime.
* \tparam _InnerStrideAtCompileTime the inner stride, or Dynamic if you want to specify it at runtime.
*
* Here is an example:
* \include Map_general_stride.cpp
* Output: \verbinclude Map_general_stride.out
*
* \sa class InnerStride, class OuterStride, \ref TopicStorageOrders
*/
template<int _OuterStrideAtCompileTime, int _InnerStrideAtCompileTime>
class Stride
{
public:
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
enum {
InnerStrideAtCompileTime = _InnerStrideAtCompileTime,
OuterStrideAtCompileTime = _OuterStrideAtCompileTime
};
/** Default constructor, for use when strides are fixed at compile time */
EIGEN_DEVICE_FUNC
Stride()
: m_outer(OuterStrideAtCompileTime), m_inner(InnerStrideAtCompileTime)
{
// Both strides must actually be known at compile time to use this ctor.
eigen_assert(InnerStrideAtCompileTime != Dynamic && OuterStrideAtCompileTime != Dynamic);
}
/** Constructor allowing to pass the strides at runtime */
EIGEN_DEVICE_FUNC
Stride(Index outerStride, Index innerStride)
: m_outer(outerStride), m_inner(innerStride)
{
// Negative strides are not supported.
eigen_assert(innerStride>=0 && outerStride>=0);
}
/** Copy constructor */
EIGEN_DEVICE_FUNC
Stride(const Stride& other)
: m_outer(other.outer()), m_inner(other.inner())
{}
/** \returns the outer stride */
EIGEN_DEVICE_FUNC
inline Index outer() const { return m_outer.value(); }
/** \returns the inner stride */
EIGEN_DEVICE_FUNC
inline Index inner() const { return m_inner.value(); }
protected:
// variable_if_dynamic stores the value only when the compile-time constant
// is Dynamic; otherwise it occupies no runtime storage.
internal::variable_if_dynamic<Index, OuterStrideAtCompileTime> m_outer;
internal::variable_if_dynamic<Index, InnerStrideAtCompileTime> m_inner;
};
/** \brief Convenience specialization of Stride to specify only an inner stride
* See class Map for some examples */
template<int Value>
class InnerStride : public Stride<0, Value>
{
typedef Stride<0, Value> Base;
public:
EIGEN_DEVICE_FUNC InnerStride() : Base() {}
// The outer stride is pinned to 0 here (see class Map for its meaning).
EIGEN_DEVICE_FUNC InnerStride(Index v) : Base(0, v) {} // FIXME making this explicit could break valid code
};
/** \brief Convenience specialization of Stride to specify only an outer stride
* See class Map for some examples */
template<int Value>
class OuterStride : public Stride<Value, 0>
{
typedef Stride<Value, 0> Base;
public:
EIGEN_DEVICE_FUNC OuterStride() : Base() {}
// The inner stride is pinned to 0 here (see class Map for its meaning).
EIGEN_DEVICE_FUNC OuterStride(Index v) : Base(v,0) {} // FIXME making this explicit could break valid code
};
} // end namespace Eigen
#endif // EIGEN_STRIDE_H
| 3,865 | 33.517857 | 111 | h |
abess | abess-master/python/include/Eigen/src/Core/Transpose.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2006-2008 Benoit Jacob <[email protected]>
// Copyright (C) 2009-2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_TRANSPOSE_H
#define EIGEN_TRANSPOSE_H
namespace Eigen {
namespace internal {
// Traits of a Transpose expression: row/column dimensions (and their maxima)
// are swapped relative to the nested expression, and the storage-order flag
// is toggled.
template<typename MatrixType>
struct traits<Transpose<MatrixType> > : public traits<MatrixType>
{
typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
typedef typename remove_reference<MatrixTypeNested>::type MatrixTypeNestedPlain;
enum {
RowsAtCompileTime = MatrixType::ColsAtCompileTime,
ColsAtCompileTime = MatrixType::RowsAtCompileTime,
MaxRowsAtCompileTime = MatrixType::MaxColsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
// Keep the lvalue-ness of the nested expression, drop nesting-by-ref.
FlagsLvalueBit = is_lvalue<MatrixType>::value ? LvalueBit : 0,
Flags0 = traits<MatrixTypeNestedPlain>::Flags & ~(LvalueBit | NestByRefBit),
Flags1 = Flags0 | FlagsLvalueBit,
Flags = Flags1 ^ RowMajorBit,  // XOR flips the storage order
InnerStrideAtCompileTime = inner_stride_at_compile_time<MatrixType>::ret,
OuterStrideAtCompileTime = outer_stride_at_compile_time<MatrixType>::ret
};
};
}
template<typename MatrixType, typename StorageKind> class TransposeImpl;
/** \class Transpose
* \ingroup Core_Module
*
* \brief Expression of the transpose of a matrix
*
* \tparam MatrixType the type of the object of which we are taking the transpose
*
* This class represents an expression of the transpose of a matrix.
* It is the return type of MatrixBase::transpose() and MatrixBase::adjoint()
* and most of the time this is the only way it is used.
*
* \sa MatrixBase::transpose(), MatrixBase::adjoint()
*/
template<typename MatrixType> class Transpose
: public TransposeImpl<MatrixType,typename internal::traits<MatrixType>::StorageKind>
{
public:
typedef typename internal::ref_selector<MatrixType>::non_const_type MatrixTypeNested;
typedef typename TransposeImpl<MatrixType,typename internal::traits<MatrixType>::StorageKind>::Base Base;
EIGEN_GENERIC_PUBLIC_INTERFACE(Transpose)
typedef typename internal::remove_all<MatrixType>::type NestedExpression;
EIGEN_DEVICE_FUNC
explicit inline Transpose(MatrixType& matrix) : m_matrix(matrix) {}
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Transpose)
// Dimensions are swapped with respect to the nested expression.
EIGEN_DEVICE_FUNC inline Index rows() const { return m_matrix.cols(); }
EIGEN_DEVICE_FUNC inline Index cols() const { return m_matrix.rows(); }
/** \returns the nested expression */
EIGEN_DEVICE_FUNC
const typename internal::remove_all<MatrixTypeNested>::type&
nestedExpression() const { return m_matrix; }
/** \returns the nested expression */
EIGEN_DEVICE_FUNC
typename internal::remove_reference<MatrixTypeNested>::type&
nestedExpression() { return m_matrix; }
/** \internal */
// Resizing the transpose resizes the nested expression with swapped sizes.
void resize(Index nrows, Index ncols) {
m_matrix.resize(ncols,nrows);
}
protected:
typename internal::ref_selector<MatrixType>::non_const_type m_matrix;
};
namespace internal {
// Selects the base class of TransposeImpl. Both the direct-access and the
// non-direct-access cases currently resolve to the dense expression base;
// the boolean parameter is kept as a customization point.
template<typename MatrixType, bool HasDirectAccess = has_direct_access<MatrixType>::ret>
struct TransposeImpl_base
{
typedef typename dense_xpr_base<Transpose<MatrixType> >::type type;
};
template<typename MatrixType>
struct TransposeImpl_base<MatrixType, false>
{
typedef typename dense_xpr_base<Transpose<MatrixType> >::type type;
};
} // end namespace internal
// Generic API dispatcher
// Non-dense storage kinds dispatch to the generic expression base class.
template<typename XprType, typename StorageKind>
class TransposeImpl
: public internal::generic_xpr_base<Transpose<XprType> >::type
{
public:
typedef typename internal::generic_xpr_base<Transpose<XprType> >::type Base;
};
template<typename MatrixType> class TransposeImpl<MatrixType,Dense>
: public internal::TransposeImpl_base<MatrixType>::type
{
public:
typedef typename internal::TransposeImpl_base<MatrixType>::type Base;
using Base::coeffRef;
EIGEN_DENSE_PUBLIC_INTERFACE(Transpose<MatrixType>)
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(TransposeImpl)
// Strides are forwarded unchanged from the nested expression; the swap of
// inner/outer roles comes from the flipped storage-order flag in traits.
EIGEN_DEVICE_FUNC inline Index innerStride() const { return derived().nestedExpression().innerStride(); }
EIGEN_DEVICE_FUNC inline Index outerStride() const { return derived().nestedExpression().outerStride(); }
// Writable data access is only exposed when the nested expression is an
// lvalue; otherwise the pointer is const-qualified.
typedef typename internal::conditional<
internal::is_lvalue<MatrixType>::value,
Scalar,
const Scalar
>::type ScalarWithConstIfNotLvalue;
EIGEN_DEVICE_FUNC inline ScalarWithConstIfNotLvalue* data() { return derived().nestedExpression().data(); }
EIGEN_DEVICE_FUNC inline const Scalar* data() const { return derived().nestedExpression().data(); }
// FIXME: shall we keep the const version of coeffRef?
EIGEN_DEVICE_FUNC
inline const Scalar& coeffRef(Index rowId, Index colId) const
{
// Note the swapped indices: (row,col) of the transpose is (col,row) below.
return derived().nestedExpression().coeffRef(colId, rowId);
}
EIGEN_DEVICE_FUNC
inline const Scalar& coeffRef(Index index) const
{
return derived().nestedExpression().coeffRef(index);
}
};
/** \returns an expression of the transpose of *this.
*
* Example: \include MatrixBase_transpose.cpp
* Output: \verbinclude MatrixBase_transpose.out
*
* \warning If you want to replace a matrix by its own transpose, do \b NOT do this:
* \code
* m = m.transpose(); // bug!!! caused by aliasing effect
* \endcode
* Instead, use the transposeInPlace() method:
* \code
* m.transposeInPlace();
* \endcode
* which gives Eigen good opportunities for optimization, or alternatively you can also do:
* \code
* m = m.transpose().eval();
* \endcode
*
* \sa transposeInPlace(), adjoint() */
template<typename Derived>
inline Transpose<Derived>
DenseBase<Derived>::transpose()
{
// Lazy expression: no data is copied or moved here.
return TransposeReturnType(derived());
}
/** This is the const version of transpose().
*
* Make sure you read the warning for transpose() !
*
* \sa transposeInPlace(), adjoint() */
template<typename Derived>
inline typename DenseBase<Derived>::ConstTransposeReturnType
DenseBase<Derived>::transpose() const
{
// Lazy expression over a const nested object.
return ConstTransposeReturnType(derived());
}
/** \returns an expression of the adjoint (i.e. conjugate transpose) of *this.
*
* Example: \include MatrixBase_adjoint.cpp
* Output: \verbinclude MatrixBase_adjoint.out
*
* \warning If you want to replace a matrix by its own adjoint, do \b NOT do this:
* \code
* m = m.adjoint(); // bug!!! caused by aliasing effect
* \endcode
* Instead, use the adjointInPlace() method:
* \code
* m.adjointInPlace();
* \endcode
* which gives Eigen good opportunities for optimization, or alternatively you can also do:
* \code
* m = m.adjoint().eval();
* \endcode
*
* \sa adjointInPlace(), transpose(), conjugate(), class Transpose, class internal::scalar_conjugate_op */
template<typename Derived>
inline const typename MatrixBase<Derived>::AdjointReturnType
MatrixBase<Derived>::adjoint() const
{
// Builds the adjoint expression on top of the lazy transpose.
return AdjointReturnType(this->transpose());
}
/***************************************************************************
* "in place" transpose implementation
***************************************************************************/
namespace internal {
// Strategy selector for in-place transposition:
// - IsSquare: fixed-size square matrices can be transposed without resizing;
// - MatchPacketSize: additionally, PacketSize x PacketSize matrices with
//   packet access can use a vectorized in-register kernel.
template<typename MatrixType,
bool IsSquare = (MatrixType::RowsAtCompileTime == MatrixType::ColsAtCompileTime) && MatrixType::RowsAtCompileTime!=Dynamic,
bool MatchPacketSize =
(int(MatrixType::RowsAtCompileTime) == int(internal::packet_traits<typename MatrixType::Scalar>::size))
&& (internal::evaluator<MatrixType>::Flags&PacketAccessBit) >
struct inplace_transpose_selector;
template<typename MatrixType>
struct inplace_transpose_selector<MatrixType,true,false> { // square matrix
static void run(MatrixType& m) {
// Swap the strict upper triangle with the strict lower one; the diagonal
// stays in place, so no temporary is needed.
m.matrix().template triangularView<StrictlyUpper>().swap(m.matrix().transpose());
}
};
// TODO: vectorized path is currently limited to LargestPacketSize x LargestPacketSize cases only.
template<typename MatrixType>
struct inplace_transpose_selector<MatrixType,true,true> { // PacketSize x PacketSize
static void run(MatrixType& m) {
typedef typename MatrixType::Scalar Scalar;
typedef typename internal::packet_traits<typename MatrixType::Scalar>::type Packet;
const Index PacketSize = internal::packet_traits<Scalar>::size;
const Index Alignment = internal::evaluator<MatrixType>::Alignment;
// Load the whole matrix as a block of packets, transpose it in registers
// via ptranspose, then write the packets back.
PacketBlock<Packet> A;
for (Index i=0; i<PacketSize; ++i)
A.packet[i] = m.template packetByOuterInner<Alignment>(i,0);
internal::ptranspose(A);
for (Index i=0; i<PacketSize; ++i)
m.template writePacket<Alignment>(m.rowIndexByOuterInner(i,0), m.colIndexByOuterInner(i,0), A.packet[i]);
}
};
template<typename MatrixType,bool MatchPacketSize>
struct inplace_transpose_selector<MatrixType,false,MatchPacketSize> { // non square matrix
  // A matrix that is square only at runtime can still be transposed by
  // swapping the strict triangles; otherwise a full temporary evaluation is
  // required (the destination must then be resizable).
  static void run(MatrixType& m) {
    if (m.rows() != m.cols())
      m = m.transpose().eval();
    else
      m.matrix().template triangularView<StrictlyUpper>().swap(m.matrix().transpose());
  }
};
} // end namespace internal
/** This is the "in place" version of transpose(): it replaces \c *this by its own transpose.
* Thus, doing
* \code
* m.transposeInPlace();
* \endcode
* has the same effect on m as doing
* \code
* m = m.transpose().eval();
* \endcode
* and is faster and also safer because in the latter line of code, forgetting the eval() results
* in a bug caused by \ref TopicAliasing "aliasing".
*
* Notice however that this method is only useful if you want to replace a matrix by its own transpose.
* If you just need the transpose of a matrix, use transpose().
*
* \note if the matrix is not square, then \c *this must be a resizable matrix.
* This excludes (non-square) fixed-size matrices, block-expressions and maps.
*
* \sa transpose(), adjoint(), adjointInPlace() */
template<typename Derived>
inline void DenseBase<Derived>::transposeInPlace()
{
// A non-square matrix changes shape, so in that case both dimensions must
// be Dynamic (i.e. the matrix must be resizable).
eigen_assert((rows() == cols() || (RowsAtCompileTime == Dynamic && ColsAtCompileTime == Dynamic))
&& "transposeInPlace() called on a non-square non-resizable matrix");
internal::inplace_transpose_selector<Derived>::run(derived());
}
/***************************************************************************
* "in place" adjoint implementation
***************************************************************************/
/** This is the "in place" version of adjoint(): it replaces \c *this by its own transpose.
* Thus, doing
* \code
* m.adjointInPlace();
* \endcode
* has the same effect on m as doing
* \code
* m = m.adjoint().eval();
* \endcode
* and is faster and also safer because in the latter line of code, forgetting the eval() results
* in a bug caused by aliasing.
*
* Notice however that this method is only useful if you want to replace a matrix by its own adjoint.
* If you just need the adjoint of a matrix, use adjoint().
*
* \note if the matrix is not square, then \c *this must be a resizable matrix.
* This excludes (non-square) fixed-size matrices, block-expressions and maps.
*
* \sa transpose(), adjoint(), transposeInPlace() */
template<typename Derived>
inline void MatrixBase<Derived>::adjointInPlace()
{
// eval() forces a temporary, avoiding aliasing between *this and its own
// adjoint expression.
derived() = adjoint().eval();
}
#ifndef EIGEN_NO_DEBUG
// The following is to detect aliasing problems in most common cases.
namespace internal {
// Compile-time filter: aliasing through a transpose is only possible when
// source and destination differ in their transposed-ness.
template<bool DestIsTransposed, typename OtherDerived>
struct check_transpose_aliasing_compile_time_selector
{
enum { ret = bool(blas_traits<OtherDerived>::IsTransposed) != DestIsTransposed };
};
// For binary expressions, aliasing may come from either operand.
template<bool DestIsTransposed, typename BinOp, typename DerivedA, typename DerivedB>
struct check_transpose_aliasing_compile_time_selector<DestIsTransposed,CwiseBinaryOp<BinOp,DerivedA,DerivedB> >
{
enum { ret = bool(blas_traits<DerivedA>::IsTransposed) != DestIsTransposed
|| bool(blas_traits<DerivedB>::IsTransposed) != DestIsTransposed
};
};
// Runtime check: true when the source is transposed relative to the
// destination AND both actually refer to the same underlying data.
template<typename Scalar, bool DestIsTransposed, typename OtherDerived>
struct check_transpose_aliasing_run_time_selector
{
static bool run(const Scalar* dest, const OtherDerived& src)
{
return (bool(blas_traits<OtherDerived>::IsTransposed) != DestIsTransposed) && (dest!=0 && dest==(const Scalar*)extract_data(src));
}
};
// Binary-expression variant: test each operand's data pointer separately.
template<typename Scalar, bool DestIsTransposed, typename BinOp, typename DerivedA, typename DerivedB>
struct check_transpose_aliasing_run_time_selector<Scalar,DestIsTransposed,CwiseBinaryOp<BinOp,DerivedA,DerivedB> >
{
static bool run(const Scalar* dest, const CwiseBinaryOp<BinOp,DerivedA,DerivedB>& src)
{
return ((blas_traits<DerivedA>::IsTransposed != DestIsTransposed) && (dest!=0 && dest==(const Scalar*)extract_data(src.lhs())))
|| ((blas_traits<DerivedB>::IsTransposed != DestIsTransposed) && (dest!=0 && dest==(const Scalar*)extract_data(src.rhs())));
}
};
// the following selector, checkTransposeAliasing_impl, based on MightHaveTransposeAliasing,
// is because when the condition controlling the assert is known at compile time, ICC emits a warning.
// This is actually a good warning: in expressions that don't have any transposing, the condition is
// known at compile time to be false, and using that, we can avoid generating the code of the assert again
// and again for all these expressions that don't need it.
// Asserts (in debug builds) when assigning `other` to `dst` would alias
// through a transposition; the compile-time selector gates instantiation.
template<typename Derived, typename OtherDerived,
bool MightHaveTransposeAliasing
= check_transpose_aliasing_compile_time_selector
<blas_traits<Derived>::IsTransposed,OtherDerived>::ret
>
struct checkTransposeAliasing_impl
{
static void run(const Derived& dst, const OtherDerived& other)
{
eigen_assert((!check_transpose_aliasing_run_time_selector
<typename Derived::Scalar,blas_traits<Derived>::IsTransposed,OtherDerived>
::run(extract_data(dst), other))
&& "aliasing detected during transposition, use transposeInPlace() "
"or evaluate the rhs into a temporary using .eval()");
}
};
// When aliasing is impossible at compile time, the check compiles to nothing.
template<typename Derived, typename OtherDerived>
struct checkTransposeAliasing_impl<Derived, OtherDerived, false>
{
static void run(const Derived&, const OtherDerived&)
{
}
};
// Entry point used by the assignment machinery (debug builds only).
template<typename Dst, typename Src>
void check_for_aliasing(const Dst &dst, const Src &src)
{
internal::checkTransposeAliasing_impl<Dst, Src>::run(dst, src);
}
} // end namespace internal
#endif // EIGEN_NO_DEBUG
} // end namespace Eigen
#endif // EIGEN_TRANSPOSE_H
| 14,777 | 35.579208 | 134 | h |
abess | abess-master/python/include/Eigen/src/Core/VectorBlock.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2010 Gael Guennebaud <[email protected]>
// Copyright (C) 2006-2008 Benoit Jacob <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_VECTORBLOCK_H
#define EIGEN_VECTORBLOCK_H
namespace Eigen {
namespace internal {
// A VectorBlock is implemented as a Block whose shape is 1 x Size or
// Size x 1 depending on the storage order of the underlying vector.
template<typename VectorType, int Size>
struct traits<VectorBlock<VectorType, Size> >
: public traits<Block<VectorType,
traits<VectorType>::Flags & RowMajorBit ? 1 : Size,
traits<VectorType>::Flags & RowMajorBit ? Size : 1> >
{
};
/** \class VectorBlock
* \ingroup Core_Module
*
* \brief Expression of a fixed-size or dynamic-size sub-vector
*
* \tparam VectorType the type of the object in which we are taking a sub-vector
* \tparam Size size of the sub-vector we are taking at compile time (optional)
*
* This class represents an expression of either a fixed-size or dynamic-size sub-vector.
* It is the return type of DenseBase::segment(Index,Index) and DenseBase::segment<int>(Index) and
* most of the time this is the only way it is used.
*
* However, if you want to directly maniputate sub-vector expressions,
* for instance if you want to write a function returning such an expression, you
* will need to use this class.
*
* Here is an example illustrating the dynamic case:
* \include class_VectorBlock.cpp
* Output: \verbinclude class_VectorBlock.out
*
* \note Even though this expression has dynamic size, in the case where \a VectorType
* has fixed size, this expression inherits a fixed maximal size which means that evaluating
* it does not cause a dynamic memory allocation.
*
* Here is an example illustrating the fixed-size case:
* \include class_FixedVectorBlock.cpp
* Output: \verbinclude class_FixedVectorBlock.out
*
* \sa class Block, DenseBase::segment(Index,Index,Index,Index), DenseBase::segment(Index,Index)
*/
template<typename VectorType, int Size> class VectorBlock
: public Block<VectorType,
internal::traits<VectorType>::Flags & RowMajorBit ? 1 : Size,
internal::traits<VectorType>::Flags & RowMajorBit ? Size : 1>
{
typedef Block<VectorType,
internal::traits<VectorType>::Flags & RowMajorBit ? 1 : Size,
internal::traits<VectorType>::Flags & RowMajorBit ? Size : 1> Base;
enum {
// True when the underlying vector is not row-major storage.
IsColVector = !(internal::traits<VectorType>::Flags & RowMajorBit)
};
public:
EIGEN_DENSE_PUBLIC_INTERFACE(VectorBlock)
using Base::operator=;
/** Dynamic-size constructor
*/
// Maps the 1D (start, size) pair onto the 2D Block coordinates according
// to the vector's orientation.
EIGEN_DEVICE_FUNC
inline VectorBlock(VectorType& vector, Index start, Index size)
: Base(vector,
IsColVector ? start : 0, IsColVector ? 0 : start,
IsColVector ? size : 1, IsColVector ? 1 : size)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorBlock);
}
/** Fixed-size constructor
*/
EIGEN_DEVICE_FUNC
inline VectorBlock(VectorType& vector, Index start)
: Base(vector, IsColVector ? start : 0, IsColVector ? 0 : start)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorBlock);
}
};
} // end namespace Eigen
#endif // EIGEN_VECTORBLOCK_H
| 3,462 | 34.701031 | 99 | h |
abess | abess-master/python/include/Eigen/src/Core/arch/AVX/MathFunctions.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Pedro Gonnet ([email protected])
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_MATH_FUNCTIONS_AVX_H
#define EIGEN_MATH_FUNCTIONS_AVX_H
/* The sin, cos, exp, and log functions of this file are loosely derived from
* Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/
*/
namespace Eigen {
namespace internal {
// Shift each 32-bit lane of v left by n bits.
inline Packet8i pshiftleft(Packet8i v, int n)
{
#ifdef EIGEN_VECTORIZE_AVX2
  // AVX2 offers a native 256-bit integer shift.
  return _mm256_slli_epi32(v, n);
#else
  // AVX1 lacks 256-bit integer shifts: split into the two 128-bit SSE
  // lanes, shift each half, and reassemble the 256-bit register.
  __m128i low_half  = _mm_slli_epi32(_mm256_extractf128_si256(v, 0), n);
  __m128i high_half = _mm_slli_epi32(_mm256_extractf128_si256(v, 1), n);
  return _mm256_insertf128_si256(_mm256_castsi128_si256(low_half), (high_half), 1);
#endif
}
// Logically shift each 32-bit lane of v right by n bits, then convert the
// integer result to float.
inline Packet8f pshiftright(Packet8f v, int n)
{
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_cvtepi32_ps(_mm256_srli_epi32(_mm256_castps_si256(v), n));
#else
  // AVX1 fallback: shift the two 128-bit halves separately and reassemble
  // before the int-to-float conversion.
  __m128i low_half  = _mm_srli_epi32(_mm256_extractf128_si256(_mm256_castps_si256(v), 0), n);
  __m128i high_half = _mm_srli_epi32(_mm256_extractf128_si256(_mm256_castps_si256(v), 1), n);
  return _mm256_cvtepi32_ps(_mm256_insertf128_si256(_mm256_castsi128_si256(low_half), (high_half), 1));
#endif
}
// Sine function
// Computes sin(x) by wrapping x to the interval [-Pi/4,3*Pi/4] and
// evaluating interpolants in [-Pi/4,Pi/4] or [Pi/4,3*Pi/4]. The interpolants
// are (anti-)symmetric and thus have only odd/even coefficients
template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f
psin<Packet8f>(const Packet8f& _x) {
Packet8f x = _x;
// Some useful values.
_EIGEN_DECLARE_CONST_Packet8i(one, 1);
_EIGEN_DECLARE_CONST_Packet8f(one, 1.0f);
_EIGEN_DECLARE_CONST_Packet8f(two, 2.0f);
_EIGEN_DECLARE_CONST_Packet8f(one_over_four, 0.25f);
_EIGEN_DECLARE_CONST_Packet8f(one_over_pi, 3.183098861837907e-01f);
// Pi split into three decreasing parts so the argument reduction below can
// subtract it accurately in single precision.
_EIGEN_DECLARE_CONST_Packet8f(neg_pi_first, -3.140625000000000e+00f);
_EIGEN_DECLARE_CONST_Packet8f(neg_pi_second, -9.670257568359375e-04f);
_EIGEN_DECLARE_CONST_Packet8f(neg_pi_third, -6.278329571784980e-07f);
_EIGEN_DECLARE_CONST_Packet8f(four_over_pi, 1.273239544735163e+00f);
// Map x from [-Pi/4,3*Pi/4] to z in [-1,3] and subtract the shifted period.
Packet8f z = pmul(x, p8f_one_over_pi);
Packet8f shift = _mm256_floor_ps(padd(z, p8f_one_over_four));
x = pmadd(shift, p8f_neg_pi_first, x);
x = pmadd(shift, p8f_neg_pi_second, x);
x = pmadd(shift, p8f_neg_pi_third, x);
z = pmul(x, p8f_four_over_pi);
// Make a mask for the entries that need flipping, i.e. wherever the shift
// is odd.
Packet8i shift_ints = _mm256_cvtps_epi32(shift);
Packet8i shift_isodd = _mm256_castps_si256(_mm256_and_ps(_mm256_castsi256_ps(shift_ints), _mm256_castsi256_ps(p8i_one)));
// Move the parity bit into the sign-bit position (bit 31).
Packet8i sign_flip_mask = pshiftleft(shift_isodd, 31);
// Create a mask for which interpolant to use, i.e. if z > 1, then the mask
// is set to ones for that entry.
Packet8f ival_mask = _mm256_cmp_ps(z, p8f_one, _CMP_GT_OQ);
// Evaluate the polynomial for the interval [1,3] in z.
// Only even powers appear (the interpolant is symmetric around z = 2).
_EIGEN_DECLARE_CONST_Packet8f(coeff_right_0, 9.999999724233232e-01f);
_EIGEN_DECLARE_CONST_Packet8f(coeff_right_2, -3.084242535619928e-01f);
_EIGEN_DECLARE_CONST_Packet8f(coeff_right_4, 1.584991525700324e-02f);
_EIGEN_DECLARE_CONST_Packet8f(coeff_right_6, -3.188805084631342e-04f);
Packet8f z_minus_two = psub(z, p8f_two);
Packet8f z_minus_two2 = pmul(z_minus_two, z_minus_two);
Packet8f right = pmadd(p8f_coeff_right_6, z_minus_two2, p8f_coeff_right_4);
right = pmadd(right, z_minus_two2, p8f_coeff_right_2);
right = pmadd(right, z_minus_two2, p8f_coeff_right_0);
// Evaluate the polynomial for the interval [-1,1] in z.
// Only odd powers appear (the interpolant is antisymmetric around z = 0).
_EIGEN_DECLARE_CONST_Packet8f(coeff_left_1, 7.853981525427295e-01f);
_EIGEN_DECLARE_CONST_Packet8f(coeff_left_3, -8.074536727092352e-02f);
_EIGEN_DECLARE_CONST_Packet8f(coeff_left_5, 2.489871967827018e-03f);
_EIGEN_DECLARE_CONST_Packet8f(coeff_left_7, -3.587725841214251e-05f);
Packet8f z2 = pmul(z, z);
Packet8f left = pmadd(p8f_coeff_left_7, z2, p8f_coeff_left_5);
left = pmadd(left, z2, p8f_coeff_left_3);
left = pmadd(left, z2, p8f_coeff_left_1);
left = pmul(left, z);
// Assemble the results, i.e. select the left and right polynomials.
left = _mm256_andnot_ps(ival_mask, left);
right = _mm256_and_ps(ival_mask, right);
Packet8f res = _mm256_or_ps(left, right);
// Flip the sign on the odd intervals and return the result.
res = _mm256_xor_ps(res, _mm256_castsi256_ps(sign_flip_mask));
return res;
}
// Natural logarithm
// Computes log(x) as log(2^e * m) = C*e + log(m), where the constant C =log(2)
// and m is in the range [sqrt(1/2),sqrt(2)). In this range, the logarithm can
// be easily approximated by a polynomial centered on m=1 for stability.
// TODO(gonnet): Further reduce the interval allowing for lower-degree
// polynomial interpolants -> ... -> profit!
template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f
plog<Packet8f>(const Packet8f& _x) {
  Packet8f x = _x;
  _EIGEN_DECLARE_CONST_Packet8f(1, 1.0f);
  _EIGEN_DECLARE_CONST_Packet8f(half, 0.5f);
  // 126 = 127 - 1: the standard float exponent bias minus one, because the
  // mantissa is renormalized into [0.5, 1) below (which adds 1 to the
  // true exponent).
  _EIGEN_DECLARE_CONST_Packet8f(126f, 126.0f);
  // Mask that clears the 8 exponent bits of an IEEE float, keeping the sign
  // and mantissa bits.
  _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(inv_mant_mask, ~0x7f800000);
  // The smallest non denormalized float number.
  _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(min_norm_pos, 0x00800000);
  _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(minus_inf, 0xff800000);
  // Polynomial coefficients.
  _EIGEN_DECLARE_CONST_Packet8f(cephes_SQRTHF, 0.707106781186547524f);
  _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p0, 7.0376836292E-2f);
  _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p1, -1.1514610310E-1f);
  _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p2, 1.1676998740E-1f);
  _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p3, -1.2420140846E-1f);
  _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p4, +1.4249322787E-1f);
  _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p5, -1.6668057665E-1f);
  _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p6, +2.0000714765E-1f);
  _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p7, -2.4999993993E-1f);
  _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p8, +3.3333331174E-1f);
  _EIGEN_DECLARE_CONST_Packet8f(cephes_log_q1, -2.12194440e-4f);
  _EIGEN_DECLARE_CONST_Packet8f(cephes_log_q2, 0.693359375f);
  Packet8f invalid_mask = _mm256_cmp_ps(x, _mm256_setzero_ps(), _CMP_NGE_UQ); // not greater equal is true if x is NaN
  Packet8f iszero_mask = _mm256_cmp_ps(x, _mm256_setzero_ps(), _CMP_EQ_OQ);
  // Truncate input values to the minimum positive normal.
  x = pmax(x, p8f_min_norm_pos);
  // Extract the biased exponent field by shifting the float's bit pattern
  // right by 23 (pshiftright presumably returns it as a float packet —
  // defined with the other AVX packet helpers outside this view).
  Packet8f emm0 = pshiftright(x,23);
  Packet8f e = _mm256_sub_ps(emm0, p8f_126f);
  // Set the exponents to -1, i.e. x are in the range [0.5,1).
  x = _mm256_and_ps(x, p8f_inv_mant_mask);
  x = _mm256_or_ps(x, p8f_half);
  // part2: Shift the inputs from the range [0.5,1) to [sqrt(1/2),sqrt(2))
  // and shift by -1. The values are then centered around 0, which improves
  // the stability of the polynomial evaluation.
  // if( x < SQRTHF ) {
  //   e -= 1;
  //   x = x + x - 1.0;
  // } else { x = x - 1.0; }
  Packet8f mask = _mm256_cmp_ps(x, p8f_cephes_SQRTHF, _CMP_LT_OQ);
  Packet8f tmp = _mm256_and_ps(x, mask);
  x = psub(x, p8f_1);
  e = psub(e, _mm256_and_ps(p8f_1, mask));
  x = padd(x, tmp);
  Packet8f x2 = pmul(x, x);
  Packet8f x3 = pmul(x2, x);
  // Evaluate the polynomial approximant of degree 8 in three parts, probably
  // to improve instruction-level parallelism.
  Packet8f y, y1, y2;
  y  = pmadd(p8f_cephes_log_p0, x, p8f_cephes_log_p1);
  y1 = pmadd(p8f_cephes_log_p3, x, p8f_cephes_log_p4);
  y2 = pmadd(p8f_cephes_log_p6, x, p8f_cephes_log_p7);
  y  = pmadd(y, x, p8f_cephes_log_p2);
  y1 = pmadd(y1, x, p8f_cephes_log_p5);
  y2 = pmadd(y2, x, p8f_cephes_log_p8);
  y = pmadd(y, x3, y1);
  y = pmadd(y, x3, y2);
  y = pmul(y, x3);
  // Add the logarithm of the exponent back to the result of the interpolation.
  // log(2) is added in two pieces (q2 + q1, with q1 the small correction) so
  // the rounding error of the large term does not swamp the polynomial.
  y1 = pmul(e, p8f_cephes_log_q1);
  tmp = pmul(x2, p8f_half);
  y = padd(y, y1);
  x = psub(x, tmp);
  y2 = pmul(e, p8f_cephes_log_q2);
  x = padd(x, y);
  x = padd(x, y2);
  // Filter out invalid inputs, i.e. negative arg will be NAN, 0 will be -INF.
  // iszero lanes select -inf; invalid (negative/NaN) lanes OR in the NaN
  // pattern produced by the unordered compare above.
  return _mm256_or_ps(
      _mm256_andnot_ps(iszero_mask, _mm256_or_ps(x, invalid_mask)),
      _mm256_and_ps(iszero_mask, p8f_minus_inf));
}
// Exponential function. Works by writing "x = m*log(2) + r" where
// "m = floor(x/log(2)+1/2)" and "r" is the remainder. The result is then
// "exp(x) = 2^m*exp(r)" where exp(r) is in the range [-1,1).
template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f
pexp<Packet8f>(const Packet8f& _x) {
  _EIGEN_DECLARE_CONST_Packet8f(1, 1.0f);
  _EIGEN_DECLARE_CONST_Packet8f(half, 0.5f);
  // IEEE single-precision exponent bias, used to build 2^m below.
  _EIGEN_DECLARE_CONST_Packet8f(127, 127.0f);
  _EIGEN_DECLARE_CONST_Packet8f(exp_hi, 88.3762626647950f);
  _EIGEN_DECLARE_CONST_Packet8f(exp_lo, -88.3762626647949f);
  // 1/log(2), used to split x into m*log(2) + r.
  _EIGEN_DECLARE_CONST_Packet8f(cephes_LOG2EF, 1.44269504088896341f);
  _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p0, 1.9875691500E-4f);
  _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p1, 1.3981999507E-3f);
  _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p2, 8.3334519073E-3f);
  _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p3, 4.1665795894E-2f);
  _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p4, 1.6666665459E-1f);
  _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p5, 5.0000001201E-1f);
  // Clamp x (exp_hi/exp_lo bound the arguments for which the 2^m scaling
  // below stays finite).
  Packet8f x = pmax(pmin(_x, p8f_exp_hi), p8f_exp_lo);
  // Express exp(x) as exp(m*ln(2) + r), start by extracting
  // m = floor(x/ln(2) + 0.5).
  Packet8f m = _mm256_floor_ps(pmadd(x, p8f_cephes_LOG2EF, p8f_half));
  // Get r = x - m*ln(2). If no FMA instructions are available, m*ln(2) is
  // subtracted out in two parts, m*C1+m*C2 = m*ln(2), to avoid accumulating
  // truncation errors. Note that we don't use the "pmadd" function here to
  // ensure that a precision-preserving FMA instruction is used.
#ifdef EIGEN_VECTORIZE_FMA
  _EIGEN_DECLARE_CONST_Packet8f(nln2, -0.6931471805599453f);
  Packet8f r = _mm256_fmadd_ps(m, p8f_nln2, x);
#else
  _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_C1, 0.693359375f);
  _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_C2, -2.12194440e-4f);
  Packet8f r = psub(x, pmul(m, p8f_cephes_exp_C1));
  r = psub(r, pmul(m, p8f_cephes_exp_C2));
#endif
  Packet8f r2 = pmul(r, r);
  // TODO(gonnet): Split into odd/even polynomials and try to exploit
  // instruction-level parallelism.
  // Degree-5 Horner evaluation of the exp(r) polynomial (Cephes expf
  // coefficients).
  Packet8f y = p8f_cephes_exp_p0;
  y = pmadd(y, r, p8f_cephes_exp_p1);
  y = pmadd(y, r, p8f_cephes_exp_p2);
  y = pmadd(y, r, p8f_cephes_exp_p3);
  y = pmadd(y, r, p8f_cephes_exp_p4);
  y = pmadd(y, r, p8f_cephes_exp_p5);
  y = pmadd(y, r2, r);
  y = padd(y, p8f_1);
  // Build emm0 = 2^m by placing the biased exponent m+127 into the exponent
  // bit field.
  Packet8i emm0 = _mm256_cvttps_epi32(padd(m, p8f_127));
  emm0 = pshiftleft(emm0, 23);
  // Return 2^m * exp(r).
  // The max with the original input mirrors the Packet4d version below,
  // where it is used to catch non-finite values in the input.
  return pmax(pmul(y, _mm256_castsi256_ps(emm0)), _x);
}
// Hyperbolic tangent for eight packed floats. The computation itself lives
// in internal::generic_fast_tanh_float; this specialization merely routes
// the AVX packet type to that shared implementation.
template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f
ptanh<Packet8f>(const Packet8f& x) {
  const Packet8f result = internal::generic_fast_tanh_float(x);
  return result;
}
// Double-precision exponential for four packed doubles. Uses the Cephes
// rational (Pade-style) approximant: exp(g) = 1 + 2*P(g)/(Q(g) - P(g)),
// scaled by 2^n where x = g + n*log(2).
template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4d
pexp<Packet4d>(const Packet4d& _x) {
  Packet4d x = _x;
  _EIGEN_DECLARE_CONST_Packet4d(1, 1.0);
  _EIGEN_DECLARE_CONST_Packet4d(2, 2.0);
  _EIGEN_DECLARE_CONST_Packet4d(half, 0.5);
  _EIGEN_DECLARE_CONST_Packet4d(exp_hi, 709.437);
  _EIGEN_DECLARE_CONST_Packet4d(exp_lo, -709.436139303);
  // 1/log(2), used to split x into g + n*log(2).
  _EIGEN_DECLARE_CONST_Packet4d(cephes_LOG2EF, 1.4426950408889634073599);
  // Numerator (p) and denominator (q) coefficients of the rational
  // interpolant, plus the two-piece split of log(2) (C1 + C2 = log(2)).
  _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_p0, 1.26177193074810590878e-4);
  _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_p1, 3.02994407707441961300e-2);
  _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_p2, 9.99999999999999999910e-1);
  _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_q0, 3.00198505138664455042e-6);
  _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_q1, 2.52448340349684104192e-3);
  _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_q2, 2.27265548208155028766e-1);
  _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_q3, 2.00000000000000000009e0);
  _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_C1, 0.693145751953125);
  _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_C2, 1.42860682030941723212e-6);
  // IEEE double-precision exponent bias, used to build 2^n below.
  _EIGEN_DECLARE_CONST_Packet4i(1023, 1023);
  Packet4d tmp, fx;
  // clamp x
  x = pmax(pmin(x, p4d_exp_hi), p4d_exp_lo);
  // Express exp(x) as exp(g + n*log(2)).
  fx = pmadd(p4d_cephes_LOG2EF, x, p4d_half);
  // Get the integer modulus of log(2), i.e. the "n" described above.
  fx = _mm256_floor_pd(fx);
  // Get the remainder modulo log(2), i.e. the "g" described above. Subtract
  // n*log(2) out in two steps, i.e. n*C1 + n*C2, C1+C2=log2 to get the last
  // digits right.
  tmp = pmul(fx, p4d_cephes_exp_C1);
  Packet4d z = pmul(fx, p4d_cephes_exp_C2);
  x = psub(x, tmp);
  x = psub(x, z);
  Packet4d x2 = pmul(x, x);
  // Evaluate the numerator polynomial of the rational interpolant.
  Packet4d px = p4d_cephes_exp_p0;
  px = pmadd(px, x2, p4d_cephes_exp_p1);
  px = pmadd(px, x2, p4d_cephes_exp_p2);
  px = pmul(px, x);
  // Evaluate the denominator polynomial of the rational interpolant.
  Packet4d qx = p4d_cephes_exp_q0;
  qx = pmadd(qx, x2, p4d_cephes_exp_q1);
  qx = pmadd(qx, x2, p4d_cephes_exp_q2);
  qx = pmadd(qx, x2, p4d_cephes_exp_q3);
  // I don't really get this bit, copied from the SSE2 routines, so...
  // TODO(gonnet): Figure out what is going on here, perhaps find a better
  // rational interpolant?
  // (The two lines compute exp(g) = 1 + 2*px/(qx - px) — the Cephes expm1
  // rational form recentered around 1.)
  x = _mm256_div_pd(px, psub(qx, px));
  x = pmadd(p4d_2, x, p4d_1);
  // Build e=2^n by constructing the exponents in a 128-bit vector and
  // shifting them to where they belong in double-precision values.
  __m128i emm0 = _mm256_cvtpd_epi32(fx);
  emm0 = _mm_add_epi32(emm0, p4i_1023);
  // Spread the four 32-bit biased exponents so each lands in the high half
  // of its eventual 64-bit lane.
  emm0 = _mm_shuffle_epi32(emm0, _MM_SHUFFLE(3, 1, 2, 0));
  __m128i lo = _mm_slli_epi64(emm0, 52);
  __m128i hi = _mm_slli_epi64(_mm_srli_epi64(emm0, 32), 52);
  __m256i e = _mm256_insertf128_si256(_mm256_setzero_si256(), lo, 0);
  e = _mm256_insertf128_si256(e, hi, 1);
  // Construct the result 2^n * exp(g) = e * x. The max is used to catch
  // non-finite values in the input.
  return pmax(pmul(x, _mm256_castsi256_pd(e)), _x);
}
// Functions for sqrt.
// The EIGEN_FAST_MATH version uses the _mm_rsqrt_ps approximation and one step
// of Newton's method, at a cost of 1-2 bits of precision as opposed to the
// exact solution. It does not handle +inf, or denormalized numbers correctly.
// The main advantage of this approach is not just speed, but also the fact that
// it can be inlined and pipelined with other computations, further reducing its
// effective latency. This is similar to Quake3's fast inverse square root.
// For detail see here: http://www.beyond3d.com/content/articles/8/
#if EIGEN_FAST_MATH
// Fast approximate sqrt for eight packed floats (EIGEN_FAST_MATH path):
// sqrt(x) = x * rsqrt(x), refined with one Newton step.
template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f
psqrt<Packet8f>(const Packet8f& _x) {
  Packet8f half = pmul(_x, pset1<Packet8f>(.5f));
  // Lanes holding non-negative inputs below the smallest positive normal
  // float; these are flushed to zero at the end (per the header comment,
  // the rsqrt approximation does not handle denormals correctly).
  Packet8f denormal_mask = _mm256_and_ps(
      _mm256_cmp_ps(_x, pset1<Packet8f>((std::numeric_limits<float>::min)()),
                    _CMP_LT_OQ),
      _mm256_cmp_ps(_x, _mm256_setzero_ps(), _CMP_GE_OQ));
  // Compute approximate reciprocal sqrt.
  Packet8f x = _mm256_rsqrt_ps(_x);
  // Do a single step of Newton's iteration: x *= 1.5 - (0.5*_x)*x^2.
  x = pmul(x, psub(pset1<Packet8f>(1.5f), pmul(half, pmul(x,x))));
  // Flush results for denormals to zero.
  return _mm256_andnot_ps(denormal_mask, pmul(_x,x));
}
#else
// Exact packed square root (EIGEN_FAST_MATH disabled): delegate all eight
// lanes to the hardware square-root instruction.
template <> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet8f psqrt<Packet8f>(const Packet8f& x) {
  const Packet8f exact_root = _mm256_sqrt_ps(x);
  return exact_root;
}
#endif
// Square root for four packed doubles: a single hardware instruction,
// no fast-math approximation exists for this type.
template <> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet4d psqrt<Packet4d>(const Packet4d& x) {
  const Packet4d exact_root = _mm256_sqrt_pd(x);
  return exact_root;
}
#if EIGEN_FAST_MATH
// Fast approximate reciprocal sqrt for eight packed floats
// (EIGEN_FAST_MATH path): hardware rsqrt refined with one Newton step,
// with special-case handling for negative, zero, and denormal inputs.
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet8f prsqrt<Packet8f>(const Packet8f& _x) {
  // Bit patterns of the special results: +inf and a quiet NaN.
  _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(inf, 0x7f800000);
  _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(nan, 0x7fc00000);
  _EIGEN_DECLARE_CONST_Packet8f(one_point_five, 1.5f);
  _EIGEN_DECLARE_CONST_Packet8f(minus_half, -0.5f);
  // Bit pattern of the smallest positive normal float.
  _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(flt_min, 0x00800000);
  Packet8f neg_half = pmul(_x, p8f_minus_half);
  // select only the inverse sqrt of positive normal inputs (denormals are
  // flushed to zero and cause infs as well).
  Packet8f le_zero_mask = _mm256_cmp_ps(_x, p8f_flt_min, _CMP_LT_OQ);
  Packet8f x = _mm256_andnot_ps(le_zero_mask, _mm256_rsqrt_ps(_x));
  // Fill in NaNs and Infs for the negative/zero entries.
  Packet8f neg_mask = _mm256_cmp_ps(_x, _mm256_setzero_ps(), _CMP_LT_OQ);
  Packet8f zero_mask = _mm256_andnot_ps(neg_mask, le_zero_mask);
  Packet8f infs_and_nans = _mm256_or_ps(_mm256_and_ps(neg_mask, p8f_nan),
                                        _mm256_and_ps(zero_mask, p8f_inf));
  // Do a single step of Newton's iteration: x *= 1.5 + (-0.5*_x)*x^2.
  x = pmul(x, pmadd(neg_half, pmul(x, x), p8f_one_point_five));
  // Insert NaNs and Infs in all the right places (the masked lanes of x
  // are zero here, so OR-ing in the special values is a select).
  return _mm256_or_ps(x, infs_and_nans);
}
#else
// Reciprocal square root without the fast-math approximation: take the
// exact square root first, then a full-precision divide of 1 by it.
template <> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet8f prsqrt<Packet8f>(const Packet8f& x) {
  _EIGEN_DECLARE_CONST_Packet8f(one, 1.0f);
  const Packet8f root = _mm256_sqrt_ps(x);
  return _mm256_div_ps(p8f_one, root);
}
#endif
// Double-precision reciprocal square root: exact sqrt followed by a
// full-precision divide (no fast rsqrt instruction exists for doubles).
template <> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet4d prsqrt<Packet4d>(const Packet4d& x) {
  _EIGEN_DECLARE_CONST_Packet4d(one, 1.0);
  const Packet4d root = _mm256_sqrt_pd(x);
  return _mm256_div_pd(p4d_one, root);
}
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_MATH_FUNCTIONS_AVX_H