Dataset columns, with the type and value range reported for each:

  patch   string                 length 17 to 31.2k characters
  y       int64                  min 1, max 1
  oldf    string                 length 0 to 2.21M characters
  idx     int64                  min 1, max 1
  id      int64                  min 4.29k, max 68.4k
  msg     string                 length 8 to 843 characters
  proj    string (categorical)   212 distinct values
  lang    string (categorical)   9 distinct values

Each record below begins with its patch field, shown as a raw diff hunk; the remaining fields are labeled inline.
@@ -57,8 +57,8 @@ func NewCmdPoolDescribe() *cobra.Command { Use: "describe", Short: "Describes the pools", Long: poolDescribeCommandHelpText, - Run: func(cmd *cobra.Command, args []string) { - util.CheckErr(options.runPoolDescribe(cmd), util.Fatal) + Run: func(_ *cobra.Command, args []string) { + util.CheckErr(options.runPoolDescribe(), util.Fatal) }, }
y: 1

oldf (full file before the patch):
/* Copyright 2017 The OpenEBS Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package pool import ( "fmt" "github.com/openebs/maya/pkg/client/mapiserver" "github.com/openebs/maya/pkg/util" "github.com/spf13/cobra" ) var ( poolDescribeCommandHelpText = ` This command displays available pools. Usage: mayactl pool decribe -poolname <PoolName> $ mayactl pool decribe -poolname <PoolName> ` ) const poolDescribeTemplate = ` Pool Details : -------------- Storage Pool Name : {{ .ObjectMeta.Name }} Node Name : {{ index .ObjectMeta.Labels "kubernetes.io/hostname" }} CAS Template Used : {{ index .ObjectMeta.Labels "openebs.io/cas-template-name" }} CAS Type : {{ index .ObjectMeta.Labels "openebs.io/cas-type" }} StoragePoolClaim : {{ index .ObjectMeta.Labels "openebs.io/storage-pool-claim" }} UID : {{ .ObjectMeta.UID }} Pool Type : {{ .Spec.PoolSpec.PoolType }} Over Provisioning : {{ .Spec.PoolSpec.OverProvisioning }} Disk List : ----------- {{ if eq (len .Spec.Group) 0 }}No disks present{{ else }}{{range $item := .Spec.Group }}{{range $disks := $item.Item }} {{printf "%s\n" $disks.Name }}{{ end }}{{ end }}{{ end }} ` // NewCmdPoolDescribe displays info of pool func NewCmdPoolDescribe() *cobra.Command { cmd := &cobra.Command{ Use: "describe", Short: "Describes the pools", Long: poolDescribeCommandHelpText, Run: func(cmd *cobra.Command, args []string) { util.CheckErr(options.runPoolDescribe(cmd), util.Fatal) }, } cmd.Flags().StringVarP(&options.poolName, "poolname", "", options.poolName, "a unique pool name.") return cmd } // runPoolDescrive makes pool-read API request to maya-apiserver func (c *CmdPoolOptions) runPoolDescribe(cmd *cobra.Command) error { if len(c.poolName) == 0 { return fmt.Errorf("error: --poolname not specified") } resp, err := mapiserver.ReadPool(c.poolName) if err != nil { return fmt.Errorf("Error Reading pool: %v", err) } return mapiserver.Print(poolDescribeTemplate, resp) }
idx:  1
id:   14,672
msg:  It does not look idiomatic.
proj: openebs-maya
lang: go
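The review comment on this record ("It does not look idiomatic.") and the patch point at the same Go idiom: a closure should not name parameters it never uses, and a helper should not accept a *cobra.Command it ignores. The sketch below is a hypothetical, self-contained illustration of that idiom using the same cobra API as the patched file; it is not the project's actual code.

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// runPoolDescribe stands in for the helper from the patch: it no longer
// takes the *cobra.Command it never used.
func runPoolDescribe() error {
	fmt.Println("describing pool")
	return nil
}

func main() {
	cmd := &cobra.Command{
		Use:   "describe",
		Short: "Describes the pools",
		// Parameters the closure does not use are replaced with the blank
		// identifier instead of being named and then ignored.
		Run: func(_ *cobra.Command, _ []string) {
			if err := runPoolDescribe(); err != nil {
				fmt.Println(err)
			}
		},
	}
	if err := cmd.Execute(); err != nil {
		fmt.Println(err)
	}
}
```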
@@ -3,9 +3,12 @@ package sql_test import ( "testing" + _ "github.com/mattn/go-sqlite3" + "github.com/DATA-DOG/go-sqlmock" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + "github.com/influxdata/flux" _ "github.com/influxdata/flux/builtin" // We need to import the builtins for the tests to work. "github.com/influxdata/flux/dependencies/dependenciestest"
y: 1

oldf (full file before the patch):
package sql_test import ( "testing" "github.com/DATA-DOG/go-sqlmock" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/influxdata/flux" _ "github.com/influxdata/flux/builtin" // We need to import the builtins for the tests to work. "github.com/influxdata/flux/dependencies/dependenciestest" "github.com/influxdata/flux/dependencies/url" "github.com/influxdata/flux/execute" "github.com/influxdata/flux/execute/executetest" "github.com/influxdata/flux/plan" "github.com/influxdata/flux/querytest" "github.com/influxdata/flux/stdlib/influxdata/influxdb" fsql "github.com/influxdata/flux/stdlib/sql" "github.com/influxdata/flux/values" ) func TestSqlTo(t *testing.T) { tests := []querytest.NewQueryTestCase{ { Name: "from with database", Raw: `import "sql" from(bucket: "mybucket") |> sql.to(driverName:"sqlmock", dataSourceName:"root@/db", table:"TestTable")`, Want: &flux.Spec{ Operations: []*flux.Operation{ { ID: "from0", Spec: &influxdb.FromOpSpec{ Bucket: "mybucket", }, }, { ID: "toSQL1", Spec: &fsql.ToSQLOpSpec{ DriverName: "sqlmock", DataSourceName: "root@/db", Table: "TestTable", }, }, }, Edges: []flux.Edge{ {Parent: "from0", Child: "toSQL1"}, }, }, }, } for _, tc := range tests { tc := tc t.Run(tc.Name, func(t *testing.T) { t.Parallel() querytest.NewQueryTestHelper(t, tc) }) } } func TestToSQL_Process(t *testing.T) { driverName := "sqlmock" dsn := "root@/db" _, _, _ = sqlmock.NewWithDSN(dsn) type wanted struct { Table []*executetest.Table ColumnNames []string ValueStrings [][]string ValueArgs [][]interface{} } testCases := []struct { name string spec *fsql.ToSQLProcedureSpec data flux.Table want wanted }{ { name: "coltable with name in _measurement", spec: &fsql.ToSQLProcedureSpec{ Spec: &fsql.ToSQLOpSpec{ DriverName: driverName, DataSourceName: dsn, Table: "TestTable2", }, }, data: executetest.MustCopyTable(&executetest.Table{ ColMeta: []flux.ColMeta{ {Label: "_time", Type: flux.TTime}, {Label: "_measurement", Type: flux.TString}, {Label: "_value", Type: flux.TFloat}, {Label: "fred", Type: flux.TString}, }, Data: [][]interface{}{ {execute.Time(11), "a", 2.0, "one"}, {execute.Time(21), "a", 2.0, "one"}, {execute.Time(21), "b", 1.0, "seven"}, {execute.Time(31), "a", 3.0, "nine"}, {execute.Time(41), "c", 4.0, "elevendyone"}, }, }), want: wanted{ Table: []*executetest.Table{{ ColMeta: []flux.ColMeta{ {Label: "_time", Type: flux.TTime}, {Label: "_measurement", Type: flux.TString}, {Label: "_value", Type: flux.TFloat}, {Label: "fred", Type: flux.TString}, }, Data: [][]interface{}{ {execute.Time(11), "a", 2.0, "one"}, {execute.Time(21), "a", 2.0, "one"}, {execute.Time(21), "b", 1.0, "seven"}, {execute.Time(31), "a", 3.0, "nine"}, {execute.Time(41), "c", 4.0, "elevendyone"}, }, }}, ColumnNames: []string{"_time", "_measurement", "_value", "fred"}, ValueStrings: [][]string{{"(?,?,?,?)", "(?,?,?,?)", "(?,?,?,?)", "(?,?,?,?)", "(?,?,?,?)"}}, ValueArgs: [][]interface{}{{ values.Time(int64(execute.Time(11))).Time(), "a", 2.0, "one", values.Time(int64(execute.Time(21))).Time(), "a", 2.0, "one", values.Time(int64(execute.Time(21))).Time(), "b", 1.0, "seven", values.Time(int64(execute.Time(31))).Time(), "a", 3.0, "nine", values.Time(int64(execute.Time(41))).Time(), "c", 4.0, "elevendyone"}}, }, }, { name: "coltable with ints", spec: &fsql.ToSQLProcedureSpec{ Spec: &fsql.ToSQLOpSpec{ DriverName: driverName, DataSourceName: dsn, Table: "TestTable2", }, }, data: executetest.MustCopyTable(&executetest.Table{ ColMeta: []flux.ColMeta{ {Label: "_time", Type: flux.TTime}, {Label: 
"_measurement", Type: flux.TString}, {Label: "_value", Type: flux.TInt}, {Label: "fred", Type: flux.TString}, }, Data: [][]interface{}{ {execute.Time(11), "a", int64(2), "one"}, {execute.Time(21), "a", int64(2), "one"}, {execute.Time(21), "b", int64(1), "seven"}, {execute.Time(31), "a", int64(3), "nine"}, {execute.Time(41), "c", int64(4), "elevendyone"}, }, }), want: wanted{ Table: []*executetest.Table{{ ColMeta: []flux.ColMeta{ {Label: "_time", Type: flux.TTime}, {Label: "_measurement", Type: flux.TString}, {Label: "_value", Type: flux.TInt}, {Label: "fred", Type: flux.TString}, }, Data: [][]interface{}{ {execute.Time(11), "a", int64(2), "one"}, {execute.Time(21), "a", int64(2), "one"}, {execute.Time(21), "b", int64(1), "seven"}, {execute.Time(31), "a", int64(3), "nine"}, {execute.Time(41), "c", int64(4), "elevendyone"}, }, }}, ColumnNames: []string{"_time", "_measurement", "_value", "fred"}, ValueStrings: [][]string{{"(?,?,?,?)", "(?,?,?,?)", "(?,?,?,?)", "(?,?,?,?)", "(?,?,?,?)"}}, ValueArgs: [][]interface{}{{ values.Time(int64(execute.Time(11))).Time(), "a", int64(2), "one", values.Time(int64(execute.Time(21))).Time(), "a", int64(2), "one", values.Time(int64(execute.Time(21))).Time(), "b", int64(1), "seven", values.Time(int64(execute.Time(31))).Time(), "a", int64(3), "nine", values.Time(int64(execute.Time(41))).Time(), "c", int64(4), "elevendyone"}}, }, }, { name: "coltable with uints", spec: &fsql.ToSQLProcedureSpec{ Spec: &fsql.ToSQLOpSpec{ DriverName: driverName, DataSourceName: dsn, Table: "TestTable2", }, }, data: executetest.MustCopyTable(&executetest.Table{ ColMeta: []flux.ColMeta{ {Label: "_time", Type: flux.TTime}, {Label: "_measurement", Type: flux.TString}, {Label: "_value", Type: flux.TUInt}, {Label: "fred", Type: flux.TString}, }, Data: [][]interface{}{ {execute.Time(11), "a", uint64(2), "one"}, {execute.Time(21), "a", uint64(2), "one"}, {execute.Time(21), "b", uint64(1), "seven"}, {execute.Time(31), "a", uint64(3), "nine"}, {execute.Time(41), "c", uint64(4), "elevendyone"}, }, }), want: wanted{ Table: []*executetest.Table{{ ColMeta: []flux.ColMeta{ {Label: "_time", Type: flux.TTime}, {Label: "_measurement", Type: flux.TString}, {Label: "_value", Type: flux.TUInt}, {Label: "fred", Type: flux.TString}, }, Data: [][]interface{}{ {execute.Time(11), "a", uint64(2), "one"}, {execute.Time(21), "a", uint64(2), "one"}, {execute.Time(21), "b", uint64(1), "seven"}, {execute.Time(31), "a", uint64(3), "nine"}, {execute.Time(41), "c", uint64(4), "elevendyone"}, }, }}, ColumnNames: []string{"_time", "_measurement", "_value", "fred"}, ValueStrings: [][]string{{"(?,?,?,?)", "(?,?,?,?)", "(?,?,?,?)", "(?,?,?,?)", "(?,?,?,?)"}}, ValueArgs: [][]interface{}{{ values.Time(int64(execute.Time(11))).Time(), "a", uint64(2), "one", values.Time(int64(execute.Time(21))).Time(), "a", uint64(2), "one", values.Time(int64(execute.Time(21))).Time(), "b", uint64(1), "seven", values.Time(int64(execute.Time(31))).Time(), "a", uint64(3), "nine", values.Time(int64(execute.Time(41))).Time(), "c", uint64(4), "elevendyone"}}, }, }, { name: "coltable with bool", spec: &fsql.ToSQLProcedureSpec{ Spec: &fsql.ToSQLOpSpec{ DriverName: driverName, DataSourceName: dsn, Table: "TestTable2", }, }, data: executetest.MustCopyTable(&executetest.Table{ ColMeta: []flux.ColMeta{ {Label: "_time", Type: flux.TTime}, {Label: "_measurement", Type: flux.TString}, {Label: "_value", Type: flux.TBool}, {Label: "fred", Type: flux.TString}, }, Data: [][]interface{}{ {execute.Time(11), "a", true, "one"}, {execute.Time(21), "a", true, 
"one"}, {execute.Time(21), "b", false, "seven"}, {execute.Time(31), "a", true, "nine"}, {execute.Time(41), "c", false, "elevendyone"}, }, }), want: wanted{ Table: []*executetest.Table{{ ColMeta: []flux.ColMeta{ {Label: "_time", Type: flux.TTime}, {Label: "_measurement", Type: flux.TString}, {Label: "_value", Type: flux.TBool}, {Label: "fred", Type: flux.TString}, }, Data: [][]interface{}{ {execute.Time(11), "a", true, "one"}, {execute.Time(21), "a", true, "one"}, {execute.Time(21), "b", false, "seven"}, {execute.Time(31), "a", true, "nine"}, {execute.Time(41), "c", false, "elevendyone"}, }, }}, ColumnNames: []string{"_time", "_measurement", "_value", "fred"}, ValueStrings: [][]string{{"(?,?,?,?)", "(?,?,?,?)", "(?,?,?,?)", "(?,?,?,?)", "(?,?,?,?)"}}, ValueArgs: [][]interface{}{{ values.Time(int64(execute.Time(11))).Time(), "a", true, "one", values.Time(int64(execute.Time(21))).Time(), "a", true, "one", values.Time(int64(execute.Time(21))).Time(), "b", false, "seven", values.Time(int64(execute.Time(31))).Time(), "a", true, "nine", values.Time(int64(execute.Time(41))).Time(), "c", false, "elevendyone"}}, }, }, } for _, tc := range testCases { tc := tc t.Run(tc.name, func(t *testing.T) { d := executetest.NewDataset(executetest.RandomDatasetID()) c := execute.NewTableBuilderCache(executetest.UnlimitedAllocator) c.SetTriggerSpec(plan.DefaultTriggerSpec) transformation, err := fsql.NewToSQLTransformation(d, dependenciestest.Default(), c, tc.spec) if err != nil { t.Fatal(err) } a := tc.data colNames, valStrings, valArgs, err := fsql.CreateInsertComponents(transformation, a) if err != nil { t.Fatal(err) } if !cmp.Equal(tc.want.ColumnNames, colNames, cmpopts.EquateNaNs()) { t.Log(cmp.Diff(tc.want.ColumnNames, colNames)) t.Fail() } if !cmp.Equal(tc.want.ValueStrings, valStrings, cmpopts.EquateNaNs()) { t.Log(cmp.Diff(tc.want.ValueStrings, valStrings)) t.Fail() } if !cmp.Equal(tc.want.ValueArgs, valArgs, cmpopts.EquateNaNs()) { t.Log(cmp.Diff(tc.want.ValueArgs, valArgs)) t.Fail() } }) } } func TestToSql_NewTransformation(t *testing.T) { test := executetest.TfUrlValidationTest{ CreateFn: func(d execute.Dataset, deps flux.Dependencies, cache execute.TableBuilderCache, spec plan.ProcedureSpec) (execute.Transformation, error) { return fsql.NewToSQLTransformation(d, deps, cache, spec.(*fsql.ToSQLProcedureSpec)) }, Cases: []executetest.TfUrlValidationTestCase{ { Name: "ok mysql", Spec: &fsql.ToSQLProcedureSpec{ Spec: &fsql.ToSQLOpSpec{ DriverName: "mysql", DataSourceName: "username:password@tcp(localhost:12345)/dbname?param=value", }, }, WantErr: "connection refused", }, { Name: "ok postgres", Spec: &fsql.ToSQLProcedureSpec{ Spec: &fsql.ToSQLOpSpec{ DriverName: "postgres", DataSourceName: "postgres://pqgotest:password@localhost:12345/pqgotest?sslmode=verify-full", }, }, WantErr: "connection refused", }, { Name: "invalid driver", Spec: &fsql.ToSQLProcedureSpec{ Spec: &fsql.ToSQLOpSpec{ DriverName: "voltdb", DataSourceName: "voltdb://pqgotest:password@localhost:12345/pqgotest?sslmode=verify-full", }, }, WantErr: "sql driver voltdb not supported", }, { Name: "no such host", Spec: &fsql.ToSQLProcedureSpec{ Spec: &fsql.ToSQLOpSpec{ DriverName: "mysql", DataSourceName: "username:password@tcp(notfound:12345)/dbname?param=value", }, }, WantErr: "no such host", }, { Name: "private ip", Spec: &fsql.ToSQLProcedureSpec{ Spec: &fsql.ToSQLOpSpec{ DriverName: "mysql", DataSourceName: "username:password@tcp(localhost:12345)/dbname?param=value", }, }, Validator: url.PrivateIPValidator{}, WantErr: "url is not valid, it 
connects to a private IP", }, }, } test.Run(t) }
idx:  1
id:   12,454
msg:  Generally, we do not use newlines between project imports, we only separate them from the stdlib imports
proj: influxdata-flux
lang: go
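The reviewer's convention, applied to the imports this patch touches, keeps a single blank line between the standard library group and everything else, with no blank lines inside the non-stdlib group. The snippet below is a sketch of that grouping built from the import paths in the patch; it shows the import block only, not a complete, compilable test file.

```go
package sql_test

import (
	// Standard library imports come first, in their own group.
	"testing"

	// All project and third-party imports follow in one block,
	// with no blank lines between them.
	"github.com/DATA-DOG/go-sqlmock"
	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	"github.com/influxdata/flux"
	_ "github.com/influxdata/flux/builtin" // We need to import the builtins for the tests to work.
	"github.com/influxdata/flux/dependencies/dependenciestest"
	_ "github.com/mattn/go-sqlite3"
)
```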
@@ -770,6 +770,10 @@ define(["playbackManager", "dom", "inputManager", "datetime", "itemHelper", "med var isProgressClear = state.MediaSource && null == state.MediaSource.RunTimeTicks; nowPlayingPositionSlider.setIsClear(isProgressClear); + if (nowPlayingItem.RunTimeTicks) { + nowPlayingPositionSlider.setKeyboardSteps(userSettings.skipBackLength() * 1000000 / nowPlayingItem.RunTimeTicks, userSettings.skipForwardLength() * 1000000 / nowPlayingItem.RunTimeTicks); + } + if (-1 === supportedCommands.indexOf("ToggleFullscreen") || player.isLocalPlayer && layoutManager.tv && playbackManager.isFullscreen(player)) { view.querySelector(".btnFullscreen").classList.add("hide"); } else {
y: 1

oldf (full file before the patch):
define(["playbackManager", "dom", "inputManager", "datetime", "itemHelper", "mediaInfo", "focusManager", "imageLoader", "scrollHelper", "events", "connectionManager", "browser", "globalize", "apphost", "layoutManager", "userSettings", "scrollStyles", "emby-slider", "paper-icon-button-light", "css!css/videoosd"], function (playbackManager, dom, inputManager, datetime, itemHelper, mediaInfo, focusManager, imageLoader, scrollHelper, events, connectionManager, browser, globalize, appHost, layoutManager, userSettings) { "use strict"; function seriesImageUrl(item, options) { if ("Episode" !== item.Type) { return null; } options = options || {}; options.type = options.type || "Primary"; if ("Primary" === options.type && item.SeriesPrimaryImageTag) { options.tag = item.SeriesPrimaryImageTag; return connectionManager.getApiClient(item.ServerId).getScaledImageUrl(item.SeriesId, options); } if ("Thumb" === options.type) { if (item.SeriesThumbImageTag) { options.tag = item.SeriesThumbImageTag; return connectionManager.getApiClient(item.ServerId).getScaledImageUrl(item.SeriesId, options); } if (item.ParentThumbImageTag) { options.tag = item.ParentThumbImageTag; return connectionManager.getApiClient(item.ServerId).getScaledImageUrl(item.ParentThumbItemId, options); } } return null; } function imageUrl(item, options) { options = options || {}; options.type = options.type || "Primary"; if (item.ImageTags && item.ImageTags[options.type]) { options.tag = item.ImageTags[options.type]; return connectionManager.getApiClient(item.ServerId).getScaledImageUrl(item.PrimaryImageItemId || item.Id, options); } if ("Primary" === options.type && item.AlbumId && item.AlbumPrimaryImageTag) { options.tag = item.AlbumPrimaryImageTag; return connectionManager.getApiClient(item.ServerId).getScaledImageUrl(item.AlbumId, options); } return null; } function logoImageUrl(item, apiClient, options) { options = options || {}; options.type = "Logo"; if (item.ImageTags && item.ImageTags.Logo) { options.tag = item.ImageTags.Logo; return apiClient.getScaledImageUrl(item.Id, options); } if (item.ParentLogoImageTag) { options.tag = item.ParentLogoImageTag; return apiClient.getScaledImageUrl(item.ParentLogoItemId, options); } return null; } return function (view, params) { function onVerticalSwipe(e, elem, data) { var player = currentPlayer; if (player) { var deltaY = data.currentDeltaY; var windowSize = dom.getWindowSize(); if (supportsBrightnessChange && data.clientX < windowSize.innerWidth / 2) { return void doBrightnessTouch(deltaY, player, windowSize.innerHeight); } doVolumeTouch(deltaY, player, windowSize.innerHeight); } } function doBrightnessTouch(deltaY, player, viewHeight) { var delta = -deltaY / viewHeight * 100; var newValue = playbackManager.getBrightness(player) + delta; newValue = Math.min(newValue, 100); newValue = Math.max(newValue, 0); playbackManager.setBrightness(newValue, player); } function doVolumeTouch(deltaY, player, viewHeight) { var delta = -deltaY / viewHeight * 100; var newValue = playbackManager.getVolume(player) + delta; newValue = Math.min(newValue, 100); newValue = Math.max(newValue, 0); playbackManager.setVolume(newValue, player); } function onDoubleClick(e) { var clientX = e.clientX; if (null != clientX) { if (clientX < dom.getWindowSize().innerWidth / 2) { playbackManager.rewind(currentPlayer); } else { playbackManager.fastForward(currentPlayer); } e.preventDefault(); e.stopPropagation(); } } function getDisplayItem(item) { if ("TvChannel" === item.Type) { var apiClient = 
connectionManager.getApiClient(item.ServerId); return apiClient.getItem(apiClient.getCurrentUserId(), item.Id).then(function (refreshedItem) { return { originalItem: refreshedItem, displayItem: refreshedItem.CurrentProgram }; }); } return Promise.resolve({ originalItem: item }); } function updateRecordingButton(item) { if (!item || "Program" !== item.Type) { if (recordingButtonManager) { recordingButtonManager.destroy(); recordingButtonManager = null; } return void view.querySelector(".btnRecord").classList.add("hide"); } connectionManager.getApiClient(item.ServerId).getCurrentUser().then(function (user) { if (user.Policy.EnableLiveTvManagement) { require(["recordingButton"], function (RecordingButton) { if (recordingButtonManager) { return void recordingButtonManager.refreshItem(item); } recordingButtonManager = new RecordingButton({ item: item, button: view.querySelector(".btnRecord") }); view.querySelector(".btnRecord").classList.remove("hide"); }); } }); } function updateDisplayItem(itemInfo) { var item = itemInfo.originalItem; currentItem = item; var displayItem = itemInfo.displayItem || item; updateRecordingButton(displayItem); setPoster(displayItem, item); var parentName = displayItem.SeriesName || displayItem.Album; if (displayItem.EpisodeTitle || displayItem.IsSeries) { parentName = displayItem.Name; } setTitle(displayItem, parentName); var titleElement; var osdTitle = view.querySelector(".osdTitle"); titleElement = osdTitle; var displayName = itemHelper.getDisplayName(displayItem, { includeParentInfo: "Program" !== displayItem.Type, includeIndexNumber: "Program" !== displayItem.Type }); if (!displayName) { displayItem.Type; } titleElement.innerHTML = displayName; if (displayName) { titleElement.classList.remove("hide"); } else { titleElement.classList.add("hide"); } var mediaInfoHtml = mediaInfo.getPrimaryMediaInfoHtml(displayItem, { runtime: false, subtitles: false, tomatoes: false, endsAt: false, episodeTitle: false, originalAirDate: "Program" !== displayItem.Type, episodeTitleIndexNumber: "Program" !== displayItem.Type, programIndicator: false }); var osdMediaInfo = view.querySelector(".osdMediaInfo"); osdMediaInfo.innerHTML = mediaInfoHtml; if (mediaInfoHtml) { osdMediaInfo.classList.remove("hide"); } else { osdMediaInfo.classList.add("hide"); } var secondaryMediaInfo = view.querySelector(".osdSecondaryMediaInfo"); var secondaryMediaInfoHtml = mediaInfo.getSecondaryMediaInfoHtml(displayItem, { startDate: false, programTime: false }); secondaryMediaInfo.innerHTML = secondaryMediaInfoHtml; if (secondaryMediaInfoHtml) { secondaryMediaInfo.classList.remove("hide"); } else { secondaryMediaInfo.classList.add("hide"); } if (displayName) { view.querySelector(".osdMainTextContainer").classList.remove("hide"); } else { view.querySelector(".osdMainTextContainer").classList.add("hide"); } if (enableProgressByTimeOfDay) { setDisplayTime(startTimeText, displayItem.StartDate); setDisplayTime(endTimeText, displayItem.EndDate); startTimeText.classList.remove("hide"); endTimeText.classList.remove("hide"); programStartDateMs = displayItem.StartDate ? datetime.parseISO8601Date(displayItem.StartDate).getTime() : 0; programEndDateMs = displayItem.EndDate ? 
datetime.parseISO8601Date(displayItem.EndDate).getTime() : 0; } else { startTimeText.classList.add("hide"); endTimeText.classList.add("hide"); startTimeText.innerHTML = ""; endTimeText.innerHTML = ""; programStartDateMs = 0; programEndDateMs = 0; } } function getDisplayTimeWithoutAmPm(date, showSeconds) { if (showSeconds) { return datetime.toLocaleTimeString(date, { hour: "numeric", minute: "2-digit", second: "2-digit" }).toLowerCase().replace("am", "").replace("pm", "").trim(); } return datetime.getDisplayTime(date).toLowerCase().replace("am", "").replace("pm", "").trim(); } function setDisplayTime(elem, date) { var html; if (date) { date = datetime.parseISO8601Date(date); html = getDisplayTimeWithoutAmPm(date); } elem.innerHTML = html || ""; } function shouldEnableProgressByTimeOfDay(item) { return !("TvChannel" !== item.Type || !item.CurrentProgram); } function updateNowPlayingInfo(player, state) { var item = state.NowPlayingItem; currentItem = item; if (!item) { setPoster(null); updateRecordingButton(null); Emby.Page.setTitle(""); nowPlayingVolumeSlider.disabled = true; nowPlayingPositionSlider.disabled = true; btnFastForward.disabled = true; btnRewind.disabled = true; view.querySelector(".btnSubtitles").classList.add("hide"); view.querySelector(".btnAudio").classList.add("hide"); view.querySelector(".osdTitle").innerHTML = ""; view.querySelector(".osdMediaInfo").innerHTML = ""; return; } enableProgressByTimeOfDay = shouldEnableProgressByTimeOfDay(item); getDisplayItem(item).then(updateDisplayItem); nowPlayingVolumeSlider.disabled = false; nowPlayingPositionSlider.disabled = false; btnFastForward.disabled = false; btnRewind.disabled = false; if (playbackManager.subtitleTracks(player).length) { view.querySelector(".btnSubtitles").classList.remove("hide"); toggleSubtitleSync(); } else { view.querySelector(".btnSubtitles").classList.add("hide"); toggleSubtitleSync("forceToHide"); } if (playbackManager.audioTracks(player).length > 1) { view.querySelector(".btnAudio").classList.remove("hide"); } else { view.querySelector(".btnAudio").classList.add("hide"); } } function setTitle(item, parentName) { var url = logoImageUrl(item, connectionManager.getApiClient(item.ServerId), {}); if (url) { Emby.Page.setTitle(""); var pageTitle = document.querySelector(".pageTitle"); pageTitle.style.backgroundImage = "url('" + url + "')"; pageTitle.classList.add("pageTitleWithLogo"); pageTitle.classList.remove("pageTitleWithDefaultLogo"); pageTitle.innerHTML = ""; } else { Emby.Page.setTitle(parentName || ""); } var documentTitle = parentName || (item ? 
item.Name : null); if (documentTitle) { document.title = documentTitle; } } function setPoster(item, secondaryItem) { var osdPoster = view.querySelector(".osdPoster"); if (item) { var imgUrl = seriesImageUrl(item, { type: "Primary" }) || seriesImageUrl(item, { type: "Thumb" }) || imageUrl(item, { type: "Primary" }); if (!imgUrl && secondaryItem && (imgUrl = seriesImageUrl(secondaryItem, { type: "Primary" }) || seriesImageUrl(secondaryItem, { type: "Thumb" }) || imageUrl(secondaryItem, { type: "Primary" })), imgUrl) { return void (osdPoster.innerHTML = '<img src="' + imgUrl + '" />'); } } osdPoster.innerHTML = ""; } function showOsd() { slideDownToShow(headerElement); showMainOsdControls(); startOsdHideTimer(); } function hideOsd() { slideUpToHide(headerElement); hideMainOsdControls(); } function toggleOsd() { if ("osd" === currentVisibleMenu) { hideOsd(); } else if (!currentVisibleMenu) { showOsd(); } } function startOsdHideTimer() { stopOsdHideTimer(); osdHideTimeout = setTimeout(hideOsd, 5e3); } function stopOsdHideTimer() { if (osdHideTimeout) { clearTimeout(osdHideTimeout); osdHideTimeout = null; } } function slideDownToShow(elem) { elem.classList.remove("osdHeader-hidden"); } function slideUpToHide(elem) { elem.classList.add("osdHeader-hidden"); } function clearHideAnimationEventListeners(elem) { dom.removeEventListener(elem, transitionEndEventName, onHideAnimationComplete, { once: true }); } function onHideAnimationComplete(e) { var elem = e.target; elem.classList.add("hide"); dom.removeEventListener(elem, transitionEndEventName, onHideAnimationComplete, { once: true }); } function showMainOsdControls() { if (!currentVisibleMenu) { var elem = osdBottomElement; currentVisibleMenu = "osd"; clearHideAnimationEventListeners(elem); elem.classList.remove("hide"); elem.classList.remove("videoOsdBottom-hidden"); if (!layoutManager.mobile) { setTimeout(function () { focusManager.focus(elem.querySelector(".btnPause")); }, 50); } toggleSubtitleSync(); } } function hideMainOsdControls() { if ("osd" === currentVisibleMenu) { var elem = osdBottomElement; clearHideAnimationEventListeners(elem); elem.classList.add("videoOsdBottom-hidden"); dom.addEventListener(elem, transitionEndEventName, onHideAnimationComplete, { once: true }); currentVisibleMenu = null; toggleSubtitleSync("hide"); } } function onPointerMove(e) { if ("mouse" === (e.pointerType || (layoutManager.mobile ? 
"touch" : "mouse"))) { var eventX = e.screenX || 0; var eventY = e.screenY || 0; var obj = lastPointerMoveData; if (!obj) { lastPointerMoveData = { x: eventX, y: eventY }; return; } if (Math.abs(eventX - obj.x) < 10 && Math.abs(eventY - obj.y) < 10) { return; } obj.x = eventX; obj.y = eventY; showOsd(); } } function onInputCommand(e) { var player = currentPlayer; switch (e.detail.command) { case "left": if ("osd" === currentVisibleMenu) { showOsd(); } else { if (!currentVisibleMenu) { e.preventDefault(); playbackManager.rewind(player); } } break; case "right": if ("osd" === currentVisibleMenu) { showOsd(); } else if (!currentVisibleMenu) { e.preventDefault(); playbackManager.fastForward(player); } break; case "pageup": playbackManager.nextChapter(player); break; case "pagedown": playbackManager.previousChapter(player); break; case "up": case "down": case "select": case "menu": case "info": case "play": case "playpause": case "pause": case "fastforward": case "rewind": case "next": case "previous": showOsd(); break; case "record": onRecordingCommand(); showOsd(); break; case "togglestats": toggleStats(); } } function onRecordingCommand() { var btnRecord = view.querySelector(".btnRecord"); if (!btnRecord.classList.contains("hide")) { btnRecord.click(); } } function updateFullscreenIcon() { if (playbackManager.isFullscreen(currentPlayer)) { view.querySelector(".btnFullscreen").setAttribute("title", globalize.translate("ExitFullscreen")); view.querySelector(".btnFullscreen i").innerHTML = "&#xE5D1;"; } else { view.querySelector(".btnFullscreen").setAttribute("title", globalize.translate("Fullscreen") + " (f)"); view.querySelector(".btnFullscreen i").innerHTML = "&#xE5D0;"; } } function onPlayerChange() { bindToPlayer(playbackManager.getCurrentPlayer()); } function onStateChanged(event, state) { var player = this; if (state.NowPlayingItem) { isEnabled = true; updatePlayerStateInternal(event, player, state); updatePlaylist(player); enableStopOnBack(true); } } function onPlayPauseStateChanged(e) { if (isEnabled) { updatePlayPauseState(this.paused()); } } function onVolumeChanged(e) { if (isEnabled) { var player = this; updatePlayerVolumeState(player, player.isMuted(), player.getVolume()); } } function onPlaybackStart(e, state) { console.log("nowplaying event: " + e.type); var player = this; onStateChanged.call(player, e, state); resetUpNextDialog(); } function resetUpNextDialog() { comingUpNextDisplayed = false; var dlg = currentUpNextDialog; if (dlg) { dlg.destroy(); currentUpNextDialog = null; } } function onPlaybackStopped(e, state) { currentRuntimeTicks = null; resetUpNextDialog(); console.log("nowplaying event: " + e.type); if ("Video" !== state.NextMediaType) { view.removeEventListener("viewbeforehide", onViewHideStopPlayback); Emby.Page.back(); } } function onMediaStreamsChanged(e) { var player = this; var state = playbackManager.getPlayerState(player); onStateChanged.call(player, { type: "init" }, state); } function onBeginFetch() { document.querySelector(".osdMediaStatus").classList.remove("hide"); } function onEndFetch() { document.querySelector(".osdMediaStatus").classList.add("hide"); } function bindToPlayer(player) { if (player !== currentPlayer) { releaseCurrentPlayer(); currentPlayer = player; if (!player) return; } var state = playbackManager.getPlayerState(player); onStateChanged.call(player, { type: "init" }, state); events.on(player, "playbackstart", onPlaybackStart); events.on(player, "playbackstop", onPlaybackStopped); events.on(player, "volumechange", onVolumeChanged); 
events.on(player, "pause", onPlayPauseStateChanged); events.on(player, "unpause", onPlayPauseStateChanged); events.on(player, "timeupdate", onTimeUpdate); events.on(player, "fullscreenchange", updateFullscreenIcon); events.on(player, "mediastreamschange", onMediaStreamsChanged); events.on(player, "beginFetch", onBeginFetch); events.on(player, "endFetch", onEndFetch); resetUpNextDialog(); if (player.isFetching) { onBeginFetch(); } } function releaseCurrentPlayer() { destroyStats(); destroySubtitleSync(); resetUpNextDialog(); var player = currentPlayer; if (player) { events.off(player, "playbackstart", onPlaybackStart); events.off(player, "playbackstop", onPlaybackStopped); events.off(player, "volumechange", onVolumeChanged); events.off(player, "pause", onPlayPauseStateChanged); events.off(player, "unpause", onPlayPauseStateChanged); events.off(player, "timeupdate", onTimeUpdate); events.off(player, "fullscreenchange", updateFullscreenIcon); events.off(player, "mediastreamschange", onMediaStreamsChanged); currentPlayer = null; } } function onTimeUpdate(e) { if (isEnabled) { var now = new Date().getTime(); if (!(now - lastUpdateTime < 700)) { lastUpdateTime = now; var player = this; currentRuntimeTicks = playbackManager.duration(player); var currentTime = playbackManager.currentTime(player); updateTimeDisplay(currentTime, currentRuntimeTicks, playbackManager.playbackStartTime(player), playbackManager.getBufferedRanges(player)); var item = currentItem; refreshProgramInfoIfNeeded(player, item); showComingUpNextIfNeeded(player, item, currentTime, currentRuntimeTicks); } } } function showComingUpNextIfNeeded(player, currentItem, currentTimeTicks, runtimeTicks) { if (runtimeTicks && currentTimeTicks && !comingUpNextDisplayed && !currentVisibleMenu && "Episode" === currentItem.Type && userSettings.enableNextVideoInfoOverlay()) { var showAtSecondsLeft = runtimeTicks >= 3e10 ? 40 : runtimeTicks >= 24e9 ? 
35 : 30; var showAtTicks = runtimeTicks - 1e3 * showAtSecondsLeft * 1e4; var timeRemainingTicks = runtimeTicks - currentTimeTicks; if (currentTimeTicks >= showAtTicks && runtimeTicks >= 6e9 && timeRemainingTicks >= 2e8) { showComingUpNext(player); } } } function onUpNextHidden() { if ("upnext" === currentVisibleMenu) { currentVisibleMenu = null; } } function showComingUpNext(player) { require(["upNextDialog"], function (UpNextDialog) { if (!(currentVisibleMenu || currentUpNextDialog)) { currentVisibleMenu = "upnext"; comingUpNextDisplayed = true; playbackManager.nextItem(player).then(function (nextItem) { currentUpNextDialog = new UpNextDialog({ parent: view.querySelector(".upNextContainer"), player: player, nextItem: nextItem }); events.on(currentUpNextDialog, "hide", onUpNextHidden); }, onUpNextHidden); } }); } function refreshProgramInfoIfNeeded(player, item) { if ("TvChannel" === item.Type) { var program = item.CurrentProgram; if (program && program.EndDate) { try { var endDate = datetime.parseISO8601Date(program.EndDate); if (new Date().getTime() >= endDate.getTime()) { console.log("program info needs to be refreshed"); var state = playbackManager.getPlayerState(player); onStateChanged.call(player, { type: "init" }, state); } } catch (e) { console.log("Error parsing date: " + program.EndDate); } } } } function updatePlayPauseState(isPaused) { var button = view.querySelector(".btnPause i"); if (isPaused) { button.innerHTML = "&#xE037;"; button.setAttribute("title", globalize.translate("ButtonPlay") + " (k)"); } else { button.innerHTML = "&#xE034;"; button.setAttribute("title", globalize.translate("ButtonPause") + " (k)"); } } function updatePlayerStateInternal(event, player, state) { var playState = state.PlayState || {}; updatePlayPauseState(playState.IsPaused); var supportedCommands = playbackManager.getSupportedCommands(player); currentPlayerSupportedCommands = supportedCommands; supportsBrightnessChange = -1 !== supportedCommands.indexOf("SetBrightness"); updatePlayerVolumeState(player, playState.IsMuted, playState.VolumeLevel); if (nowPlayingPositionSlider && !nowPlayingPositionSlider.dragging) { nowPlayingPositionSlider.disabled = !playState.CanSeek; } btnFastForward.disabled = !playState.CanSeek; btnRewind.disabled = !playState.CanSeek; var nowPlayingItem = state.NowPlayingItem || {}; playbackStartTimeTicks = playState.PlaybackStartTimeTicks; updateTimeDisplay(playState.PositionTicks, nowPlayingItem.RunTimeTicks, playState.PlaybackStartTimeTicks, playState.BufferedRanges || []); updateNowPlayingInfo(player, state); if (state.MediaSource && state.MediaSource.SupportsTranscoding && -1 !== supportedCommands.indexOf("SetMaxStreamingBitrate")) { view.querySelector(".btnVideoOsdSettings").classList.remove("hide"); } else { view.querySelector(".btnVideoOsdSettings").classList.add("hide"); } var isProgressClear = state.MediaSource && null == state.MediaSource.RunTimeTicks; nowPlayingPositionSlider.setIsClear(isProgressClear); if (-1 === supportedCommands.indexOf("ToggleFullscreen") || player.isLocalPlayer && layoutManager.tv && playbackManager.isFullscreen(player)) { view.querySelector(".btnFullscreen").classList.add("hide"); } else { view.querySelector(".btnFullscreen").classList.remove("hide"); } if (-1 === supportedCommands.indexOf("PictureInPicture")) { view.querySelector(".btnPip").classList.add("hide"); } else { view.querySelector(".btnPip").classList.remove("hide"); } updateFullscreenIcon(); } function getDisplayPercentByTimeOfDay(programStartDateMs, programRuntimeMs, 
currentTimeMs) { return (currentTimeMs - programStartDateMs) / programRuntimeMs * 100; } function updateTimeDisplay(positionTicks, runtimeTicks, playbackStartTimeTicks, bufferedRanges) { if (enableProgressByTimeOfDay) { if (nowPlayingPositionSlider && !nowPlayingPositionSlider.dragging) { if (programStartDateMs && programEndDateMs) { var currentTimeMs = (playbackStartTimeTicks + (positionTicks || 0)) / 1e4; var programRuntimeMs = programEndDateMs - programStartDateMs; if (nowPlayingPositionSlider.value = getDisplayPercentByTimeOfDay(programStartDateMs, programRuntimeMs, currentTimeMs), bufferedRanges.length) { var rangeStart = getDisplayPercentByTimeOfDay(programStartDateMs, programRuntimeMs, (playbackStartTimeTicks + (bufferedRanges[0].start || 0)) / 1e4); var rangeEnd = getDisplayPercentByTimeOfDay(programStartDateMs, programRuntimeMs, (playbackStartTimeTicks + (bufferedRanges[0].end || 0)) / 1e4); nowPlayingPositionSlider.setBufferedRanges([{ start: rangeStart, end: rangeEnd }]); } else { nowPlayingPositionSlider.setBufferedRanges([]); } } else { nowPlayingPositionSlider.value = 0; nowPlayingPositionSlider.setBufferedRanges([]); } } nowPlayingPositionText.innerHTML = ""; nowPlayingDurationText.innerHTML = ""; } else { if (nowPlayingPositionSlider && !nowPlayingPositionSlider.dragging) { if (runtimeTicks) { var pct = positionTicks / runtimeTicks; pct *= 100; nowPlayingPositionSlider.value = pct; } else { nowPlayingPositionSlider.value = 0; } if (runtimeTicks && null != positionTicks && currentRuntimeTicks && !enableProgressByTimeOfDay && currentItem.RunTimeTicks && "Recording" !== currentItem.Type) { endsAtText.innerHTML = "&nbsp;&nbsp;-&nbsp;&nbsp;" + mediaInfo.getEndsAtFromPosition(runtimeTicks, positionTicks, true); } else { endsAtText.innerHTML = ""; } } if (nowPlayingPositionSlider) { nowPlayingPositionSlider.setBufferedRanges(bufferedRanges, runtimeTicks, positionTicks); } updateTimeText(nowPlayingPositionText, positionTicks); updateTimeText(nowPlayingDurationText, runtimeTicks, true); } } function updatePlayerVolumeState(player, isMuted, volumeLevel) { var supportedCommands = currentPlayerSupportedCommands; var showMuteButton = true; var showVolumeSlider = true; var volumeSlider = view.querySelector('.osdVolumeSliderContainer'); var progressElement = volumeSlider.querySelector('.mdl-slider-background-lower'); if (-1 === supportedCommands.indexOf("Mute")) { showMuteButton = false; } if (-1 === supportedCommands.indexOf("SetVolume")) { showVolumeSlider = false; } if (player.isLocalPlayer && appHost.supports("physicalvolumecontrol")) { showMuteButton = false; showVolumeSlider = false; } if (isMuted) { view.querySelector(".buttonMute").setAttribute("title", globalize.translate("Unmute") + " (m)"); view.querySelector(".buttonMute i").innerHTML = "&#xE04F;"; } else { view.querySelector(".buttonMute").setAttribute("title", globalize.translate("Mute") + " (m)"); view.querySelector(".buttonMute i").innerHTML = "&#xE050;"; } if (progressElement) { progressElement.style.width = (volumeLevel || 0) + '%'; } if (showMuteButton) { view.querySelector(".buttonMute").classList.remove("hide"); } else { view.querySelector(".buttonMute").classList.add("hide"); } if (nowPlayingVolumeSlider) { if (showVolumeSlider) { nowPlayingVolumeSliderContainer.classList.remove("hide"); } else { nowPlayingVolumeSliderContainer.classList.add("hide"); } if (!nowPlayingVolumeSlider.dragging) { nowPlayingVolumeSlider.value = volumeLevel || 0; } } } function updatePlaylist(player) { var btnPreviousTrack = 
view.querySelector(".btnPreviousTrack"); var btnNextTrack = view.querySelector(".btnNextTrack"); btnPreviousTrack.classList.remove("hide"); btnNextTrack.classList.remove("hide"); btnNextTrack.disabled = false; btnPreviousTrack.disabled = false; } function updateTimeText(elem, ticks, divider) { if (null == ticks) { elem.innerHTML = ""; return; } var html = datetime.getDisplayRunningTime(ticks); if (divider) { html = "&nbsp;/&nbsp;" + html; } elem.innerHTML = html; } function onSettingsButtonClick(e) { var btn = this; require(["playerSettingsMenu"], function (playerSettingsMenu) { var player = currentPlayer; if (player) { // show subtitle offset feature only if player and media support it var showSubOffset = playbackManager.supportSubtitleOffset(player) && playbackManager.canHandleOffsetOnCurrentSubtitle(player); playerSettingsMenu.show({ mediaType: "Video", player: player, positionTo: btn, stats: true, suboffset: showSubOffset, onOption: onSettingsOption }); } }); } function onSettingsOption(selectedOption) { if ("stats" === selectedOption) { toggleStats(); } else if ("suboffset" === selectedOption) { var player = currentPlayer; if (player) { playbackManager.enableShowingSubtitleOffset(player); toggleSubtitleSync(); } } } function toggleStats() { require(["playerStats"], function (PlayerStats) { var player = currentPlayer; if (player) { if (statsOverlay) { statsOverlay.toggle(); } else { statsOverlay = new PlayerStats({ player: player }); } } }); } function destroyStats() { if (statsOverlay) { statsOverlay.destroy(); statsOverlay = null; } } function showAudioTrackSelection() { var player = currentPlayer; var audioTracks = playbackManager.audioTracks(player); var currentIndex = playbackManager.getAudioStreamIndex(player); var menuItems = audioTracks.map(function (stream) { var opt = { name: stream.DisplayTitle, id: stream.Index }; if (stream.Index === currentIndex) { opt.selected = true; } return opt; }); var positionTo = this; require(["actionsheet"], function (actionsheet) { actionsheet.show({ items: menuItems, title: globalize.translate("Audio"), positionTo: positionTo }).then(function (id) { var index = parseInt(id); if (index !== currentIndex) { playbackManager.setAudioStreamIndex(index, player); } }); }); } function showSubtitleTrackSelection() { var player = currentPlayer; var streams = playbackManager.subtitleTracks(player); var currentIndex = playbackManager.getSubtitleStreamIndex(player); if (null == currentIndex) { currentIndex = -1; } streams.unshift({ Index: -1, DisplayTitle: globalize.translate("Off") }); var menuItems = streams.map(function (stream) { var opt = { name: stream.DisplayTitle, id: stream.Index }; if (stream.Index === currentIndex) { opt.selected = true; } return opt; }); var positionTo = this; require(["actionsheet"], function (actionsheet) { actionsheet.show({ title: globalize.translate("Subtitles"), items: menuItems, positionTo: positionTo }).then(function (id) { var index = parseInt(id); if (index !== currentIndex) { playbackManager.setSubtitleStreamIndex(index, player); } toggleSubtitleSync(); }); }); } function toggleSubtitleSync(action) { require(["subtitleSync"], function (SubtitleSync) { var player = currentPlayer; if (subtitleSyncOverlay) { subtitleSyncOverlay.toggle(action); } else if(player){ subtitleSyncOverlay = new SubtitleSync(player); } }); } function destroySubtitleSync() { if (subtitleSyncOverlay) { subtitleSyncOverlay.destroy(); subtitleSyncOverlay = null; } } function onWindowKeyDown(e) { if (!currentVisibleMenu && 32 === e.keyCode) { 
playbackManager.playPause(currentPlayer); return void showOsd(); } switch (e.key) { case "k": playbackManager.playPause(currentPlayer); showOsd(); break; case "l": case "ArrowRight": case "Right": playbackManager.fastForward(currentPlayer); showOsd(); break; case "j": case "ArrowLeft": case "Left": playbackManager.rewind(currentPlayer); showOsd(); break; case "f": if (!e.ctrlKey && !e.metaKey) { playbackManager.toggleFullscreen(currentPlayer); showOsd(); } break; case "m": playbackManager.toggleMute(currentPlayer); showOsd(); break; case "NavigationLeft": case "GamepadDPadLeft": case "GamepadLeftThumbstickLeft": // Ignores gamepad events that are always triggered, even when not focused. if (document.hasFocus()) { playbackManager.rewind(currentPlayer); showOsd(); } break; case "NavigationRight": case "GamepadDPadRight": case "GamepadLeftThumbstickRight": // Ignores gamepad events that are always triggered, even when not focused. if (document.hasFocus()) { playbackManager.fastForward(currentPlayer); showOsd(); } } } function getImgUrl(item, chapter, index, maxWidth, apiClient) { if (chapter.ImageTag) { return apiClient.getScaledImageUrl(item.Id, { maxWidth: maxWidth, tag: chapter.ImageTag, type: "Chapter", index: index }); } return null; } function getChapterBubbleHtml(apiClient, item, chapters, positionTicks) { var chapter; var index = -1; for (var i = 0, length = chapters.length; i < length; i++) { var currentChapter = chapters[i]; if (positionTicks >= currentChapter.StartPositionTicks) { chapter = currentChapter; index = i; } } if (!chapter) { return null; } var src = getImgUrl(item, chapter, index, 400, apiClient); if (src) { var html = '<div class="chapterThumbContainer">'; html += '<img class="chapterThumb" src="' + src + '" />'; html += '<div class="chapterThumbTextContainer">'; html += '<div class="chapterThumbText chapterThumbText-dim">'; html += chapter.Name; html += "</div>"; html += '<h2 class="chapterThumbText">'; html += datetime.getDisplayRunningTime(positionTicks); html += "</h2>"; html += "</div>"; return html + "</div>"; } return null; } function onViewHideStopPlayback() { if (playbackManager.isPlayingVideo()) { require(['shell'], function (shell) { shell.disableFullscreen(); }); var player = currentPlayer; view.removeEventListener("viewbeforehide", onViewHideStopPlayback); releaseCurrentPlayer(); playbackManager.stop(player); } } function enableStopOnBack(enabled) { view.removeEventListener("viewbeforehide", onViewHideStopPlayback); if (enabled && playbackManager.isPlayingVideo(currentPlayer)) { view.addEventListener("viewbeforehide", onViewHideStopPlayback); } } require(['shell'], function (shell) { shell.enableFullscreen(); }); var currentPlayer; var comingUpNextDisplayed; var currentUpNextDialog; var isEnabled; var currentItem; var recordingButtonManager; var enableProgressByTimeOfDay; var supportsBrightnessChange; var currentVisibleMenu; var statsOverlay; var osdHideTimeout; var lastPointerMoveData; var self = this; var currentPlayerSupportedCommands = []; var currentRuntimeTicks = 0; var lastUpdateTime = 0; var programStartDateMs = 0; var programEndDateMs = 0; var playbackStartTimeTicks = 0; var subtitleSyncOverlay; var volumeSliderTimer; var nowPlayingVolumeSlider = view.querySelector(".osdVolumeSlider"); var nowPlayingVolumeSliderContainer = view.querySelector(".osdVolumeSliderContainer"); var nowPlayingPositionSlider = view.querySelector(".osdPositionSlider"); var nowPlayingPositionText = view.querySelector(".osdPositionText"); var nowPlayingDurationText = 
view.querySelector(".osdDurationText"); var startTimeText = view.querySelector(".startTimeText"); var endTimeText = view.querySelector(".endTimeText"); var endsAtText = view.querySelector(".endsAtText"); var btnRewind = view.querySelector(".btnRewind"); var btnFastForward = view.querySelector(".btnFastForward"); var transitionEndEventName = dom.whichTransitionEvent(); var headerElement = document.querySelector(".skinHeader"); var osdBottomElement = document.querySelector(".videoOsdBottom-maincontrols"); view.addEventListener("viewbeforeshow", function (e) { headerElement.classList.add("osdHeader"); Emby.Page.setTransparency("full"); }); view.addEventListener("viewshow", function (e) { try { events.on(playbackManager, "playerchange", onPlayerChange); bindToPlayer(playbackManager.getCurrentPlayer()); dom.addEventListener(document, window.PointerEvent ? "pointermove" : "mousemove", onPointerMove, { passive: true }); showOsd(); inputManager.on(window, onInputCommand); dom.addEventListener(window, "keydown", onWindowKeyDown, { passive: true }); } catch(e) { require(['appRouter'], function(appRouter) { appRouter.showDirect('/'); }); } }); view.addEventListener("viewbeforehide", function () { if (statsOverlay) { statsOverlay.enabled(false); } dom.removeEventListener(window, "keydown", onWindowKeyDown, { passive: true }); stopOsdHideTimer(); headerElement.classList.remove("osdHeader"); headerElement.classList.remove("osdHeader-hidden"); dom.removeEventListener(document, window.PointerEvent ? "pointermove" : "mousemove", onPointerMove, { passive: true }); inputManager.off(window, onInputCommand); events.off(playbackManager, "playerchange", onPlayerChange); releaseCurrentPlayer(); }); view.querySelector(".btnFullscreen").addEventListener("click", function () { playbackManager.toggleFullscreen(currentPlayer); }); view.querySelector(".btnPip").addEventListener("click", function () { playbackManager.togglePictureInPicture(currentPlayer); }); view.querySelector(".btnVideoOsdSettings").addEventListener("click", onSettingsButtonClick); view.addEventListener("viewhide", function () { headerElement.classList.remove("hide"); }); view.addEventListener("viewdestroy", function () { if (self.touchHelper) { self.touchHelper.destroy(); self.touchHelper = null; } if (recordingButtonManager) { recordingButtonManager.destroy(); recordingButtonManager = null; } destroyStats(); destroySubtitleSync(); }); var lastPointerDown = 0; dom.addEventListener(view, window.PointerEvent ? "pointerdown" : "click", function (e) { if (dom.parentWithClass(e.target, ["videoOsdBottom", "upNextContainer"])) { return void showOsd(); } var pointerType = e.pointerType || (layoutManager.mobile ? 
"touch" : "mouse"); var now = new Date().getTime(); switch (pointerType) { case "touch": if (now - lastPointerDown > 300) { lastPointerDown = now; toggleOsd(); } break; case "mouse": if (!e.button) { playbackManager.playPause(currentPlayer); showOsd(); } break; default: playbackManager.playPause(currentPlayer); showOsd(); } }, { passive: true }); if (browser.touch) { dom.addEventListener(view, "dblclick", onDoubleClick, {}); } else { var options = { passive: true }; dom.addEventListener(view, "dblclick", function () { playbackManager.toggleFullscreen(currentPlayer); }, options); } view.querySelector(".buttonMute").addEventListener("click", function () { playbackManager.toggleMute(currentPlayer); }); nowPlayingVolumeSlider.addEventListener("change", function () { if(volumeSliderTimer){ // interupt and remove existing timer clearTimeout(volumeSliderTimer); volumeSliderTimer = null; } playbackManager.setVolume(this.value, currentPlayer); }); nowPlayingVolumeSlider.addEventListener("mousemove", function () { if(!volumeSliderTimer){ var that = this; // register new timer volumeSliderTimer = setTimeout(function(){ playbackManager.setVolume(that.value, currentPlayer); // delete timer after completion volumeSliderTimer = null; }, 700); } }); nowPlayingVolumeSlider.addEventListener("touchmove", function () { if(!volumeSliderTimer){ var that = this; // register new timer volumeSliderTimer = setTimeout(function(){ playbackManager.setVolume(that.value, currentPlayer); // delete timer after completion volumeSliderTimer = null; }, 700); } }); nowPlayingPositionSlider.addEventListener("change", function () { var player = currentPlayer; if (player) { var newPercent = parseFloat(this.value); if (enableProgressByTimeOfDay) { var seekAirTimeTicks = newPercent / 100 * (programEndDateMs - programStartDateMs) * 1e4; seekAirTimeTicks += 1e4 * programStartDateMs; seekAirTimeTicks -= playbackStartTimeTicks; playbackManager.seek(seekAirTimeTicks, player); } else { playbackManager.seekPercent(newPercent, player); } } }); nowPlayingPositionSlider.getBubbleHtml = function (value) { showOsd(); if (enableProgressByTimeOfDay) { if (programStartDateMs && programEndDateMs) { var ms = programEndDateMs - programStartDateMs; ms /= 100; ms *= value; ms += programStartDateMs; return '<h1 class="sliderBubbleText">' + getDisplayTimeWithoutAmPm(new Date(parseInt(ms)), true) + "</h1>"; } return "--:--"; } if (!currentRuntimeTicks) { return "--:--"; } var ticks = currentRuntimeTicks; ticks /= 100; ticks *= value; var item = currentItem; if (item && item.Chapters && item.Chapters.length && item.Chapters[0].ImageTag) { var html = getChapterBubbleHtml(connectionManager.getApiClient(item.ServerId), item, item.Chapters, ticks); if (html) { return html; } } return '<h1 class="sliderBubbleText">' + datetime.getDisplayRunningTime(ticks) + "</h1>"; }; view.querySelector(".btnPreviousTrack").addEventListener("click", function () { playbackManager.previousTrack(currentPlayer); }); view.querySelector(".btnPause").addEventListener("click", function () { playbackManager.playPause(currentPlayer); }); view.querySelector(".btnNextTrack").addEventListener("click", function () { playbackManager.nextTrack(currentPlayer); }); btnRewind.addEventListener("click", function () { playbackManager.rewind(currentPlayer); }); btnFastForward.addEventListener("click", function () { playbackManager.fastForward(currentPlayer); }); view.querySelector(".btnAudio").addEventListener("click", showAudioTrackSelection); 
view.querySelector(".btnSubtitles").addEventListener("click", showSubtitleTrackSelection); if (browser.touch) { (function () { require(["touchHelper"], function (TouchHelper) { self.touchHelper = new TouchHelper(view, { swipeYThreshold: 30, triggerOnMove: true, preventDefaultOnMove: true, ignoreTagNames: ["BUTTON", "INPUT", "TEXTAREA"] }); events.on(self.touchHelper, "swipeup", onVerticalSwipe); events.on(self.touchHelper, "swipedown", onVerticalSwipe); }); })(); } }; });
idx:  1
id:   12,220
msg:  can this long line be wrapped?
proj: jellyfin-jellyfin-web
lang: js
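The reviewer asks whether the long setKeyboardSteps call added by this patch can be wrapped. One possible wrapping, using only the identifiers that already appear in the patch (how the project actually chooses to format it is an assumption):

```js
if (nowPlayingItem.RunTimeTicks) {
    // Same expression as in the patch, split so each argument sits on its own line.
    nowPlayingPositionSlider.setKeyboardSteps(
        userSettings.skipBackLength() * 1000000 / nowPlayingItem.RunTimeTicks,
        userSettings.skipForwardLength() * 1000000 / nowPlayingItem.RunTimeTicks
    );
}
```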
@@ -1731,6 +1731,12 @@ class ComparisonChecker(_BasicChecker): 'comparison-with-itself', 'Used when something is compared against itself.', ), + 'W0143': ('Comparing bare callable, might have skipped parenthesis after callable', + 'comparison-with-callable', + 'Used when callable is used in a comparison without parenthesis, ' + 'which means instead of comparing result of callable, ' + 'callable itself is compared', + ), }
y: 1

oldf (full file before the patch):
# -*- coding: utf-8 -*- # Copyright (c) 2006-2016 LOGILAB S.A. (Paris, FRANCE) <[email protected]> # Copyright (c) 2010 Daniel Harding <[email protected]> # Copyright (c) 2012-2014 Google, Inc. # Copyright (c) 2013-2017 Claudiu Popa <[email protected]> # Copyright (c) 2014 Brett Cannon <[email protected]> # Copyright (c) 2014 Arun Persaud <[email protected]> # Copyright (c) 2015 Nick Bastin <[email protected]> # Copyright (c) 2015 Michael Kefeder <[email protected]> # Copyright (c) 2015 Dmitry Pribysh <[email protected]> # Copyright (c) 2015 Stephane Wirtel <[email protected]> # Copyright (c) 2015 Cosmin Poieana <[email protected]> # Copyright (c) 2015 Florian Bruhin <[email protected]> # Copyright (c) 2015 Radu Ciorba <[email protected]> # Copyright (c) 2015 Ionel Cristian Maries <[email protected]> # Copyright (c) 2016-2017 Łukasz Rogalski <[email protected]> # Copyright (c) 2016 Glenn Matthews <[email protected]> # Copyright (c) 2016 Elias Dorneles <[email protected]> # Copyright (c) 2016 Ashley Whetter <[email protected]> # Copyright (c) 2016 Yannack <[email protected]> # Copyright (c) 2016 Jakub Wilk <[email protected]> # Copyright (c) 2016 Alex Jurkiewicz <[email protected]> # Copyright (c) 2017 ttenhoeve-aa <[email protected]> # Copyright (c) 2017 hippo91 <[email protected]> # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html # For details: https://github.com/PyCQA/pylint/blob/master/COPYING """basic checker for Python code""" import builtins import collections import itertools import sys import re import astroid import astroid.bases import astroid.scoped_nodes from pylint import checkers from pylint import exceptions from pylint import interfaces from pylint.checkers import utils from pylint import reporters from pylint.checkers.utils import get_node_last_lineno from pylint.reporters.ureports import nodes as reporter_nodes import pylint.utils as lint_utils class NamingStyle(object): # It may seem counterintuitive that single naming style # has multiple "accepted" forms of regular expressions, # but we need to special-case stuff like dunder names # in method names. 
CLASS_NAME_RGX = None MOD_NAME_RGX = None CONST_NAME_RGX = None COMP_VAR_RGX = None DEFAULT_NAME_RGX = None CLASS_ATTRIBUTE_RGX = None @classmethod def get_regex(cls, name_type): return { 'module': cls.MOD_NAME_RGX, 'const': cls.CONST_NAME_RGX, 'class': cls.CLASS_NAME_RGX, 'function': cls.DEFAULT_NAME_RGX, 'method': cls.DEFAULT_NAME_RGX, 'attr': cls.DEFAULT_NAME_RGX, 'argument': cls.DEFAULT_NAME_RGX, 'variable': cls.DEFAULT_NAME_RGX, 'class_attribute': cls.CLASS_ATTRIBUTE_RGX, 'inlinevar': cls.COMP_VAR_RGX, }[name_type] class SnakeCaseStyle(NamingStyle): CLASS_NAME_RGX = re.compile('[a-z_][a-z0-9_]+$') MOD_NAME_RGX = re.compile('([a-z_][a-z0-9_]*)$') CONST_NAME_RGX = re.compile('(([a-z_][a-z0-9_]*)|(__.*__))$') COMP_VAR_RGX = re.compile('[a-z_][a-z0-9_]*$') DEFAULT_NAME_RGX = re.compile('(([a-z_][a-z0-9_]{2,30})|(_[a-z0-9_]*)|(__[a-z][a-z0-9_]+__))$') CLASS_ATTRIBUTE_RGX = re.compile(r'(([a-z_][a-z0-9_]{2,30}|(__.*__)))$') class CamelCaseStyle(NamingStyle): CLASS_NAME_RGX = re.compile('[a-z_][a-zA-Z0-9]+$') MOD_NAME_RGX = re.compile('([a-z_][a-zA-Z0-9]*)$') CONST_NAME_RGX = re.compile('(([a-z_][A-Za-z0-9]*)|(__.*__))$') COMP_VAR_RGX = re.compile('[a-z_][A-Za-z0-9]*$') DEFAULT_NAME_RGX = re.compile('(([a-z_][a-zA-Z0-9]{2,30})|(__[a-z][a-zA-Z0-9_]+__))$') CLASS_ATTRIBUTE_RGX = re.compile(r'([a-z_][A-Za-z0-9]{2,30}|(__.*__))$') class PascalCaseStyle(NamingStyle): CLASS_NAME_RGX = re.compile('[A-Z_][a-zA-Z0-9]+$') MOD_NAME_RGX = re.compile('[A-Z_][a-zA-Z0-9]+$') CONST_NAME_RGX = re.compile('(([A-Z_][A-Za-z0-9]*)|(__.*__))$') COMP_VAR_RGX = re.compile('[A-Z_][a-zA-Z0-9]+$') DEFAULT_NAME_RGX = re.compile('[A-Z_][a-zA-Z0-9]{2,30}$|(__[a-z][a-zA-Z0-9_]+__)$') CLASS_ATTRIBUTE_RGX = re.compile('[A-Z_][a-zA-Z0-9]{2,30}$') class UpperCaseStyle(NamingStyle): CLASS_NAME_RGX = re.compile('[A-Z_][A-Z0-9_]+$') MOD_NAME_RGX = re.compile('[A-Z_][A-Z0-9_]+$') CONST_NAME_RGX = re.compile('(([A-Z_][A-Z0-9_]*)|(__.*__))$') COMP_VAR_RGX = re.compile('[A-Z_][A-Z0-9_]+$') DEFAULT_NAME_RGX = re.compile('([A-Z_][A-Z0-9_]{2,30})|(__[a-z][a-zA-Z0-9_]+__)$') CLASS_ATTRIBUTE_RGX = re.compile('[A-Z_][A-Z0-9_]{2,30}$') class AnyStyle(NamingStyle): @classmethod def get_regex(cls, name_type): return re.compile('.*') NAMING_STYLES = {'snake_case': SnakeCaseStyle, 'camelCase': CamelCaseStyle, 'PascalCase': PascalCaseStyle, 'UPPER_CASE': UpperCaseStyle, 'any': AnyStyle} # do not require a doc string on private/system methods NO_REQUIRED_DOC_RGX = re.compile('^_') REVERSED_PROTOCOL_METHOD = '__reversed__' SEQUENCE_PROTOCOL_METHODS = ('__getitem__', '__len__') REVERSED_METHODS = (SEQUENCE_PROTOCOL_METHODS, (REVERSED_PROTOCOL_METHOD, )) TYPECHECK_COMPARISON_OPERATORS = frozenset(('is', 'is not', '==', '!=', 'in', 'not in')) LITERAL_NODE_TYPES = (astroid.Const, astroid.Dict, astroid.List, astroid.Set) UNITTEST_CASE = 'unittest.case' BUILTINS = builtins.__name__ TYPE_QNAME = "%s.type" % BUILTINS PY33 = sys.version_info >= (3, 3) PY3K = sys.version_info >= (3, 0) PY35 = sys.version_info >= (3, 5) ABC_METACLASSES = { '_py_abc.ABCMeta', # Python 3.7+, 'abc.ABCMeta', } # Name categories that are always consistent with all naming conventions. 
EXEMPT_NAME_CATEGORIES = {'exempt', 'ignore'} # A mapping from builtin-qname -> symbol, to be used when generating messages # about dangerous default values as arguments DEFAULT_ARGUMENT_SYMBOLS = dict( zip(['.'.join([BUILTINS, x]) for x in ('set', 'dict', 'list')], ['set()', '{}', '[]']) ) REVERSED_COMPS = {'<': '>', '<=': '>=', '>': '<', '>=': '<='} def _redefines_import(node): """ Detect that the given node (AssignName) is inside an exception handler and redefines an import from the tryexcept body. Returns True if the node redefines an import, False otherwise. """ current = node while current and not isinstance(current.parent, astroid.ExceptHandler): current = current.parent if not current or not utils.error_of_type(current.parent, ImportError): return False try_block = current.parent.parent for import_node in try_block.nodes_of_class((astroid.ImportFrom, astroid.Import)): for name, alias in import_node.names: if alias: if alias == node.name: return True elif name == node.name: return True return False def in_loop(node): """return True if the node is inside a kind of for loop""" parent = node.parent while parent is not None: if isinstance(parent, (astroid.For, astroid.ListComp, astroid.SetComp, astroid.DictComp, astroid.GeneratorExp)): return True parent = parent.parent return False def in_nested_list(nested_list, obj): """return true if the object is an element of <nested_list> or of a nested list """ for elmt in nested_list: if isinstance(elmt, (list, tuple)): if in_nested_list(elmt, obj): return True elif elmt == obj: return True return False def _get_break_loop_node(break_node): """ Returns the loop node that holds the break node in arguments. Args: break_node (astroid.Break): the break node of interest. Returns: astroid.For or astroid.While: the loop node holding the break node. """ loop_nodes = (astroid.For, astroid.While) parent = break_node.parent while not isinstance(parent, loop_nodes) or break_node in getattr(parent, 'orelse', []): parent = parent.parent if parent is None: break return parent def _loop_exits_early(loop): """ Returns true if a loop may ends up in a break statement. Args: loop (astroid.For, astroid.While): the loop node inspected. Returns: bool: True if the loop may ends up in a break statement, False otherwise. """ loop_nodes = (astroid.For, astroid.While) definition_nodes = (astroid.FunctionDef, astroid.ClassDef) inner_loop_nodes = [ _node for _node in loop.nodes_of_class(loop_nodes, skip_klass=definition_nodes) if _node != loop ] return any( _node for _node in loop.nodes_of_class(astroid.Break, skip_klass=definition_nodes) if _get_break_loop_node(_node) not in inner_loop_nodes ) def _is_multi_naming_match(match, node_type, confidence): return (match is not None and match.lastgroup is not None and match.lastgroup not in EXEMPT_NAME_CATEGORIES and (node_type != 'method' or confidence != interfaces.INFERENCE_FAILURE)) BUILTIN_PROPERTY = 'builtins.property' def _get_properties(config): """Returns a tuple of property classes and names. Property classes are fully qualified, such as 'abc.abstractproperty' and property names are the actual names, such as 'abstract_property'. """ property_classes = {BUILTIN_PROPERTY} property_names = set() # Not returning 'property', it has its own check. 
if config is not None: property_classes.update(config.property_classes) property_names.update((prop.rsplit('.', 1)[-1] for prop in config.property_classes)) return property_classes, property_names def _determine_function_name_type(node, config=None): """Determine the name type whose regex the a function's name should match. :param node: A function node. :type node: astroid.node_classes.NodeNG :param config: Configuration from which to pull additional property classes. :type config: :class:`optparse.Values` :returns: One of ('function', 'method', 'attr') :rtype: str """ property_classes, property_names = _get_properties(config) if not node.is_method(): return 'function' if node.decorators: decorators = node.decorators.nodes else: decorators = [] for decorator in decorators: # If the function is a property (decorated with @property # or @abc.abstractproperty), the name type is 'attr'. if (isinstance(decorator, astroid.Name) or (isinstance(decorator, astroid.Attribute) and decorator.attrname in property_names)): infered = utils.safe_infer(decorator) if infered and infered.qname() in property_classes: return 'attr' # If the function is decorated using the prop_method.{setter,getter} # form, treat it like an attribute as well. elif (isinstance(decorator, astroid.Attribute) and decorator.attrname in ('setter', 'deleter')): return 'attr' return 'method' def _has_abstract_methods(node): """ Determine if the given `node` has abstract methods. The methods should be made abstract by decorating them with `abc` decorators. """ return len(utils.unimplemented_abstract_methods(node)) > 0 def report_by_type_stats(sect, stats, old_stats): """make a report of * percentage of different types documented * percentage of different types with a bad name """ # percentage of different types documented and/or with a bad name nice_stats = {} for node_type in ('module', 'class', 'method', 'function'): try: total = stats[node_type] except KeyError: raise exceptions.EmptyReportError() nice_stats[node_type] = {} if total != 0: try: documented = total - stats['undocumented_'+node_type] percent = (documented * 100.) / total nice_stats[node_type]['percent_documented'] = '%.2f' % percent except KeyError: nice_stats[node_type]['percent_documented'] = 'NC' try: percent = (stats['badname_'+node_type] * 100.) / total nice_stats[node_type]['percent_badname'] = '%.2f' % percent except KeyError: nice_stats[node_type]['percent_badname'] = 'NC' lines = ('type', 'number', 'old number', 'difference', '%documented', '%badname') for node_type in ('module', 'class', 'method', 'function'): new = stats[node_type] old = old_stats.get(node_type, None) if old is not None: diff_str = reporters.diff_string(old, new) else: old, diff_str = 'NC', 'NC' lines += (node_type, str(new), str(old), diff_str, nice_stats[node_type].get('percent_documented', '0'), nice_stats[node_type].get('percent_badname', '0')) sect.append(reporter_nodes.Table(children=lines, cols=6, rheaders=1)) def redefined_by_decorator(node): """return True if the object is a method redefined via decorator. 
For example: @property def x(self): return self._x @x.setter def x(self, value): self._x = value """ if node.decorators: for decorator in node.decorators.nodes: if (isinstance(decorator, astroid.Attribute) and getattr(decorator.expr, 'name', None) == node.name): return True return False class _BasicChecker(checkers.BaseChecker): __implements__ = interfaces.IAstroidChecker name = 'basic' class BasicErrorChecker(_BasicChecker): msgs = { 'E0100': ('__init__ method is a generator', 'init-is-generator', 'Used when the special class method __init__ is turned into a ' 'generator by a yield in its body.'), 'E0101': ('Explicit return in __init__', 'return-in-init', 'Used when the special class method __init__ has an explicit ' 'return value.'), 'E0102': ('%s already defined line %s', 'function-redefined', 'Used when a function / class / method is redefined.'), 'E0103': ('%r not properly in loop', 'not-in-loop', 'Used when break or continue keywords are used outside a loop.'), 'E0104': ('Return outside function', 'return-outside-function', 'Used when a "return" statement is found outside a function or ' 'method.'), 'E0105': ('Yield outside function', 'yield-outside-function', 'Used when a "yield" statement is found outside a function or ' 'method.'), 'E0106': ('Return with argument inside generator', 'return-arg-in-generator', 'Used when a "return" statement with an argument is found ' 'outside in a generator function or method (e.g. with some ' '"yield" statements).', {'maxversion': (3, 3)}), 'E0107': ("Use of the non-existent %s operator", 'nonexistent-operator', "Used when you attempt to use the C-style pre-increment or " "pre-decrement operator -- and ++, which doesn't exist in Python."), 'E0108': ('Duplicate argument name %s in function definition', 'duplicate-argument-name', 'Duplicate argument names in function definitions are syntax' ' errors.'), 'E0110': ('Abstract class %r with abstract methods instantiated', 'abstract-class-instantiated', 'Used when an abstract class with `abc.ABCMeta` as metaclass ' 'has abstract methods and is instantiated.'), 'W0120': ('Else clause on loop without a break statement', 'useless-else-on-loop', 'Loops should only have an else clause if they can exit early ' 'with a break statement, otherwise the statements under else ' 'should be on the same scope as the loop itself.'), 'E0112': ('More than one starred expression in assignment', 'too-many-star-expressions', 'Emitted when there are more than one starred ' 'expressions (`*x`) in an assignment. 
This is a SyntaxError.'), 'E0113': ('Starred assignment target must be in a list or tuple', 'invalid-star-assignment-target', 'Emitted when a star expression is used as a starred ' 'assignment target.'), 'E0114': ('Can use starred expression only in assignment target', 'star-needs-assignment-target', 'Emitted when a star expression is not used in an ' 'assignment target.'), 'E0115': ('Name %r is nonlocal and global', 'nonlocal-and-global', 'Emitted when a name is both nonlocal and global.'), 'E0116': ("'continue' not supported inside 'finally' clause", 'continue-in-finally', 'Emitted when the `continue` keyword is found ' 'inside a finally clause, which is a SyntaxError.'), 'E0117': ("nonlocal name %s found without binding", 'nonlocal-without-binding', 'Emitted when a nonlocal variable does not have an attached ' 'name somewhere in the parent scopes'), 'E0118': ("Name %r is used prior to global declaration", 'used-prior-global-declaration', 'Emitted when a name is used prior a global declaration, ' 'which results in an error since Python 3.6.', {'minversion': (3, 6)}), } @utils.check_messages('function-redefined') def visit_classdef(self, node): self._check_redefinition('class', node) @utils.check_messages('too-many-star-expressions', 'invalid-star-assignment-target') def visit_assign(self, node): starred = list(node.targets[0].nodes_of_class(astroid.Starred)) if len(starred) > 1: self.add_message('too-many-star-expressions', node=node) # Check *a = b if isinstance(node.targets[0], astroid.Starred): self.add_message('invalid-star-assignment-target', node=node) @utils.check_messages('star-needs-assignment-target') def visit_starred(self, node): """Check that a Starred expression is used in an assignment target.""" if isinstance(node.parent, astroid.Call): # f(*args) is converted to Call(args=[Starred]), so ignore # them for this check. return if PY35 and isinstance(node.parent, (astroid.List, astroid.Tuple, astroid.Set, astroid.Dict)): # PEP 448 unpacking. 
return stmt = node.statement() if not isinstance(stmt, astroid.Assign): return if stmt.value is node or stmt.value.parent_of(node): self.add_message('star-needs-assignment-target', node=node) @utils.check_messages('init-is-generator', 'return-in-init', 'function-redefined', 'return-arg-in-generator', 'duplicate-argument-name', 'nonlocal-and-global', 'used-prior-global-declaration') def visit_functiondef(self, node): self._check_nonlocal_and_global(node) self._check_name_used_prior_global(node) if (not redefined_by_decorator(node) and not utils.is_registered_in_singledispatch_function(node)): self._check_redefinition(node.is_method() and 'method' or 'function', node) # checks for max returns, branch, return in __init__ returns = node.nodes_of_class(astroid.Return, skip_klass=(astroid.FunctionDef, astroid.ClassDef)) if node.is_method() and node.name == '__init__': if node.is_generator(): self.add_message('init-is-generator', node=node) else: values = [r.value for r in returns] # Are we returning anything but None from constructors if any(v for v in values if not utils.is_none(v)): self.add_message('return-in-init', node=node) elif node.is_generator(): # make sure we don't mix non-None returns and yields if not PY33: for retnode in returns: if isinstance(retnode.value, astroid.Const) and \ retnode.value.value is not None: self.add_message('return-arg-in-generator', node=node, line=retnode.fromlineno) # Check for duplicate names args = set() for name in node.argnames(): if name in args: self.add_message('duplicate-argument-name', node=node, args=(name,)) else: args.add(name) visit_asyncfunctiondef = visit_functiondef def _check_name_used_prior_global(self, node): scope_globals = { name: child for child in node.nodes_of_class(astroid.Global) for name in child.names if child.scope() is node } for node_name in node.nodes_of_class(astroid.Name): if node_name.scope() is not node: continue name = node_name.name corresponding_global = scope_globals.get(name) if not corresponding_global: continue global_lineno = corresponding_global.fromlineno if global_lineno and global_lineno > node_name.fromlineno: self.add_message('used-prior-global-declaration', node=node_name, args=(name, )) def _check_nonlocal_and_global(self, node): """Check that a name is both nonlocal and global.""" def same_scope(current): return current.scope() is node from_iter = itertools.chain.from_iterable nonlocals = set(from_iter( child.names for child in node.nodes_of_class(astroid.Nonlocal) if same_scope(child))) global_vars = set(from_iter( child.names for child in node.nodes_of_class(astroid.Global) if same_scope(child))) for name in nonlocals.intersection(global_vars): self.add_message('nonlocal-and-global', args=(name, ), node=node) @utils.check_messages('return-outside-function') def visit_return(self, node): if not isinstance(node.frame(), astroid.FunctionDef): self.add_message('return-outside-function', node=node) @utils.check_messages('yield-outside-function') def visit_yield(self, node): self._check_yield_outside_func(node) @utils.check_messages('yield-outside-function') def visit_yieldfrom(self, node): self._check_yield_outside_func(node) @utils.check_messages('not-in-loop', 'continue-in-finally') def visit_continue(self, node): self._check_in_loop(node, 'continue') @utils.check_messages('not-in-loop') def visit_break(self, node): self._check_in_loop(node, 'break') @utils.check_messages('useless-else-on-loop') def visit_for(self, node): self._check_else_on_loop(node) @utils.check_messages('useless-else-on-loop') def 
visit_while(self, node): self._check_else_on_loop(node) @utils.check_messages('nonexistent-operator') def visit_unaryop(self, node): """check use of the non-existent ++ and -- operator operator""" if ((node.op in '+-') and isinstance(node.operand, astroid.UnaryOp) and (node.operand.op == node.op)): self.add_message('nonexistent-operator', node=node, args=node.op*2) def _check_nonlocal_without_binding(self, node, name): current_scope = node.scope() while True: if current_scope.parent is None: break if not isinstance(current_scope, (astroid.ClassDef, astroid.FunctionDef)): self.add_message('nonlocal-without-binding', args=(name, ), node=node) return if name not in current_scope.locals: current_scope = current_scope.parent.scope() continue # Okay, found it. return if not isinstance(current_scope, astroid.FunctionDef): self.add_message('nonlocal-without-binding', args=(name, ), node=node) @utils.check_messages('nonlocal-without-binding') def visit_nonlocal(self, node): for name in node.names: self._check_nonlocal_without_binding(node, name) @utils.check_messages('abstract-class-instantiated') def visit_call(self, node): """ Check instantiating abstract class with abc.ABCMeta as metaclass. """ try: for inferred in node.func.infer(): self._check_inferred_class_is_abstract(inferred, node) except astroid.InferenceError: return def _check_inferred_class_is_abstract(self, infered, node): if not isinstance(infered, astroid.ClassDef): return klass = utils.node_frame_class(node) if klass is infered: # Don't emit the warning if the class is instantiated # in its own body or if the call is not an instance # creation. If the class is instantiated into its own # body, we're expecting that it knows what it is doing. return # __init__ was called metaclass = infered.metaclass() abstract_methods = _has_abstract_methods(infered) if metaclass is None: # Python 3.4 has `abc.ABC`, which won't be detected # by ClassNode.metaclass() for ancestor in infered.ancestors(): if ancestor.qname() == 'abc.ABC' and abstract_methods: self.add_message('abstract-class-instantiated', args=(infered.name, ), node=node) break return if metaclass.qname() in ABC_METACLASSES and abstract_methods: self.add_message('abstract-class-instantiated', args=(infered.name, ), node=node) def _check_yield_outside_func(self, node): if not isinstance(node.frame(), (astroid.FunctionDef, astroid.Lambda)): self.add_message('yield-outside-function', node=node) def _check_else_on_loop(self, node): """Check that any loop with an else clause has a break statement.""" if node.orelse and not _loop_exits_early(node): self.add_message('useless-else-on-loop', node=node, # This is not optimal, but the line previous # to the first statement in the else clause # will usually be the one that contains the else:. 
line=node.orelse[0].lineno - 1) def _check_in_loop(self, node, node_name): """check that a node is inside a for or while loop""" _node = node.parent while _node: if isinstance(_node, (astroid.For, astroid.While)): if node not in _node.orelse: return if isinstance(_node, (astroid.ClassDef, astroid.FunctionDef)): break if (isinstance(_node, astroid.TryFinally) and node in _node.finalbody and isinstance(node, astroid.Continue)): self.add_message('continue-in-finally', node=node) _node = _node.parent self.add_message('not-in-loop', node=node, args=node_name) def _check_redefinition(self, redeftype, node): """check for redefinition of a function / method / class name""" defined_self = node.parent.frame()[node.name] if defined_self is not node and not astroid.are_exclusive(node, defined_self): dummy_variables_rgx = lint_utils.get_global_option( self, 'dummy-variables-rgx', default=None) if dummy_variables_rgx and dummy_variables_rgx.match(node.name): return self.add_message('function-redefined', node=node, args=(redeftype, defined_self.fromlineno)) class BasicChecker(_BasicChecker): """checks for : * doc strings * number of arguments, local variables, branches, returns and statements in functions, methods * required module attributes * dangerous default values as arguments * redefinition of function / method / class * uses of the global statement """ __implements__ = interfaces.IAstroidChecker name = 'basic' msgs = { 'W0101': ('Unreachable code', 'unreachable', 'Used when there is some code behind a "return" or "raise" ' 'statement, which will never be accessed.'), 'W0102': ('Dangerous default value %s as argument', 'dangerous-default-value', 'Used when a mutable value as list or dictionary is detected in ' 'a default value for an argument.'), 'W0104': ('Statement seems to have no effect', 'pointless-statement', 'Used when a statement doesn\'t have (or at least seems to) ' 'any effect.'), 'W0105': ('String statement has no effect', 'pointless-string-statement', 'Used when a string is used as a statement (which of course ' 'has no effect). This is a particular case of W0104 with its ' 'own message so you can easily disable it if you\'re using ' 'those strings as documentation, instead of comments.'), 'W0106': ('Expression "%s" is assigned to nothing', 'expression-not-assigned', 'Used when an expression that is not a function call is assigned ' 'to nothing. Probably something else was intended.'), 'W0108': ('Lambda may not be necessary', 'unnecessary-lambda', 'Used when the body of a lambda expression is a function call ' 'on the same argument list as the lambda itself; such lambda ' 'expressions are in all but a few cases replaceable with the ' 'function being called in the body of the lambda.'), 'W0109': ("Duplicate key %r in dictionary", 'duplicate-key', 'Used when a dictionary expression binds the same key multiple ' 'times.'), 'W0122': ('Use of exec', 'exec-used', 'Used when you use the "exec" statement (function for Python ' '3), to discourage its usage. That doesn\'t ' 'mean you cannot use it !'), 'W0123': ('Use of eval', 'eval-used', 'Used when you use the "eval" function, to discourage its ' 'usage. Consider using `ast.literal_eval` for safely evaluating ' 'strings containing Python expressions ' 'from untrusted sources. 
'), 'W0150': ("%s statement in finally block may swallow exception", 'lost-exception', 'Used when a break or a return statement is found inside the ' 'finally clause of a try...finally block: the exceptions raised ' 'in the try clause will be silently swallowed instead of being ' 're-raised.'), 'W0199': ('Assert called on a 2-uple. Did you mean \'assert x,y\'?', 'assert-on-tuple', 'A call of assert on a tuple will always evaluate to true if ' 'the tuple is not empty, and will always evaluate to false if ' 'it is.'), 'W0124': ('Following "as" with another context manager looks like a tuple.', 'confusing-with-statement', 'Emitted when a `with` statement component returns multiple values ' 'and uses name binding with `as` only for a part of those values, ' 'as in with ctx() as a, b. This can be misleading, since it\'s not ' 'clear if the context manager returns a tuple or if the node without ' 'a name binding is another context manager.'), 'W0125': ('Using a conditional statement with a constant value', 'using-constant-test', 'Emitted when a conditional statement (If or ternary if) ' 'uses a constant value for its test. This might not be what ' 'the user intended to do.'), 'E0111': ('The first reversed() argument is not a sequence', 'bad-reversed-sequence', 'Used when the first argument to reversed() builtin ' 'isn\'t a sequence (does not implement __reversed__, ' 'nor __getitem__ and __len__'), } reports = (('RP0101', 'Statistics by type', report_by_type_stats),) def __init__(self, linter): _BasicChecker.__init__(self, linter) self.stats = None self._tryfinallys = None def open(self): """initialize visit variables and statistics """ self._tryfinallys = [] self.stats = self.linter.add_stats(module=0, function=0, method=0, class_=0) @utils.check_messages('using-constant-test') def visit_if(self, node): self._check_using_constant_test(node, node.test) @utils.check_messages('using-constant-test') def visit_ifexp(self, node): self._check_using_constant_test(node, node.test) @utils.check_messages('using-constant-test') def visit_comprehension(self, node): if node.ifs: for if_test in node.ifs: self._check_using_constant_test(node, if_test) def _check_using_constant_test(self, node, test): const_nodes = ( astroid.Module, astroid.scoped_nodes.GeneratorExp, astroid.Lambda, astroid.FunctionDef, astroid.ClassDef, astroid.bases.Generator, astroid.UnboundMethod, astroid.BoundMethod, astroid.Module) structs = (astroid.Dict, astroid.Tuple, astroid.Set) # These nodes are excepted, since they are not constant # values, requiring a computation to happen. The only type # of node in this list which doesn't have this property is # Attribute, which is excepted because the conditional statement # can be used to verify that the attribute was set inside a class, # which is definitely a valid use case. 
except_nodes = (astroid.Attribute, astroid.Call, astroid.BinOp, astroid.BoolOp, astroid.UnaryOp, astroid.Subscript) inferred = None emit = isinstance(test, (astroid.Const, ) + structs + const_nodes) if not isinstance(test, except_nodes): inferred = utils.safe_infer(test) if emit or isinstance(inferred, const_nodes): self.add_message('using-constant-test', node=node) def visit_module(self, _): """check module name, docstring and required arguments """ self.stats['module'] += 1 def visit_classdef(self, node): # pylint: disable=unused-argument """check module name, docstring and redefinition increment branch counter """ self.stats['class'] += 1 @utils.check_messages('pointless-statement', 'pointless-string-statement', 'expression-not-assigned') def visit_expr(self, node): """check for various kind of statements without effect""" expr = node.value if isinstance(expr, astroid.Const) and isinstance(expr.value, str): # treat string statement in a separated message # Handle PEP-257 attribute docstrings. # An attribute docstring is defined as being a string right after # an assignment at the module level, class level or __init__ level. scope = expr.scope() if isinstance(scope, (astroid.ClassDef, astroid.Module, astroid.FunctionDef)): if isinstance(scope, astroid.FunctionDef) and scope.name != '__init__': pass else: sibling = expr.previous_sibling() if (sibling is not None and sibling.scope() is scope and isinstance(sibling, (astroid.Assign, astroid.AnnAssign))): return self.add_message('pointless-string-statement', node=node) return # ignore if this is : # * a direct function call # * the unique child of a try/except body # * a yield (which are wrapped by a discard node in _ast XXX) # warn W0106 if we have any underlying function call (we can't predict # side effects), else pointless-statement if (isinstance(expr, (astroid.Yield, astroid.Await, astroid.Call)) or (isinstance(node.parent, astroid.TryExcept) and node.parent.body == [node])): return if any(expr.nodes_of_class(astroid.Call)): self.add_message('expression-not-assigned', node=node, args=expr.as_string()) else: self.add_message('pointless-statement', node=node) @staticmethod def _filter_vararg(node, call_args): # Return the arguments for the given call which are # not passed as vararg. for arg in call_args: if isinstance(arg, astroid.Starred): if (isinstance(arg.value, astroid.Name) and arg.value.name != node.args.vararg): yield arg else: yield arg @staticmethod def _has_variadic_argument(args, variadic_name): if not args: return True for arg in args: if isinstance(arg.value, astroid.Name): if arg.value.name != variadic_name: return True else: return True return False @utils.check_messages('unnecessary-lambda') def visit_lambda(self, node): """check whether or not the lambda is suspicious """ # if the body of the lambda is a call expression with the same # argument list as the lambda itself, then the lambda is # possibly unnecessary and at least suspicious. if node.args.defaults: # If the arguments of the lambda include defaults, then a # judgment cannot be made because there is no way to check # that the defaults defined by the lambda are the same as # the defaults defined by the function called in the body # of the lambda. return call = node.body if not isinstance(call, astroid.Call): # The body of the lambda must be a function call expression # for the lambda to be unnecessary. 
return if (isinstance(node.body.func, astroid.Attribute) and isinstance(node.body.func.expr, astroid.Call)): # Chained call, the intermediate call might # return something else (but we don't check that, yet). return ordinary_args = list(node.args.args) new_call_args = list(self._filter_vararg(node, call.args)) if node.args.kwarg: if self._has_variadic_argument(call.kwargs, node.args.kwarg): return elif call.kwargs or call.keywords: return if node.args.vararg: if self._has_variadic_argument(call.starargs, node.args.vararg): return elif call.starargs: return # The "ordinary" arguments must be in a correspondence such that: # ordinary_args[i].name == call.args[i].name. if len(ordinary_args) != len(new_call_args): return for arg, passed_arg in zip(ordinary_args, new_call_args): if not isinstance(passed_arg, astroid.Name): return if arg.name != passed_arg.name: return self.add_message('unnecessary-lambda', line=node.fromlineno, node=node) @utils.check_messages('dangerous-default-value') def visit_functiondef(self, node): """check function name, docstring, arguments, redefinition, variable names, max locals """ self.stats[node.is_method() and 'method' or 'function'] += 1 self._check_dangerous_default(node) visit_asyncfunctiondef = visit_functiondef def _check_dangerous_default(self, node): # check for dangerous default values as arguments is_iterable = lambda n: isinstance(n, (astroid.List, astroid.Set, astroid.Dict)) for default in node.args.defaults: try: value = next(default.infer()) except astroid.InferenceError: continue if (isinstance(value, astroid.Instance) and value.qname() in DEFAULT_ARGUMENT_SYMBOLS): if value is default: msg = DEFAULT_ARGUMENT_SYMBOLS[value.qname()] elif isinstance(value, astroid.Instance) or is_iterable(value): # We are here in the following situation(s): # * a dict/set/list/tuple call which wasn't inferred # to a syntax node ({}, () etc.). This can happen # when the arguments are invalid or unknown to # the inference. # * a variable from somewhere else, which turns out to be a list # or a dict. if is_iterable(default): msg = value.pytype() elif isinstance(default, astroid.Call): msg = '%s() (%s)' % (value.name, value.qname()) else: msg = '%s (%s)' % (default.as_string(), value.qname()) else: # this argument is a name msg = '%s (%s)' % (default.as_string(), DEFAULT_ARGUMENT_SYMBOLS[value.qname()]) self.add_message('dangerous-default-value', node=node, args=(msg, )) @utils.check_messages('unreachable', 'lost-exception') def visit_return(self, node): """1 - check is the node has a right sibling (if so, that's some unreachable code) 2 - check is the node is inside the finally clause of a try...finally block """ self._check_unreachable(node) # Is it inside final body of a try...finally bloc ? self._check_not_in_finally(node, 'return', (astroid.FunctionDef,)) @utils.check_messages('unreachable') def visit_continue(self, node): """check is the node has a right sibling (if so, that's some unreachable code) """ self._check_unreachable(node) @utils.check_messages('unreachable', 'lost-exception') def visit_break(self, node): """1 - check is the node has a right sibling (if so, that's some unreachable code) 2 - check is the node is inside the finally clause of a try...finally block """ # 1 - Is it right sibling ? self._check_unreachable(node) # 2 - Is it inside final body of a try...finally bloc ? 
self._check_not_in_finally(node, 'break', (astroid.For, astroid.While,)) @utils.check_messages('unreachable') def visit_raise(self, node): """check if the node has a right sibling (if so, that's some unreachable code) """ self._check_unreachable(node) @utils.check_messages('exec-used') def visit_exec(self, node): """just print a warning on exec statements""" self.add_message('exec-used', node=node) @utils.check_messages('eval-used', 'exec-used', 'bad-reversed-sequence') def visit_call(self, node): """visit a Call node -> check if this is not a blacklisted builtin call and check for * or ** use """ if isinstance(node.func, astroid.Name): name = node.func.name # ignore the name if it's not a builtin (i.e. not defined in the # locals nor globals scope) if not (name in node.frame() or name in node.root()): if name == 'exec': self.add_message('exec-used', node=node) elif name == 'reversed': self._check_reversed(node) elif name == 'eval': self.add_message('eval-used', node=node) @utils.check_messages('assert-on-tuple') def visit_assert(self, node): """check the use of an assert statement on a tuple.""" if node.fail is None and isinstance(node.test, astroid.Tuple) and \ len(node.test.elts) == 2: self.add_message('assert-on-tuple', node=node) @utils.check_messages('duplicate-key') def visit_dict(self, node): """check duplicate key in dictionary""" keys = set() for k, _ in node.items: if isinstance(k, astroid.Const): key = k.value if key in keys: self.add_message('duplicate-key', node=node, args=key) keys.add(key) def visit_tryfinally(self, node): """update try...finally flag""" self._tryfinallys.append(node) def leave_tryfinally(self, node): # pylint: disable=unused-argument """update try...finally flag""" self._tryfinallys.pop() def _check_unreachable(self, node): """check unreachable code""" unreach_stmt = node.next_sibling() if unreach_stmt is not None: self.add_message('unreachable', node=unreach_stmt) def _check_not_in_finally(self, node, node_name, breaker_classes=()): """check that a node is not inside a finally clause of a try...finally statement. If we found before a try...finally bloc a parent which its type is in breaker_classes, we skip the whole check.""" # if self._tryfinallys is empty, we're not an in try...finally block if not self._tryfinallys: return # the node could be a grand-grand...-children of the try...finally _parent = node.parent _node = node while _parent and not isinstance(_parent, breaker_classes): if hasattr(_parent, 'finalbody') and _node in _parent.finalbody: self.add_message('lost-exception', node=node, args=node_name) return _node = _parent _parent = _node.parent def _check_reversed(self, node): """ check that the argument to `reversed` is a sequence """ try: argument = utils.safe_infer(utils.get_argument_from_call(node, position=0)) except utils.NoSuchArgumentError: pass else: if argument is astroid.Uninferable: return if argument is None: # Nothing was infered. # Try to see if we have iter(). 
if isinstance(node.args[0], astroid.Call): try: func = next(node.args[0].func.infer()) except astroid.InferenceError: return if (getattr(func, 'name', None) == 'iter' and utils.is_builtin_object(func)): self.add_message('bad-reversed-sequence', node=node) return if isinstance(argument, astroid.Instance): if (argument._proxied.name == 'dict' and utils.is_builtin_object(argument._proxied)): self.add_message('bad-reversed-sequence', node=node) return if any(ancestor.name == 'dict' and utils.is_builtin_object(ancestor) for ancestor in argument._proxied.ancestors()): # Mappings aren't accepted by reversed(), unless # they provide explicitly a __reversed__ method. try: argument.locals[REVERSED_PROTOCOL_METHOD] except KeyError: self.add_message('bad-reversed-sequence', node=node) return for methods in REVERSED_METHODS: for meth in methods: try: argument.getattr(meth) except astroid.NotFoundError: break else: break else: self.add_message('bad-reversed-sequence', node=node) elif not isinstance(argument, (astroid.List, astroid.Tuple)): # everything else is not a proper sequence for reversed() self.add_message('bad-reversed-sequence', node=node) @utils.check_messages('confusing-with-statement') def visit_with(self, node): if not PY3K: # in Python 2 a "with" statement with multiple managers coresponds # to multiple nested AST "With" nodes pairs = [] parent_node = node.parent if isinstance(parent_node, astroid.With): # we only care about the direct parent, since this method # gets called for each with node anyway pairs.extend(parent_node.items) pairs.extend(node.items) else: # in PY3K a "with" statement with multiple managers coresponds # to one AST "With" node with multiple items pairs = node.items if pairs: for prev_pair, pair in zip(pairs, pairs[1:]): if (isinstance(prev_pair[1], astroid.AssignName) and (pair[1] is None and not isinstance(pair[0], astroid.Call))): # don't emit a message if the second is a function call # there's no way that can be mistaken for a name assignment if PY3K or node.lineno == node.parent.lineno: # if the line number doesn't match # we assume it's a nested "with" self.add_message('confusing-with-statement', node=node) KNOWN_NAME_TYPES = { "module", "const", "class", "function", "method", "attr", "argument", "variable", "class_attribute", "inlinevar" } HUMAN_READABLE_TYPES = { 'module': 'module', 'const': 'constant', 'class': 'class', 'function': 'function', 'method': 'method', 'attr': 'attribute', 'argument': 'argument', 'variable': 'variable', 'class_attribute': 'class attribute', 'inlinevar': 'inline iteration', } DEFAULT_NAMING_STYLES = { "module": "snake_case", "const": "UPPER_CASE", "class": "PascalCase", "function": "snake_case", "method": "snake_case", "attr": "snake_case", "argument": "snake_case", "variable": "snake_case", "class_attribute": "any", "inlinevar": "any", } def _create_naming_options(): name_options = [] for name_type in sorted(KNOWN_NAME_TYPES): human_readable_name = HUMAN_READABLE_TYPES[name_type] default_style = DEFAULT_NAMING_STYLES[name_type] name_type = name_type.replace('_', '-') name_options.append(( '%s-naming-style' % (name_type,), {'default': default_style, 'type': 'choice', 'choices': list(NAMING_STYLES.keys()), 'metavar': '<style>', 'help': 'Naming style matching correct %s names' % (human_readable_name,)}),) name_options.append(( '%s-rgx' % (name_type,), {'default': None, 'type': 'regexp', 'metavar': '<regexp>', 'help': 'Regular expression matching correct %s names. 
Overrides %s-naming-style' % (human_readable_name, name_type,)})) return tuple(name_options) class NameChecker(_BasicChecker): msgs = { 'C0102': ('Black listed name "%s"', 'blacklisted-name', 'Used when the name is listed in the black list (unauthorized ' 'names).'), 'C0103': ('%s name "%s" doesn\'t conform to %s', 'invalid-name', 'Used when the name doesn\'t conform to naming rules ' 'associated to its type (constant, variable, class...).'), 'W0111': ('Name %s will become a keyword in Python %s', 'assign-to-new-keyword', 'Used when assignment will become invalid in future ' 'Python release due to introducing new keyword'), } options = (('good-names', {'default' : ('i', 'j', 'k', 'ex', 'Run', '_'), 'type' :'csv', 'metavar' : '<names>', 'help' : 'Good variable names which should always be accepted,' ' separated by a comma'} ), ('bad-names', {'default' : ('foo', 'bar', 'baz', 'toto', 'tutu', 'tata'), 'type' :'csv', 'metavar' : '<names>', 'help' : 'Bad variable names which should always be refused, ' 'separated by a comma'} ), ('name-group', {'default' : (), 'type' :'csv', 'metavar' : '<name1:name2>', 'help' : ('Colon-delimited sets of names that determine each' ' other\'s naming style when the name regexes' ' allow several styles.')} ), ('include-naming-hint', {'default': False, 'type': 'yn', 'metavar': '<y_or_n>', 'help': 'Include a hint for the correct naming format with invalid-name'} ), ('property-classes', {'default': ('abc.abstractproperty',), 'type': 'csv', 'metavar': '<decorator names>', 'help': 'List of decorators that produce properties, such as ' 'abc.abstractproperty. Add to this list to register ' 'other decorators that produce valid properties.'} ), ) + _create_naming_options() KEYWORD_ONSET = { (3, 7): {'async', 'await'} } def __init__(self, linter): _BasicChecker.__init__(self, linter) self._name_category = {} self._name_group = {} self._bad_names = {} self._name_regexps = {} self._name_hints = {} def open(self): self.stats = self.linter.add_stats(badname_module=0, badname_class=0, badname_function=0, badname_method=0, badname_attr=0, badname_const=0, badname_variable=0, badname_inlinevar=0, badname_argument=0, badname_class_attribute=0) for group in self.config.name_group: for name_type in group.split(':'): self._name_group[name_type] = 'group_%s' % (group,) regexps, hints = self._create_naming_rules() self._name_regexps = regexps self._name_hints = hints def _create_naming_rules(self): regexps = {} hints = {} for name_type in KNOWN_NAME_TYPES: naming_style_option_name = "%s_naming_style" % (name_type,) naming_style_name = getattr(self.config, naming_style_option_name) regexps[name_type] = NAMING_STYLES[naming_style_name].get_regex(name_type) custom_regex_setting_name = "%s_rgx" % (name_type, ) custom_regex = getattr(self.config, custom_regex_setting_name, None) if custom_regex is not None: regexps[name_type] = custom_regex if custom_regex is not None: hints[name_type] = "%r pattern" % custom_regex.pattern else: hints[name_type] = "%s naming style" % naming_style_name return regexps, hints @utils.check_messages('blacklisted-name', 'invalid-name') def visit_module(self, node): self._check_name('module', node.name.split('.')[-1], node) self._bad_names = {} def leave_module(self, node): # pylint: disable=unused-argument for all_groups in self._bad_names.values(): if len(all_groups) < 2: continue groups = collections.defaultdict(list) min_warnings = sys.maxsize for group in all_groups.values(): groups[len(group)].append(group) min_warnings = min(len(group), min_warnings) if 
len(groups[min_warnings]) > 1: by_line = sorted(groups[min_warnings], key=lambda group: min(warning[0].lineno for warning in group)) warnings = itertools.chain(*by_line[1:]) else: warnings = groups[min_warnings][0] for args in warnings: self._raise_name_warning(*args) @utils.check_messages('blacklisted-name', 'invalid-name', 'assign-to-new-keyword') def visit_classdef(self, node): self._check_assign_to_new_keyword_violation(node.name, node) self._check_name('class', node.name, node) for attr, anodes in node.instance_attrs.items(): if not any(node.instance_attr_ancestors(attr)): self._check_name('attr', attr, anodes[0]) @utils.check_messages('blacklisted-name', 'invalid-name', 'assign-to-new-keyword') def visit_functiondef(self, node): # Do not emit any warnings if the method is just an implementation # of a base class method. self._check_assign_to_new_keyword_violation(node.name, node) confidence = interfaces.HIGH if node.is_method(): if utils.overrides_a_method(node.parent.frame(), node.name): return confidence = (interfaces.INFERENCE if utils.has_known_bases(node.parent.frame()) else interfaces.INFERENCE_FAILURE) self._check_name(_determine_function_name_type(node, config=self.config), node.name, node, confidence) # Check argument names args = node.args.args if args is not None: self._recursive_check_names(args, node) visit_asyncfunctiondef = visit_functiondef @utils.check_messages('blacklisted-name', 'invalid-name') def visit_global(self, node): for name in node.names: self._check_name('const', name, node) @utils.check_messages('blacklisted-name', 'invalid-name', 'assign-to-new-keyword') def visit_assignname(self, node): """check module level assigned names""" self._check_assign_to_new_keyword_violation(node.name, node) frame = node.frame() assign_type = node.assign_type() if isinstance(assign_type, astroid.Comprehension): self._check_name('inlinevar', node.name, node) elif isinstance(frame, astroid.Module): if isinstance(assign_type, astroid.Assign) and not in_loop(assign_type): if isinstance(utils.safe_infer(assign_type.value), astroid.ClassDef): self._check_name('class', node.name, node) else: if not _redefines_import(node): # Don't emit if the name redefines an import # in an ImportError except handler. 
self._check_name('const', node.name, node) elif isinstance(assign_type, astroid.ExceptHandler): self._check_name('variable', node.name, node) elif isinstance(frame, astroid.FunctionDef): # global introduced variable aren't in the function locals if node.name in frame and node.name not in frame.argnames(): if not _redefines_import(node): self._check_name('variable', node.name, node) elif isinstance(frame, astroid.ClassDef): if not list(frame.local_attr_ancestors(node.name)): self._check_name('class_attribute', node.name, node) def _recursive_check_names(self, args, node): """check names in a possibly recursive list <arg>""" for arg in args: if isinstance(arg, astroid.AssignName): self._check_name('argument', arg.name, node) else: self._recursive_check_names(arg.elts, node) def _find_name_group(self, node_type): return self._name_group.get(node_type, node_type) def _raise_name_warning(self, node, node_type, name, confidence): type_label = HUMAN_READABLE_TYPES[node_type] hint = self._name_hints[node_type] if self.config.include_naming_hint: hint += " (%r pattern)" % self._name_regexps[node_type].pattern args = ( type_label.capitalize(), name, hint ) self.add_message('invalid-name', node=node, args=args, confidence=confidence) self.stats['badname_' + node_type] += 1 def _check_name(self, node_type, name, node, confidence=interfaces.HIGH): """check for a name using the type's regexp""" def _should_exempt_from_invalid_name(node): if node_type == 'variable': inferred = utils.safe_infer(node) if isinstance(inferred, astroid.ClassDef): return True return False if utils.is_inside_except(node): clobbering, _ = utils.clobber_in_except(node) if clobbering: return if name in self.config.good_names: return if name in self.config.bad_names: self.stats['badname_' + node_type] += 1 self.add_message('blacklisted-name', node=node, args=name) return regexp = self._name_regexps[node_type] match = regexp.match(name) if _is_multi_naming_match(match, node_type, confidence): name_group = self._find_name_group(node_type) bad_name_group = self._bad_names.setdefault(name_group, {}) warnings = bad_name_group.setdefault(match.lastgroup, []) warnings.append((node, node_type, name, confidence)) if match is None and not _should_exempt_from_invalid_name(node): self._raise_name_warning(node, node_type, name, confidence) def _check_assign_to_new_keyword_violation(self, name, node): keyword_first_version = self._name_became_keyword_in_version( name, self.KEYWORD_ONSET ) if keyword_first_version is not None: self.add_message('assign-to-new-keyword', node=node, args=(name, keyword_first_version), confidence=interfaces.HIGH) @staticmethod def _name_became_keyword_in_version(name, rules): for version, keywords in rules.items(): if name in keywords and sys.version_info < version: return '.'.join(map(str, version)) return None class DocStringChecker(_BasicChecker): msgs = { 'C0111': ('Missing %s docstring', # W0131 'missing-docstring', 'Used when a module, function, class or method has no docstring.' 
'Some special methods like __init__ doesn\'t necessary require a ' 'docstring.'), 'C0112': ('Empty %s docstring', # W0132 'empty-docstring', 'Used when a module, function, class or method has an empty ' 'docstring (it would be too easy ;).'), } options = (('no-docstring-rgx', {'default' : NO_REQUIRED_DOC_RGX, 'type' : 'regexp', 'metavar' : '<regexp>', 'help' : 'Regular expression which should only match ' 'function or class names that do not require a ' 'docstring.'} ), ('docstring-min-length', {'default' : -1, 'type' : 'int', 'metavar' : '<int>', 'help': ('Minimum line length for functions/classes that' ' require docstrings, shorter ones are exempt.')} ), ) def open(self): self.stats = self.linter.add_stats(undocumented_module=0, undocumented_function=0, undocumented_method=0, undocumented_class=0) @utils.check_messages('missing-docstring', 'empty-docstring') def visit_module(self, node): self._check_docstring('module', node) @utils.check_messages('missing-docstring', 'empty-docstring') def visit_classdef(self, node): if self.config.no_docstring_rgx.match(node.name) is None: self._check_docstring('class', node) @staticmethod def _is_setter_or_deleter(node): names = {'setter', 'deleter'} for decorator in node.decorators.nodes: if (isinstance(decorator, astroid.Attribute) and decorator.attrname in names): return True return False @utils.check_messages('missing-docstring', 'empty-docstring') def visit_functiondef(self, node): if self.config.no_docstring_rgx.match(node.name) is None: ftype = 'method' if node.is_method() else 'function' if node.decorators and self._is_setter_or_deleter(node): return if isinstance(node.parent.frame(), astroid.ClassDef): overridden = False confidence = (interfaces.INFERENCE if utils.has_known_bases(node.parent.frame()) else interfaces.INFERENCE_FAILURE) # check if node is from a method overridden by its ancestor for ancestor in node.parent.frame().ancestors(): if node.name in ancestor and \ isinstance(ancestor[node.name], astroid.FunctionDef): overridden = True break self._check_docstring(ftype, node, report_missing=not overridden, confidence=confidence) elif isinstance(node.parent.frame(), astroid.Module): self._check_docstring(ftype, node) else: return visit_asyncfunctiondef = visit_functiondef def _check_docstring(self, node_type, node, report_missing=True, confidence=interfaces.HIGH): """check the node has a non empty docstring""" docstring = node.doc if docstring is None: if not report_missing: return lines = get_node_last_lineno(node) - node.lineno if node_type == 'module' and not lines: # If the module has no body, there's no reason # to require a docstring. return max_lines = self.config.docstring_min_length if node_type != 'module' and max_lines > -1 and lines < max_lines: return self.stats['undocumented_'+node_type] += 1 if (node.body and isinstance(node.body[0], astroid.Expr) and isinstance(node.body[0].value, astroid.Call)): # Most likely a string with a format call. Let's see. func = utils.safe_infer(node.body[0].value.func) if (isinstance(func, astroid.BoundMethod) and isinstance(func.bound, astroid.Instance)): # Strings in Python 3, others in Python 2. 
if PY3K and func.bound.name == 'str': return if func.bound.name in ('str', 'unicode', 'bytes'): return self.add_message('missing-docstring', node=node, args=(node_type,), confidence=confidence) elif not docstring.strip(): self.stats['undocumented_'+node_type] += 1 self.add_message('empty-docstring', node=node, args=(node_type,), confidence=confidence) class PassChecker(_BasicChecker): """check if the pass statement is really necessary""" msgs = {'W0107': ('Unnecessary pass statement', 'unnecessary-pass', 'Used when a "pass" statement that can be avoided is ' 'encountered.'), } @utils.check_messages('unnecessary-pass') def visit_pass(self, node): if len(node.parent.child_sequence(node)) > 1: self.add_message('unnecessary-pass', node=node) class LambdaForComprehensionChecker(_BasicChecker): """check for using a lambda where a comprehension would do. See <http://www.artima.com/weblogs/viewpost.jsp?thread=98196> where GvR says comprehensions would be clearer. """ msgs = {'W0110': ('map/filter on lambda could be replaced by comprehension', 'deprecated-lambda', 'Used when a lambda is the first argument to "map" or ' '"filter". It could be clearer as a list ' 'comprehension or generator expression.', {'maxversion': (3, 0)}), } @utils.check_messages('deprecated-lambda') def visit_call(self, node): """visit a Call node, check if map or filter are called with a lambda """ if not node.args: return if not isinstance(node.args[0], astroid.Lambda): return infered = utils.safe_infer(node.func) if (utils.is_builtin_object(infered) and infered.name in ['map', 'filter']): self.add_message('deprecated-lambda', node=node) def _is_one_arg_pos_call(call): """Is this a call with exactly 1 argument, where that argument is positional? """ return (isinstance(call, astroid.Call) and len(call.args) == 1 and not call.keywords) class ComparisonChecker(_BasicChecker): """Checks for comparisons - singleton comparison: 'expr == True', 'expr == False' and 'expr == None' - yoda condition: 'const "comp" right' where comp can be '==', '!=', '<', '<=', '>' or '>=', and right can be a variable, an attribute, a method or a function """ msgs = {'C0121': ('Comparison to %s should be %s', 'singleton-comparison', 'Used when an expression is compared to singleton ' 'values like True, False or None.'), 'C0122': ('Comparison should be %s', 'misplaced-comparison-constant', 'Used when the constant is placed on the left side ' 'of a comparison. It is usually clearer in intent to ' 'place it in the right hand side of the comparison.'), 'C0123': ('Using type() instead of isinstance() for a typecheck.', 'unidiomatic-typecheck', 'The idiomatic way to perform an explicit typecheck in ' 'Python is to use isinstance(x, Y) rather than ' 'type(x) == Y, type(x) is Y. 
Though there are unusual ' 'situations where these give different results.', {'old_names': [('W0154', 'unidiomatic-typecheck')]}), 'R0123': ('Comparison to literal', 'literal-comparison', 'Used when comparing an object to a literal, which is usually ' 'what you do not want to do, since you can compare to a different ' 'literal than what was expected altogether.'), 'R0124': ('Redundant comparison - %s', 'comparison-with-itself', 'Used when something is compared against itself.', ), } def _check_singleton_comparison(self, singleton, root_node, negative_check=False): if singleton.value is True: if not negative_check: suggestion = "just 'expr' or 'expr is True'" else: suggestion = "just 'not expr' or 'expr is False'" self.add_message('singleton-comparison', node=root_node, args=(True, suggestion)) elif singleton.value is False: if not negative_check: suggestion = "'not expr' or 'expr is False'" else: suggestion = "'expr' or 'expr is not False'" self.add_message('singleton-comparison', node=root_node, args=(False, suggestion)) elif singleton.value is None: if not negative_check: suggestion = "'expr is None'" else: suggestion = "'expr is not None'" self.add_message('singleton-comparison', node=root_node, args=(None, suggestion)) def _check_literal_comparison(self, literal, node): """Check if we compare to a literal, which is usually what we do not want to do.""" nodes = (astroid.List, astroid.Tuple, astroid.Dict, astroid.Set) is_other_literal = isinstance(literal, nodes) is_const = False if isinstance(literal, astroid.Const): if literal.value in (True, False, None): # Not interested in this values. return is_const = isinstance(literal.value, (bytes, str, int, float)) if is_const or is_other_literal: self.add_message('literal-comparison', node=node) def _check_misplaced_constant(self, node, left, right, operator): if isinstance(right, astroid.Const): return operator = REVERSED_COMPS.get(operator, operator) suggestion = '%s %s %r' % (right.as_string(), operator, left.value) self.add_message('misplaced-comparison-constant', node=node, args=(suggestion,)) def _check_logical_tautology(self, node): """Check if identifier is compared against itself. 
:param node: Compare node :type node: astroid.node_classes.Compare :Example: val = 786 if val == val: # [comparison-with-itself] pass """ left_operand = node.left right_operand = node.ops[0][1] operator = node.ops[0][0] if (isinstance(left_operand, astroid.Const) and isinstance(right_operand, astroid.Const)): left_operand = left_operand.value right_operand = right_operand.value elif (isinstance(left_operand, astroid.Name) and isinstance(right_operand, astroid.Name)): left_operand = left_operand.name right_operand = right_operand.name if left_operand == right_operand: suggestion = "%s %s %s" % (left_operand, operator, right_operand) self.add_message('comparison-with-itself', node=node, args=(suggestion,)) @utils.check_messages('singleton-comparison', 'misplaced-comparison-constant', 'unidiomatic-typecheck', 'literal-comparison', 'comparison-with-itself') def visit_compare(self, node): self._check_logical_tautology(node) self._check_unidiomatic_typecheck(node) # NOTE: this checker only works with binary comparisons like 'x == 42' # but not 'x == y == 42' if len(node.ops) != 1: return left = node.left operator, right = node.ops[0] if (operator in ('<', '<=', '>', '>=', '!=', '==') and isinstance(left, astroid.Const)): self._check_misplaced_constant(node, left, right, operator) if operator == '==': if isinstance(left, astroid.Const): self._check_singleton_comparison(left, node) elif isinstance(right, astroid.Const): self._check_singleton_comparison(right, node) if operator == '!=': if isinstance(right, astroid.Const): self._check_singleton_comparison(right, node, negative_check=True) if operator in ('is', 'is not'): self._check_literal_comparison(right, node) def _check_unidiomatic_typecheck(self, node): operator, right = node.ops[0] if operator in TYPECHECK_COMPARISON_OPERATORS: left = node.left if _is_one_arg_pos_call(left): self._check_type_x_is_y(node, left, operator, right) def _check_type_x_is_y(self, node, left, operator, right): """Check for expressions like type(x) == Y.""" left_func = utils.safe_infer(left.func) if not (isinstance(left_func, astroid.ClassDef) and left_func.qname() == TYPE_QNAME): return if operator in ('is', 'is not') and _is_one_arg_pos_call(right): right_func = utils.safe_infer(right.func) if (isinstance(right_func, astroid.ClassDef) and right_func.qname() == TYPE_QNAME): # type(x) == type(a) right_arg = utils.safe_infer(right.args[0]) if not isinstance(right_arg, LITERAL_NODE_TYPES): # not e.g. type(x) == type([]) return self.add_message('unidiomatic-typecheck', node=node) def register(linter): """required method to auto register this checker""" linter.register_checker(BasicErrorChecker(linter)) linter.register_checker(BasicChecker(linter)) linter.register_checker(NameChecker(linter)) linter.register_checker(DocStringChecker(linter)) linter.register_checker(PassChecker(linter)) linter.register_checker(LambdaForComprehensionChecker(linter)) linter.register_checker(ComparisonChecker(linter))
1
10,290
I'd use the same message that I suggested in the release changelog.
PyCQA-pylint
py
@@ -53,7 +53,7 @@ function DocumentArray (key, schema, options) { if (!Array.isArray(arr)) arr = [arr]; return new MongooseDocumentArray(arr, path, this); }); -}; +} /** * This schema type's name, to defend against minifiers that mangle
1
/*! * Module dependencies. */ var SchemaType = require('../schematype') , ArrayType = require('./array') , MongooseDocumentArray = require('../types/documentarray') , Subdocument = require('../types/embedded') , Document = require('../document'); var utils = require('../utils.js'); /** * SubdocsArray SchemaType constructor * * @param {String} key * @param {Schema} schema * @param {Object} options * @inherits SchemaArray * @api private */ function DocumentArray (key, schema, options) { // compile an embedded document for this schema function EmbeddedDocument () { this.$__setSchema(schema); // apply methods for (var i in schema.methods) { this[i] = schema.methods[i]; } Subdocument.apply(this, arguments); } EmbeddedDocument.prototype = Subdocument.prototype; EmbeddedDocument.schema = schema; // apply statics for (var i in schema.statics) EmbeddedDocument[i] = schema.statics[i]; EmbeddedDocument.options = options; this.schema = schema; ArrayType.call(this, key, EmbeddedDocument, options); this.schema = schema; var path = this.path; var fn = this.defaultValue; this.default(function(){ var arr = fn.call(this); if (!Array.isArray(arr)) arr = [arr]; return new MongooseDocumentArray(arr, path, this); }); }; /** * This schema type's name, to defend against minifiers that mangle * function names. * * @api private */ DocumentArray.schemaName = 'DocumentArray'; /*! * Inherits from ArrayType. */ DocumentArray.prototype = Object.create( ArrayType.prototype ); DocumentArray.prototype.constructor = DocumentArray; /** * Performs local validations first, then validations on each embedded doc * * @api private */ DocumentArray.prototype.doValidate = function (array, fn, scope) { var self = this; SchemaType.prototype.doValidate.call(this, array, function (err) { if (err) return fn(err); var count = array && array.length , error; if (!count) return fn(); // handle sparse arrays, do not use array.forEach which does not // iterate over sparse elements yet reports array.length including // them :( for (var i = 0, len = count; i < len; ++i) { // sidestep sparse entries var doc = array[i]; if (!doc) { --count || fn(); continue; } ;(function (i) { doc.validate(function (err) { if (err && !error) { // rewrite the key err.key = self.key + '.' + i + '.' + err.key; return fn(error = err); } --count || fn(); }); })(i); } }, scope); }; /** * Casts contents * * @param {Object} value * @param {Document} document that triggers the casting * @api private */ DocumentArray.prototype.cast = function (value, doc, init, prev) { var selected , subdoc , i if (!Array.isArray(value)) { return this.cast([value], doc, init, prev); } if (!(value && value.isMongooseDocumentArray)) { value = new MongooseDocumentArray(value, this.path, doc); if (prev && prev._handlers) { for (var key in prev._handlers) { doc.removeListener(key, prev._handlers[key]); } } } i = value.length; while (i--) { if (!(value[i] instanceof Subdocument) && value[i]) { if (init) { selected || (selected = scopePaths(this, doc.$__.selected, init)); subdoc = new this.casterConstructor(null, value, true, selected); value[i] = subdoc.init(value[i]); } else { try { subdoc = prev.id(value[i]._id); } catch(e) {} if (prev && subdoc) { // handle resetting doc with existing id but differing data // doc.array = [{ doc: 'val' }] subdoc.set(value[i]); } else { subdoc = new this.casterConstructor(value[i], value); } // if set() is hooked it will have no return value // see gh-746 value[i] = subdoc; } } } return value; } /*! * Scopes paths selected in a query to this array. 
* Necessary for proper default application of subdocument values. * * @param {DocumentArray} array - the array to scope `fields` paths * @param {Object|undefined} fields - the root fields selected in the query * @param {Boolean|undefined} init - if we are being created part of a query result */ function scopePaths (array, fields, init) { if (!(init && fields)) return undefined; var path = array.path + '.' , keys = Object.keys(fields) , i = keys.length , selected = {} , hasKeys , key while (i--) { key = keys[i]; if (0 === key.indexOf(path)) { hasKeys || (hasKeys = true); selected[key.substring(path.length)] = fields[key]; } } return hasKeys && selected || undefined; } /*! * Module exports. */ module.exports = DocumentArray;
1
12,596
Keep semi-colons at end of function defs please
Automattic-mongoose
js
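The semicolon this patch removes sits after a function declaration, where it is only an empty statement; the review asks to keep it as house style. A minimal sketch (hypothetical names, not mongoose code) of where a trailing semicolon genuinely matters, namely after function expressions:

// Function declaration: the trailing semicolon is optional, which is what the
// patch drops.
function buildDocArray(key) {
  return { key: key };
}

// Function expression: here the semicolon terminates the assignment. Without
// it, the '(' starting the next statement would be read as a call to this
// function, passing the IIFE below as its argument.
var toArray = function (items) {
  return Array.isArray(items) ? items : [items];
};

(function () {
  console.log(toArray('a')); // [ 'a' ]
})();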
@@ -2,13 +2,18 @@ const OperationBase = require('./operation').OperationBase; const resolveReadPreference = require('../utils').resolveReadPreference; +const ReadConcern = require('../read_concern'); +const WriteConcern = require('../write_concern'); class CommandOperationV2 extends OperationBase { constructor(parent, options) { super(options); this.ns = parent.s.namespace.withCollection('$cmd'); - this.readPreference = resolveReadPreference(parent, options); + this.readPreference = resolveReadPreference(parent, this.options); + this.readConcern = resolveReadConcern(parent, this.options); + this.writeConcern = resolveWriteConcern(parent, this.options); + this.explain = false; // TODO(NODE-2056): make logger another "inheritable" property if (parent.s.logger) {
1
'use strict'; const OperationBase = require('./operation').OperationBase; const resolveReadPreference = require('../utils').resolveReadPreference; class CommandOperationV2 extends OperationBase { constructor(parent, options) { super(options); this.ns = parent.s.namespace.withCollection('$cmd'); this.readPreference = resolveReadPreference(parent, options); // TODO(NODE-2056): make logger another "inheritable" property if (parent.s.logger) { this.logger = parent.s.logger; } else if (parent.s.db && parent.s.db.logger) { this.logger = parent.s.db.logger; } } executeCommand(server, cmd, callback) { if (this.logger && this.logger.isDebug()) { this.logger.debug(`executing command ${JSON.stringify(cmd)} against ${this.ns}`); } server.command(this.ns.toString(), cmd, this.options, (err, result) => { if (err) { callback(err, null); return; } // full response was requested if (this.options.full) { callback(null, result); return; } callback(null, result.result); }); } } module.exports = CommandOperationV2;
1
16,044
A little skeptical of `explain` being here. Why is this now a property of all commands?
mongodb-node-mongodb-native
js
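The hunk references `resolveReadConcern` and `resolveWriteConcern` without showing their bodies. A hedged sketch of the usual options-first, parent-fallback resolution such helpers perform (the property names are assumptions, not the driver's actual internals):

// Sketch only: prefer an explicitly passed concern, otherwise inherit the
// parent's (collection/db) concern.
function resolveReadConcern(parent, options) {
  return (options && options.readConcern) || (parent && parent.readConcern);
}

function resolveWriteConcern(parent, options) {
  return (options && options.writeConcern) || (parent && parent.writeConcern);
}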
@@ -1,5 +1,7 @@ -// Copyright (c) Microsoft. All rights reserved. -// Copyright (c) Microsoft. All rights reserved. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. See LICENSE file in the project root for full license information. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. See LICENSE file in the project root for full license information. namespace Microsoft.TestPlatform.Extensions.TrxLogger.ObjectModel {
1
// Copyright (c) Microsoft. All rights reserved. // Copyright (c) Microsoft. All rights reserved. namespace Microsoft.TestPlatform.Extensions.TrxLogger.ObjectModel { /// <summary> /// Interface used to define a data attachment. /// </summary> public interface IDataAttachment { /// <summary> /// Gets the description for the attachment. /// </summary> string Description { get; } } }
1
11,394
License copied two times!!
microsoft-vstest
.cs
@@ -44,7 +44,8 @@ class CustomWaiter: """ def __init__( - self, name, operation, argument, acceptors, client, delay=10, max_tries=60): + self, name, operation, argument, acceptors, client, delay=10, max_tries=60, + matcher='path'): """ Subclasses should pass specific operations, arguments, and acceptors to their super class.
1
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 """ Base class for implementing custom waiters for services that don't already have prebuilt waiters. This class leverages botocore waiter code. """ from enum import Enum import logging import botocore.waiter logger = logging.getLogger(__name__) class WaitState(Enum): SUCCESS = 'success' FAILURE = 'failure' class CustomWaiter: """ Base class for a custom waiter that leverages botocore's waiter code. Waiters poll an operation, with a specified delay between each polling attempt, until either an accepted result is returned or the number of maximum attempts is reached. To use, implement a subclass that passes the specific operation, arguments, and acceptors to the superclass. For example, to implement a custom waiter for the transcription client that waits for both success and failure outcomes of the get_transcription_job function, create a class like the following: class TranscribeCompleteWaiter(CustomWaiter): def __init__(self, client): super().__init__( 'TranscribeComplete', 'GetTranscriptionJob', 'TranscriptionJob.TranscriptionJobStatus', {'COMPLETED': WaitState.SUCCESS, 'FAILED': WaitState.FAILURE}, client) def wait(self, job_name): self._wait(TranscriptionJobName=job_name) """ def __init__( self, name, operation, argument, acceptors, client, delay=10, max_tries=60): """ Subclasses should pass specific operations, arguments, and acceptors to their super class. :param name: The name of the waiter. This can be any descriptive string. :param operation: The operation to wait for. This must match the casing of the underlying operation model, which is typically in CamelCase. :param argument: The dict keys used to access the result of the operation, in dot notation. For example, 'Job.Status' will access result['Job']['Status']. :param acceptors: The list of acceptors that indicate the wait is over. These can indicate either success or failure. The acceptor values are compared to the result of the operation after the argument keys are applied. :param client: The Boto3 client. :param delay: The number of seconds to wait between each call to the operation. :param max_tries: The maximum number of tries before exiting. """ self.name = name self.operation = operation self.argument = argument self.client = client self.waiter_model = botocore.waiter.WaiterModel({ 'version': 2, 'waiters': { name: { "delay": delay, "operation": operation, "maxAttempts": max_tries, "acceptors": [{ "state": state.value, "matcher": "path", "argument": argument, "expected": expected } for expected, state in acceptors.items()] }}}) self.waiter = botocore.waiter.create_waiter_with_client( self.name, self.waiter_model, self.client) def __call__(self, parsed, **kwargs): """ Handles the after-call event by logging information about the operation and its result. :param parsed: The parsed response from polling the operation. :param kwargs: Not used, but expected by the caller. """ status = parsed for key in self.argument.split('.'): status = status.get(key) logger.info( "Waiter %s called %s, got %s.", self.name, self.operation, status) def _wait(self, **kwargs): """ Registers for the after-call event and starts the botocore wait loop. :param kwargs: Keyword arguments that are passed to the operation being polled. """ event_name = f'after-call.{self.client.meta.service_model.service_name}' self.client.meta.events.register(event_name, self) self.waiter.wait(**kwargs) self.client.meta.events.unregister(event_name, self)
1
17,932
... their **superclass**
awsdocs-aws-doc-sdk-examples
py
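The hunk shown only widens the constructor signature; presumably the new matcher value replaces the hard-coded 'path' in the acceptor config. A hedged, standalone sketch of what that matcher field controls in a botocore waiter model (the waiter name, operation, and argument below are illustrative):

# Sketch only: with matcher 'path' the JMESPath result is compared directly to
# 'expected'; with 'pathAll' every element of a list result must match it.
import botocore.waiter

waiter_config = {
    'version': 2,
    'waiters': {
        'AllJobsComplete': {
            'delay': 10,
            'maxAttempts': 60,
            'operation': 'ListTranscriptionJobs',
            'acceptors': [{
                'state': 'success',
                'matcher': 'pathAll',
                'argument': 'TranscriptionJobSummaries[].TranscriptionJobStatus',
                'expected': 'COMPLETED',
            }],
        },
    },
}

model = botocore.waiter.WaiterModel(waiter_config)
print(model.get_waiter('AllJobsComplete').acceptors[0].matcher)  # pathAll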
@@ -526,7 +526,7 @@ module Beaker cmdline_args = conf_opts[:__commandline_args__] conf_opts = conf_opts.reject { |k,v| k == :__commandline_args__ } - curl_retries = host['master-start-curl-retries'] || options['master-start-curl-retries'] + curl_retries = host['master-start-curl-retries'] || options['master-start-curl-retries'] || 0 logger.debug "Setting curl retries to #{curl_retries}" begin
1
# -*- coding: utf-8 -*- require 'resolv' require 'inifile' require 'timeout' require 'beaker/dsl/outcomes' module Beaker module DSL # This is the heart of the Puppet Acceptance DSL. Here you find a helper # to proxy commands to hosts, more commands to move files between hosts # and execute remote scripts, confine test cases to certain hosts and # prepare the state of a test case. # # To mix this is into a class you need the following: # * a method *hosts* that yields any hosts implementing # {Beaker::Host}'s interface to act upon. # * a method *options* that provides an options hash, see {Beaker::Options::OptionsHash} # * a method *logger* that yields a logger implementing # {Beaker::Logger}'s interface. # * the module {Beaker::DSL::Roles} that provides access to the various hosts implementing # {Beaker::Host}'s interface to act upon # * the module {Beaker::DSL::Wrappers} the provides convenience methods for {Beaker::DSL::Command} creation # # # @api dsl module Helpers PUPPET_MODULE_INSTALL_IGNORE = ['.git', '.idea', '.vagrant', '.vendor', 'acceptance', 'spec', 'tests', 'log'] # @!macro common_opts # @param [Hash{Symbol=>String}] opts Options to alter execution. # @option opts [Boolean] :silent (false) Do not produce log output # @option opts [Array<Fixnum>] :acceptable_exit_codes ([0]) An array # (or range) of integer exit codes that should be considered # acceptable. An error will be thrown if the exit code does not # match one of the values in this list. # @option opts [Hash{String=>String}] :environment ({}) These will be # treated as extra environment variables that should be set before # running the command. # # The primary method for executing commands *on* some set of hosts. # # @param [Host, Array<Host>, String, Symbol] host One or more hosts to act upon, # or a role (String or Symbol) that identifies one or more hosts. # @param [String, Command] command The command to execute on *host*. # @param [Proc] block Additional actions or assertions. # @!macro common_opts # # @example Most basic usage # on hosts, 'ls /tmp' # # @example Allowing additional exit codes to pass # on agents, 'puppet agent -t', :acceptable_exit_codes => [0,2] # # @example Using the returned result for any kind of checking # if on(host, 'ls -la ~').stdout =~ /\.bin/ # ...do some action... # end # # @example Using TestCase helpers from within a test. # agents.each do |agent| # on agent, 'cat /etc/puppet/puppet.conf' do # assert_match stdout, /server = #{master}/, 'WTF Mate' # end # end # # @example Using a role (defined in a String) to identify the host # on "master", "echo hello" # # @example Using a role (defined in a Symbol) to identify the host # on :dashboard, "echo hello" # # @return [Result] An object representing the outcome of *command*. # @raise [FailTest] Raises an exception if *command* obviously fails. def on(host, command, opts = {}, &block) unless command.is_a? Command cmd_opts = {} if opts[:environment] cmd_opts['ENV'] = opts[:environment] end command = Command.new(command.to_s, [], cmd_opts) end if host.is_a? String or host.is_a? Symbol host = hosts_as(host) #check by role end if host.is_a? Array host.map { |h| on h, command, opts, &block } else @result = host.exec(command, opts) # Also, let additional checking be performed by the caller. if block_given? 
case block.arity #block with arity of 0, just hand back yourself when 0 yield self #block with arity of 1 or greater, hand back the result object else yield @result end end return @result end end # The method for executing commands on the default host # # @param [String, Command] command The command to execute on *host*. # @param [Proc] block Additional actions or assertions. # @!macro common_opts # # @example Most basic usage # shell 'ls /tmp' # # @example Allowing additional exit codes to pass # shell 'puppet agent -t', :acceptable_exit_codes => [0,2] # # @example Using the returned result for any kind of checking # if shell('ls -la ~').stdout =~ /\.bin/ # ...do some action... # end # # @example Using TestCase helpers from within a test. # agents.each do |agent| # shell('cat /etc/puppet/puppet.conf') do |result| # assert_match result.stdout, /server = #{master}/, 'WTF Mate' # end # end # # @return [Result] An object representing the outcome of *command*. # @raise [FailTest] Raises an exception if *command* obviously fails. def shell(command, opts = {}, &block) on(default, command, opts, &block) end # @deprecated # An proxy for the last {Beaker::Result#stdout} returned by # a method that makes remote calls. Use the {Beaker::Result} # object returned by the method directly instead. For Usage see # {Beaker::Result}. def stdout return nil if @result.nil? @result.stdout end # @deprecated # An proxy for the last {Beaker::Result#stderr} returned by # a method that makes remote calls. Use the {Beaker::Result} # object returned by the method directly instead. For Usage see # {Beaker::Result}. def stderr return nil if @result.nil? @result.stderr end # @deprecated # An proxy for the last {Beaker::Result#exit_code} returned by # a method that makes remote calls. Use the {Beaker::Result} # object returned by the method directly instead. For Usage see # {Beaker::Result}. def exit_code return nil if @result.nil? @result.exit_code end # Move a file from a remote to a local path # @note If using {Beaker::Host} for the hosts *scp* is not # required on the system as it uses Ruby's net/scp library. The # net-scp gem however is required (and specified in the gemspec). # # @param [Host, #do_scp_from] host One or more hosts (or some object # that responds like # {Beaker::Host#do_scp_from}. # @param [String] from_path A remote path to a file. # @param [String] to_path A local path to copy *from_path* to. # @!macro common_opts # # @return [Result] Returns the result of the SCP operation def scp_from host, from_path, to_path, opts = {} if host.is_a? Array host.each { |h| scp_from h, from_path, to_path, opts } else @result = host.do_scp_from(from_path, to_path, opts) @result.log logger end end # Move a local file to a remote host # @note If using {Beaker::Host} for the hosts *scp* is not # required on the system as it uses Ruby's net/scp library. The # net-scp gem however is required (and specified in the gemspec. # # @param [Host, #do_scp_to] host One or more hosts (or some object # that responds like # {Beaker::Host#do_scp_to}. # @param [String] from_path A local path to a file. # @param [String] to_path A remote path to copy *from_path* to. # @!macro common_opts # # @return [Result] Returns the result of the SCP operation def scp_to host, from_path, to_path, opts = {} if host.is_a? 
Array host.each { |h| scp_to h, from_path, to_path, opts } else @result = host.do_scp_to(from_path, to_path, opts) @result.log logger end end # Check to see if a package is installed on a remote host # # @param [Host] host A host object # @param [String] package_name Name of the package to check for. # # @return [Boolean] true/false if the package is found def check_for_package host, package_name host.check_for_package package_name end # Install a package on a host # # @param [Host] host A host object # @param [String] package_name Name of the package to install # # @return [Result] An object representing the outcome of *install command*. def install_package host, package_name, package_version = nil host.install_package package_name, '', package_version end # Upgrade a package on a host. The package must already be installed # # @param [Host] host A host object # @param [String] package_name Name of the package to install # # @return [Result] An object representing the outcome of *upgrade command*. def upgrade_package host, package_name host.upgrade_package package_name end # Deploy packaging configurations generated by # https://github.com/puppetlabs/packaging to a host. # # @note To ensure the repo configs are available for deployment, # you should run `rake pl:jenkins:deb_repo_configs` and # `rake pl:jenkins:rpm_repo_configs` on your project checkout # # @param [Host] host # @param [String] path The path to the generated repository config # files. ex: /myproject/pkg/repo_configs # @param [String] name A human-readable name for the repository # @param [String] version The version of the project, as used by the # packaging tools. This can be determined with # `rake pl:print_build_params` from the packaging # repo. def deploy_package_repo host, path, name, version host.deploy_package_repo path, name, version end # Create a remote file out of a string # @note This method uses Tempfile in Ruby's STDLIB as well as {#scp_to}. # # @param [Host, #do_scp_to] hosts One or more hosts (or some object # that responds like # {Beaker::Host#do_scp_from}. # @param [String] file_path A remote path to place *file_content* at. # @param [String] file_content The contents of the file to be placed. # @!macro common_opts # # @return [Result] Returns the result of the underlying SCP operation. def create_remote_file(hosts, file_path, file_content, opts = {}) Tempfile.open 'beaker' do |tempfile| File.open(tempfile.path, 'w') {|file| file.puts file_content } scp_to hosts, tempfile.path, file_path, opts end end # Create a temp directory on remote host owned by specified user. # # @param [Host] host A single remote host on which to create and adjust # the ownership of a temp directory. # @param [String] name A remote path prefix for the new temp # directory. Default value is '/tmp/beaker' # @param [String] user The name of user that should own the temp # directory. If no username is specified, use `puppet master # --configprint user` to obtain username from master. Raise RuntimeError # if this puppet command returns a non-zero exit code. # # @return [String] Returns the name of the newly-created file. def create_tmpdir_for_user(host, name='/tmp/beaker', user=nil) if not user result = on(host, "puppet master --configprint user") if not result.exit_code == 0 raise "`puppet master --configprint` failed, check that puppet is installed on #{host} or explicitly pass in a user name." end user = result.stdout.strip end if not on(host, "getent passwd #{user}").exit_code == 0 raise "User #{user} does not exist on #{host}." 
end if defined? host.tmpdir dir = host.tmpdir(name) on host, "chown #{user}.#{user} #{dir}" return dir else raise "Host platform not supported by `create_tmpdir_for_user`." end end # Move a local script to a remote host and execute it # @note this relies on {#on} and {#scp_to} # # @param [Host, #do_scp_to] host One or more hosts (or some object # that responds like # {Beaker::Host#do_scp_from}. # @param [String] script A local path to find an executable script at. # @!macro common_opts # @param [Proc] block Additional tests to run after script has executed # # @return [Result] Returns the result of the underlying SCP operation. def run_script_on(host, script, opts = {}, &block) # this is unsafe as it uses the File::SEPARATOR will be set to that # of the coordinator node. This works for us because we use cygwin # which will properly convert the paths. Otherwise this would not # work for running tests on a windows machine when the coordinator # that the harness is running on is *nix. We should use # {Beaker::Host#temp_path} instead. TODO remote_path = File.join("", "tmp", File.basename(script)) scp_to host, script, remote_path on host, remote_path, opts, &block end # Move a local script to default host and execute it # @see #run_script_on def run_script(script, opts = {}, &block) run_script_on(default, script, opts, &block) end # Copy a puppet module from a given source to all hosts under test. # Assumes each host under test has an associated 'distmoduledir' (set in the # host configuration YAML file). # # @param opts [Hash] # @option opts [String] :source The location on the test runners box where the files are found # @option opts [String] :module_name The name of the module to be copied over def puppet_module_install_on(host, opts = {}) Array(host).each do |h| on h, puppet("module install #{opts[:module_name]}") end end # Copy a puppet module from a given source to all hosts under test. # @see #puppet_module_install_on def puppet_module_install opts = {} puppet_module_install_on(hosts, opts) end # Limit the hosts a test case is run against # @note This will modify the {Beaker::TestCase#hosts} member # in place unless an array of hosts is passed into it and # {Beaker::TestCase#logger} yielding an object that responds # like {Beaker::Logger#warn}, as well as # {Beaker::DSL::Outcomes#skip_test}, and optionally # {Beaker::TestCase#hosts}. # # @param [Symbol] type The type of confinement to do. Valid parameters # are *:to* to confine the hosts to only those that # match *criteria* or *:except* to confine the test # case to only those hosts that do not match # criteria. # @param [Hash{Symbol,String=>String,Regexp,Array<String,Regexp>}] # criteria Specify the criteria with which a host should be # considered for inclusion or exclusion. The key is any attribute # of the host that will be yielded by {Beaker::Host#[]}. # The value can be any string/regex or array of strings/regexp. # The values are compared using [Enumerable#any?] so that if one # value of an array matches the host is considered a match for that # criteria. # @param [Array<Host>] host_array This creatively named parameter is # an optional array of hosts to confine to. If not passed in, this # method will modify {Beaker::TestCase#hosts} in place. # @param [Proc] block Addition checks to determine suitability of hosts # for confinement. Each host that is still valid after checking # *criteria* is then passed in turn into this block. The block # should return true if the host matches this additional criteria. 
# # @example Basic usage to confine to debian OSes. # confine :to, :platform => 'debian' # # @example Confining to anything but Windows and Solaris # confine :except, :platform => ['windows', 'solaris'] # # @example Using additional block to confine to Solaris global zone. # confine :to, :platform => 'solaris' do |solaris| # on( solaris, 'zonename' ) =~ /global/ # end # # @return [Array<Host>] Returns an array of hosts that are still valid # targets for this tests case. # @raise [SkipTest] Raises skip test if there are no valid hosts for # this test case after confinement. def confine(type, criteria, host_array = nil, &block) provided_hosts = host_array ? true : false hosts_to_modify = host_array || hosts criteria.each_pair do |property, value| case type when :except hosts_to_modify = hosts_to_modify.reject do |host| inspect_host host, property, value end if block_given? hosts_to_modify = hosts_to_modify.reject do |host| yield host end end when :to hosts_to_modify = hosts_to_modify.select do |host| inspect_host host, property, value end if block_given? hosts_to_modify = hosts_to_modify.select do |host| yield host end end else raise "Unknown option #{type}" end end if hosts_to_modify.empty? logger.warn "No suitable hosts with: #{criteria.inspect}" skip_test 'No suitable hosts found' end self.hosts = hosts_to_modify hosts_to_modify end # Ensures that host restrictions as specifid by type, criteria and # host_array are confined to activity within the passed block. # TestCase#hosts is reset after block has executed. # # @see #confine def confine_block(type, criteria, host_array = nil, &block) begin original_hosts = self.hosts.dup confine(type, criteria, host_array) yield ensure self.hosts = original_hosts end end # @!visibility private def inspect_host(host, property, one_or_more_values) values = Array(one_or_more_values) return values.any? do |value| true_false = false case value when String true_false = host[property.to_s].include? value when Regexp true_false = host[property.to_s] =~ value end true_false end end # Test Puppet running in a certain run mode with specific options. # This ensures the following steps are performed: # 1. The pre-test Puppet configuration is backed up # 2. A new Puppet configuraton file is layed down # 3. Puppet is started or restarted in the specified run mode # 4. Ensure Puppet has started correctly # 5. Further tests are yielded to # 6. Revert Puppet to the pre-test state # 7. Testing artifacts are saved in a folder named for the test # # @param [Host] host One object that act like Host # # @param [Hash{Symbol=>String}] conf_opts Represents puppet settings. # Sections of the puppet.conf may be # specified, if no section is specified the # a puppet.conf file will be written with the # options put in a section named after [mode] # # There is a special setting for command_line # arguments such as --debug or --logdest, which # cannot be set in puppet.conf. For example: # # :__commandline_args__ => '--logdest /tmp/a.log' # # These will only be applied when starting a FOSS # master, as a pe master is just bounced. # # @param [File] testdir The temporary directory which will hold backup # configuration, and other test artifacts. # # @param [Block] block The point of this method, yields so # tests may be ran. After the block is finished # puppet will revert to a previous state. # # @example A simple use case to ensure a master is running # with_puppet_running_on( master ) do # ...tests that require a master... 
# end # # @example Fully utilizing the possiblities of config options # with_puppet_running_on( master, # :main => {:logdest => '/var/blah'}, # :master => {:masterlog => '/elswhere'}, # :agent => {:server => 'localhost'} ) do # # ...tests to be ran... # end # # @api dsl def with_puppet_running_on host, conf_opts, testdir = host.tmpdir(File.basename(@path)), &block raise(ArgumentError, "with_puppet_running_on's conf_opts must be a Hash. You provided a #{conf_opts.class}: '#{conf_opts}'") if !conf_opts.kind_of?(Hash) cmdline_args = conf_opts[:__commandline_args__] conf_opts = conf_opts.reject { |k,v| k == :__commandline_args__ } curl_retries = host['master-start-curl-retries'] || options['master-start-curl-retries'] logger.debug "Setting curl retries to #{curl_retries}" begin backup_file = backup_the_file(host, host['puppetpath'], testdir, 'puppet.conf') lay_down_new_puppet_conf host, conf_opts, testdir if host['puppetservice'] bounce_service( host, host['puppetservice'], curl_retries ) else puppet_master_started = start_puppet_from_source_on!( host, cmdline_args ) end yield self if block_given? rescue Exception => early_exception original_exception = RuntimeError.new("PuppetAcceptance::DSL::Helpers.with_puppet_running_on failed (check backtrace for location) because: #{early_exception}\n#{early_exception.backtrace.join("\n")}\n") raise(original_exception) ensure begin restore_puppet_conf_from_backup( host, backup_file ) if host['puppetservice'] bounce_service( host, host['puppetservice'], curl_retries ) else if puppet_master_started stop_puppet_from_source_on( host ) else dump_puppet_log(host) end end rescue Exception => teardown_exception begin if !host.is_pe? dump_puppet_log(host) end rescue Exception => dumping_exception logger.error("Raised during attempt to dump puppet logs: #{dumping_exception}") end if original_exception logger.error("Raised during attempt to teardown with_puppet_running_on: #{teardown_exception}\n---\n") raise original_exception else raise teardown_exception end end end end # Test Puppet running in a certain run mode with specific options, # on the default host # @api dsl # @see #with_puppet_running_on def with_puppet_running conf_opts, testdir = host.tmpdir(File.basename(@path)), &block with_puppet_running_on(default, conf_opts, testdir, &block) end # @!visibility private def restore_puppet_conf_from_backup( host, backup_file ) puppetpath = host['puppetpath'] puppet_conf = File.join(puppetpath, "puppet.conf") if backup_file host.exec( Command.new( "if [ -f '#{backup_file}' ]; then " + "cat '#{backup_file}' > " + "'#{puppet_conf}'; " + "rm -f '#{backup_file}'; " + "fi" ) ) else host.exec( Command.new( "rm -f '#{puppet_conf}'" )) end end # Back up the given file in the current_dir to the new_dir # # @!visibility private # # @param host [Beaker::Host] The target host # @param current_dir [String] The directory containing the file to back up # @param new_dir [String] The directory to copy the file to # @param filename [String] The file to back up. Defaults to 'puppet.conf' # # @return [String, nil] The path to the file if the file exists, nil if it # doesn't exist. def backup_the_file host, current_dir, new_dir, filename = 'puppet.conf' old_location = current_dir + '/' + filename new_location = new_dir + '/' + filename + '.bak' if host.file_exist? 
old_location host.exec( Command.new( "cp #{old_location} #{new_location}" ) ) return new_location else logger.warn "Could not backup file '#{old_location}': no such file" nil end end # @!visibility private def start_puppet_from_source_on! host, args = '' host.exec( puppet( 'master', args ) ) logger.debug 'Waiting for the puppet master to start' unless port_open_within?( host, 8140, 10 ) raise Beaker::DSL::FailTest, 'Puppet master did not start in a timely fashion' end logger.debug 'The puppet master has started' return true end # @!visibility private def stop_puppet_from_source_on( host ) pid = host.exec( Command.new('cat `puppet master --configprint pidfile`') ).stdout.chomp host.exec( Command.new( "kill #{pid}" ) ) Timeout.timeout(10) do while host.exec( Command.new( "kill -0 #{pid}"), :acceptable_exit_codes => [0,1] ).exit_code == 0 do # until kill -0 finds no process and we know that puppet has finished cleaning up sleep 1 end end end # @!visibility private def dump_puppet_log(host) syslogfile = case host['platform'] when /fedora|centos|el/ then '/var/log/messages' when /ubuntu|debian/ then '/var/log/syslog' else return end logger.notify "\n*************************" logger.notify "* Dumping master log *" logger.notify "*************************" host.exec( Command.new( "tail -n 100 #{syslogfile}" ), :acceptable_exit_codes => [0,1]) logger.notify "*************************\n" end # @!visibility private def lay_down_new_puppet_conf( host, configuration_options, testdir ) new_conf = puppet_conf_for( host, configuration_options ) create_remote_file host, "#{testdir}/puppet.conf", new_conf.to_s host.exec( Command.new( "cat #{testdir}/puppet.conf > #{host['puppetpath']}/puppet.conf" ), :silent => true ) host.exec( Command.new( "cat #{host['puppetpath']}/puppet.conf" ) ) end # @!visibility private def puppet_conf_for host, conf_opts puppetconf = host.exec( Command.new( "cat #{host['puppetpath']}/puppet.conf" ) ).stdout new_conf = IniFile.new( puppetconf ).merge( conf_opts ) new_conf end # @!visibility private def bounce_service host, service, curl_retries = 120 host.exec puppet_resource( 'service', service, 'ensure=stopped' ) host.exec puppet_resource( 'service', service, 'ensure=running' ) curl_with_retries(" #{service} ", host, "https://localhost:8140", [35, 60], curl_retries) end # Blocks until the port is open on the host specified, returns false # on failure def port_open_within?( host, port = 8140, seconds = 120 ) repeat_for( seconds ) do host.port_open?( port ) end end # Runs 'puppet apply' on a remote host, piping manifest through stdin # # @param [Host] host The host that this command should be run on # # @param [String] manifest The puppet manifest to apply # # @!macro common_opts # @option opts [Boolean] :parseonly (false) If this key is true, the # "--parseonly" command line parameter will # be passed to the 'puppet apply' command. # # @option opts [Boolean] :trace (false) If this key exists in the Hash, # the "--trace" command line parameter will be # passed to the 'puppet apply' command. # # @option opts [Array<Integer>] :acceptable_exit_codes ([0]) The list of exit # codes that will NOT raise an error when found upon # command completion. If provided, these values will # be combined with those used in :catch_failures and # :expect_failures to create the full list of # passing exit codes. 
# # @option opts [Hash] :environment Additional environment variables to be # passed to the 'puppet apply' command # # @option opts [Boolean] :catch_failures (false) By default `puppet # --apply` will exit with 0, which does not count # as a test failure, even if there were errors or # changes when applying the manifest. This option # enables detailed exit codes and causes a test # failure if `puppet --apply` indicates there was # a failure during its execution. # # @option opts [Boolean] :catch_changes (false) This option enables # detailed exit codes and causes a test failure # if `puppet --apply` indicates that there were # changes or failures during its execution. # # @option opts [Boolean] :expect_changes (false) This option enables # detailed exit codes and causes a test failure # if `puppet --apply` indicates that there were # no resource changes during its execution. # # @option opts [Boolean] :expect_failures (false) This option enables # detailed exit codes and causes a test failure # if `puppet --apply` indicates there were no # failure during its execution. # # @option opts [Boolean] :future_parser (false) This option enables # the future parser option that is available # from Puppet verion 3.2 # By default it will use the 'current' parser. # # @option opts [Boolean] :noop (false) If this option exists, the # the "--noop" command line parameter will be # passed to the 'puppet apply' command. # # @option opts [String] :modulepath The search path for modules, as # a list of directories separated by the system # path separator character. (The POSIX path separator # is ‘:’, and the Windows path separator is ‘;’.) # # @param [Block] block This method will yield to a block of code passed # by the caller; this can be used for additional # validation, etc. # def apply_manifest_on(host, manifest, opts = {}, &block) if host.is_a?(Array) return host.map do |h| apply_manifest_on(h, manifest, opts, &block) end end on_options = {} on_options[:acceptable_exit_codes] = Array(opts[:acceptable_exit_codes]) puppet_apply_opts = {} puppet_apply_opts[:verbose] = nil puppet_apply_opts[:parseonly] = nil if opts[:parseonly] puppet_apply_opts[:trace] = nil if opts[:trace] puppet_apply_opts[:parser] = 'future' if opts[:future_parser] puppet_apply_opts[:modulepath] = opts[:modulepath] if opts[:modulepath] puppet_apply_opts[:noop] = nil if opts[:noop] # From puppet help: # "... an exit code of '2' means there were changes, an exit code of # '4' means there were failures during the transaction, and an exit # code of '6' means there were both changes and failures." if [opts[:catch_changes],opts[:catch_failures],opts[:expect_failures],opts[:expect_changes]].compact.length > 1 raise(ArgumentError, 'Cannot specify more than one of `catch_failures`, ' + '`catch_changes`, `expect_failures`, or `expect_changes` ' + 'for a single manifest') end if opts[:catch_changes] puppet_apply_opts['detailed-exitcodes'] = nil # We're after idempotency so allow exit code 0 only. on_options[:acceptable_exit_codes] |= [0] elsif opts[:catch_failures] puppet_apply_opts['detailed-exitcodes'] = nil # We're after only complete success so allow exit codes 0 and 2 only. on_options[:acceptable_exit_codes] |= [0, 2] elsif opts[:expect_failures] puppet_apply_opts['detailed-exitcodes'] = nil # We're after failures specifically so allow exit codes 1, 4, and 6 only. 
on_options[:acceptable_exit_codes] |= [1, 4, 6] elsif opts[:expect_changes] puppet_apply_opts['detailed-exitcodes'] = nil # We're after changes specifically so allow exit code 2 only. on_options[:acceptable_exit_codes] |= [2] else # Either use the provided acceptable_exit_codes or default to [0] on_options[:acceptable_exit_codes] |= [0] end # Not really thrilled with this implementation, might want to improve it # later. Basically, there is a magic trick in the constructor of # PuppetCommand which allows you to pass in a Hash for the last value in # the *args Array; if you do so, it will be treated specially. So, here # we check to see if our caller passed us a hash of environment variables # that they want to set for the puppet command. If so, we set the final # value of *args to a new hash with just one entry (the value of which # is our environment variables hash) if opts.has_key?(:environment) puppet_apply_opts['ENV'] = opts[:environment] end file_path = host.tmpfile('apply_manifest.pp') create_remote_file(host, file_path, manifest + "\n") if host[:default_apply_opts].respond_to? :merge puppet_apply_opts = host[:default_apply_opts].merge( puppet_apply_opts ) end on host, puppet('apply', file_path, puppet_apply_opts), on_options, &block end # Runs 'puppet apply' on default host, piping manifest through stdin # @see #apply_manifest_on def apply_manifest(manifest, opts = {}, &block) apply_manifest_on(default, manifest, opts, &block) end # @deprecated def run_agent_on(host, arg='--no-daemonize --verbose --onetime --test', options={}, &block) if host.is_a? Array host.each { |h| run_agent_on h, arg, options, &block } else on host, puppet_agent(arg), options, &block end end # FIX: this should be moved into host/platform # @visibility private def run_cron_on(host, action, user, entry="", &block) platform = host['platform'] if platform.include?('solaris') || platform.include?('aix') then case action when :list then args = '-l' when :remove then args = '-r' when :add on( host, "echo '#{entry}' > /var/spool/cron/crontabs/#{user}", &block ) end else # default for GNU/Linux platforms case action when :list then args = '-l -u' when :remove then args = '-r -u' when :add on( host, "echo '#{entry}' > /tmp/#{user}.cron && " + "crontab -u #{user} /tmp/#{user}.cron", &block ) end end if args case action when :list, :remove then on(host, "crontab #{args} #{user}", &block) end end end # This method accepts a block and using the puppet resource 'host' will # setup host aliases before and after that block. # # A teardown step is also added to make sure unstubbing of the host is # removed always. 
# # @param machine [String] the host to execute this stub # @param ip_spec [Hash{String=>String}] a hash containing the host to ip # mappings # @example Stub puppetlabs.com on the master to 127.0.0.1 # stub_hosts_on(master, 'puppetlabs.com' => '127.0.0.1') def stub_hosts_on(machine, ip_spec) ip_spec.each do |host, ip| logger.notify("Stubbing host #{host} to IP #{ip} on machine #{machine}") on( machine, puppet('resource', 'host', host, 'ensure=present', "ip=#{ip}") ) end teardown do ip_spec.each do |host, ip| logger.notify("Unstubbing host #{host} to IP #{ip} on machine #{machine}") on( machine, puppet('resource', 'host', host, 'ensure=absent') ) end end end # This method accepts a block and using the puppet resource 'host' will # setup host aliases before and after that block on the default host # # @example Stub puppetlabs.com on the default host to 127.0.0.1 # stub_hosts('puppetlabs.com' => '127.0.0.1') # @see #stub_hosts_on def stub_hosts(ip_spec) stub_hosts_on(default, ip_spec) end # This wraps the method `stub_hosts_on` and makes the stub specific to # the forge alias. # # forge api v1 canonical source is forge.puppetlabs.com # forge api v3 canonical source is forgeapi.puppetlabs.com # # @param machine [String] the host to perform the stub on # @param forge_host [String] The URL to use as the forge alias, will default to using :forge_host in the # global options hash def stub_forge_on(machine, forge_host = nil) #use global options hash forge_host ||= options[:forge_host] @forge_ip ||= Resolv.getaddress(forge_host) stub_hosts_on(machine, 'forge.puppetlabs.com' => @forge_ip) stub_hosts_on(machine, 'forgeapi.puppetlabs.com' => @forge_ip) end # This wraps the method `stub_hosts` and makes the stub specific to # the forge alias. # # @see #stub_forge_on def stub_forge(forge_host = nil) #use global options hash forge_host ||= options[:forge_host] stub_forge_on(default, forge_host) end def sleep_until_puppetdb_started(host) curl_with_retries("start puppetdb", host, "http://localhost:8080", 0, 120) curl_with_retries("start puppetdb (ssl)", host, "https://#{host.node_name}:8081", [35, 60]) end def curl_with_retries(desc, host, url, desired_exit_codes, max_retries = 60, retry_interval = 1) retry_command(desc, host, "curl -m 1 #{url}", desired_exit_codes, max_retries, retry_interval) end def retry_command(desc, host, command, desired_exit_codes = 0, max_retries = 60, retry_interval = 1, verbose = false) log_prefix = host.log_prefix logger.debug "\n#{log_prefix} #{Time.new.strftime('%H:%M:%S')}$ #{command}" logger.debug " Trying command #{max_retries} times." logger.debug ".", add_newline=false desired_exit_codes = [desired_exit_codes].flatten result = on host, command, {:acceptable_exit_codes => (0...127), :silent => !verbose} num_retries = 0 until desired_exit_codes.include?(result.exit_code) sleep retry_interval result = on host, command, {:acceptable_exit_codes => (0...127), :silent => !verbose} num_retries += 1 logger.debug ".", add_newline=false if (num_retries > max_retries) logger.debug " Command \`#{command}\` failed." fail("Command \`#{command}\` failed.") end end logger.debug "\n#{log_prefix} #{Time.new.strftime('%H:%M:%S')}$ #{command} ostensibly successful." 
end #Is semver-ish version a less than semver-ish version b #@param [String] a A version of the from '\d.\d.\d.*' #@param [String] b A version of the form '\d.\d.\d.*' #@return [Boolean] true if a is less than b, otherwise return false # #@note 3.0.0-160-gac44cfb is greater than 3.0.0, and 2.8.2 #@note -rc being less than final builds is not yet implemented. def version_is_less a, b a_nums = a.split('-')[0].split('.') b_nums = b.split('-')[0].split('.') (0...a_nums.length).each do |i| if i < b_nums.length if a_nums[i] < b_nums[i] return true elsif a_nums[i] > b_nums[i] return false end else return false end end #checks all dots, they are equal so examine the rest a_rest = a.split('-', 2)[1] b_rest = b.split('-', 2)[1] if a_rest and b_rest and a_rest < b_rest return false elsif a_rest and not b_rest return false elsif not a_rest and b_rest return true end return false end #stops the puppet agent running on the host def stop_agent_on(agent) vardir = agent.puppet['vardir'] agent_running = true while agent_running result = on agent, "[ -e '#{vardir}/state/agent_catalog_run.lock' ]", :acceptable_exit_codes => [0,1] agent_running = (result.exit_code == 0) sleep 2 unless agent_running end # The agent service is `pe-puppet` everywhere EXCEPT certain linux distros on PE 2.8 # In all the case that it is different, this init script will exist. So we can assume # that if the script doesn't exist, we should just use `pe-puppet` result = on agent, "[ -e /etc/init.d/pe-puppet-agent ]", :acceptable_exit_codes => [0,1] agent_service = (result.exit_code == 0) ? 'pe-puppet-agent' : 'pe-puppet' # Under a number of stupid circumstances, we can't stop the # agent using puppet. This is usually because of issues with # the init script or system on that particular configuration. avoid_puppet_at_all_costs = false avoid_puppet_at_all_costs ||= agent['platform'] =~ /el-4/ avoid_puppet_at_all_costs ||= agent['pe_ver'] && version_is_less(agent['pe_ver'], '3.2') && agent['platform'] =~ /sles/ if avoid_puppet_at_all_costs on agent, "/etc/init.d/#{agent_service} stop" else on agent, puppet_resource('service', agent_service, 'ensure=stopped') end end #stops the puppet agent running on the default host # @see #stop_agent_on def stop_agent stop_agent_on(default) end #wait for a given host to appear in the dashboard def wait_for_host_in_dashboard(host) hostname = host.node_name retry_command("Wait for #{hostname} to be in the console", dashboard, "! curl --sslv3 -k -I https://#{dashboard}/nodes/#{hostname} | grep '404 Not Found'") end # Ensure the host has requested a cert, then sign it # # @param [Host] host The host to sign for # # @return nil # @raise [FailTest] if process times out def sign_certificate_for(host) if [master, dashboard, database].include? 
host on host, puppet( 'agent -t' ), :acceptable_exit_codes => [0,1,2] on master, puppet( "cert --allow-dns-alt-names sign #{host}" ), :acceptable_exit_codes => [0,24] else hostname = Regexp.escape host.node_name last_sleep = 0 next_sleep = 1 (0..10).each do |i| fail_test("Failed to sign cert for #{hostname}") if i == 10 on master, puppet("cert --sign --all"), :acceptable_exit_codes => [0,24] break if on(master, puppet("cert --list --all")).stdout =~ /\+ "?#{hostname}"?/ sleep next_sleep (last_sleep, next_sleep) = next_sleep, last_sleep+next_sleep end end end #prompt the master to sign certs then check to confirm the cert for the default host is signed #@see #sign_certificate_for def sign_certificate sign_certificate_for(default) end # Get a facter fact from a provided host # # @param [Host] host The host to query the fact for # @param [String] name The name of the fact to query for # @!macro common_opts # # @return String The value of the fact 'name' on the provided host # @raise [FailTest] Raises an exception if call to facter fails def fact_on(host, name, opts = {}) result = on host, facter(name, opts) result.stdout.chomp if result.stdout end # Get a facter fact from the default host # @see #fact_on def fact(name, opts = {}) fact_on(default, name, opts) end #Run a curl command on the provided host(s) # # @param [Host, Array<Host>, String, Symbol] host One or more hosts to act upon, # or a role (String or Symbol) that identifies one or more hosts. # @param [String, Command] cmd The curl command to execute on *host*. # @param [Proc] block Additional actions or assertions. # @!macro common_opts # def curl_on(host, cmd, opts = {}, &block) if options.is_pe? #check global options hash on host, "curl --sslv3 %s" % cmd, opts, &block else on host, "curl %s" % cmd, opts, &block end end # Install local module for acceptance testing # should be used as a presuite to ensure local module is copied to the hosts you want, particularly masters # @api dsl # @param [Host, Array<Host>, String, Symbol] host # One or more hosts to act upon, # or a role (String or Symbol) that identifies one or more hosts. 
# @option opts [String] :source ('./') # The current directory where the module sits, otherwise will try # and walk the tree to figure out # @option opts [String] :module_name (nil) # Name which the module should be installed under, please do not include author, # if none is provided it will attempt to parse the metadata.json and then the Modulefile to determine # the name of the module # @option opts [String] :target_module_path (host['distmoduledir']/modules) # Location where the module should be installed, will default # to host['distmoduledir']/modules # @option opts [Array] :ignore_list # @raise [ArgumentError] if not host is provided or module_name is not provided and can not be found in Modulefile # def copy_module_to(host, opts = {}) opts = {:source => './', :target_module_path => host['distmoduledir'], :ignore_list => PUPPET_MODULE_INSTALL_IGNORE}.merge(opts) ignore_list = build_ignore_list(opts) target_module_dir = opts[:target_module_path] if opts.has_key?(:module_name) module_name = opts[:module_name] else module_name = parse_for_modulename(opts[:source]) end scp_to host, File.join(opts[:source]), File.join(target_module_dir, module_name), {:ignore => ignore_list} end alias :copy_root_module_to :copy_module_to #Recursive method for finding the module root # Assumes that a Modulefile exists # @param [String] possible_module_directory # will look for Modulefile and if none found go up one level and try again until root is reached # # @return [String,nil] def parse_for_moduleroot(possible_module_directory) if File.exists?("#{possible_module_directory}/Modulefile") possible_module_directory elsif possible_module_directory === '/' logger.error "At root, can't parse for another directory" nil else logger.debug "No Modulefile found at #{possible_module_directory}, moving up" parse_for_moduleroot File.expand_path(File.join(possible_module_directory,'..')) end end #Parse root directory of a module for module name # Searches for metadata.json and then if none found, Modulefile and parses for the Name attribute # @param [String] root_module_dir # @return [String] module name def parse_for_modulename(root_module_dir) module_name = nil if File.exists?("#{root_module_dir}/metadata.json") logger.debug "Attempting to parse Modulename from metadata.json" module_json = JSON.parse (File.read "#{root_module_dir}/metadata.json") if(module_json.has_key?('name')) module_name = get_module_name(module_json['name']) end end if !module_name && File.exists?("#{root_module_dir}/Modulefile") logger.debug "Attempting to parse Modulename from Modulefile" if /^name\s+'?(\w+-\w+)'?\s*$/i.match(File.read("#{root_module_dir}/Modulefile")) module_name = get_module_name(Regexp.last_match[1]) end end if !module_name logger.debug "Unable to determine name, returning null" end module_name end #Parse modulename from the pattern 'Auther-ModuleName' # # @param [String] author_module_name <Author>-<ModuleName> pattern # # @return [String,nil] # def get_module_name(author_module_name) split_name = split_author_modulename(author_module_name) if split_name split_name[:module] end end #Split the Author-Name into a hash # @param [String] author_module_attr # # @return [Hash<Symbol,String>,nil] :author and :module symbols will be returned # def split_author_modulename(author_module_attr) result = /(\w+)-(\w+)/.match(author_module_attr) if result {:author => result[1], :module => result[2]} else nil end end # Build an array list of files/directories to ignore when pushing to remote host # Automatically adds '..' and '.' 
to array. If not opts of :ignore list is provided # it will use the static variable PUPPET_MODULE_INSTALL_IGNORE # # @param opts [Hash] # @option opts [Array] :ignore_list A list of files/directories to ignore def build_ignore_list(opts = {}) ignore_list = opts[:ignore_list] || PUPPET_MODULE_INSTALL_IGNORE if !ignore_list.kind_of?(Array) || ignore_list.nil? raise ArgumentError "Ignore list must be an Array" end ignore_list << '.' unless ignore_list.include? '.' ignore_list << '..' unless ignore_list.include? '..' ignore_list end end end end
1
6,206
I'm not sure if this is the right thing to be doing here. It seems like there should be a more systemic fix instead of this patch.
voxpupuli-beaker
rb
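The review asks for a more systemic fix than tacking `|| 0` onto one call site; in practice that usually means declaring the default once where options are assembled. A hedged sketch of that idea (the names and default value are illustrative, not beaker's actual presets code):

# Sketch only: a single source of truth for the default, so lookups of
# 'master-start-curl-retries' never come back nil.
DEFAULT_OPTIONS = {
  'master-start-curl-retries' => 120 # hypothetical default
}.freeze

def options_with_defaults(user_options)
  DEFAULT_OPTIONS.merge(user_options || {})
end

opts = options_with_defaults('log-level' => 'debug')
puts opts['master-start-curl-retries'] # => 120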
@@ -811,6 +811,7 @@ const baseSelectors = { /** * Gets the module's screenWidgetContext. * + * Returns `undefined` if dependencies are loading. * Returns `null` if there is no registered context string for the given module. * Returns `string` the registered context string, screenWidgetContext for the given module. *
1
/** * `core/modules` data store: module info. * * Site Kit by Google, Copyright 2021 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * External dependencies */ import memize from 'memize'; import defaults from 'lodash/defaults'; import merge from 'lodash/merge'; import isPlainObject from 'lodash/isPlainObject'; import invariant from 'invariant'; import { sprintf, __ } from '@wordpress/i18n'; /** * WordPress dependencies */ import { WPComponent } from '@wordpress/element'; /** * Internal dependencies */ import API from 'googlesitekit-api'; import Data from 'googlesitekit-data'; import { STORE_NAME, ERROR_CODE_INSUFFICIENT_MODULE_DEPENDENCIES } from './constants'; import { CORE_SITE } from '../../datastore/site/constants'; import { CORE_USER } from '../../datastore/user/constants'; import { createFetchStore } from '../../data/create-fetch-store'; import { listFormat } from '../../../util'; import DefaultSettingsSetupIncomplete from '../../../components/settings/DefaultSettingsSetupIncomplete'; import { createValidatedAction } from '../../data/utils'; const { createRegistrySelector, createRegistryControl } = Data; // Actions. const REFETCH_AUTHENTICATION = 'REFETCH_AUTHENTICATION'; const SELECT_MODULE_REAUTH_URL = 'SELECT_MODULE_REAUTH_URL'; const REGISTER_MODULE = 'REGISTER_MODULE'; const RECEIVE_CHECK_REQUIREMENTS_ERROR = 'RECEIVE_CHECK_REQUIREMENTS_ERROR'; const RECEIVE_CHECK_REQUIREMENTS_SUCCESS = 'RECEIVE_CHECK_REQUIREMENTS_SUCCESS'; const moduleDefaults = { slug: '', storeName: null, name: '', description: '', homepage: null, internal: false, active: false, connected: false, dependencies: [], dependants: [], order: 10, features: [], Icon: null, SettingsEditComponent: null, SettingsViewComponent: null, SettingsSetupIncompleteComponent: DefaultSettingsSetupIncomplete, SetupComponent: null, }; const normalizeModules = memize( ( serverDefinitions, clientDefinitions ) => { // Module properties in `clientDefinitions` will overwrite `serverDefinitions` // but only for keys whose values are not `undefined`. const modules = merge( {}, serverDefinitions, clientDefinitions ); return Object.keys( modules ) .map( ( slug ) => { const module = { ...modules[ slug ], slug }; // Fill any `undefined` values with defaults. 
defaults( module, { name: slug }, moduleDefaults ); return module; } ) .sort( ( a, b ) => a.order - b.order ) .reduce( ( acc, module ) => { return { ...acc, [ module.slug ]: module }; }, {} ); } ); const fetchGetModulesStore = createFetchStore( { baseName: 'getModules', controlCallback: () => { return API.get( 'core', 'modules', 'list', null, { useCache: false, } ); }, reducerCallback: ( state, modules ) => { return { ...state, isAwaitingModulesRefresh: false, serverDefinitions: modules.reduce( ( acc, module ) => { return { ...acc, [ module.slug ]: module }; }, {} ), }; }, } ); const fetchSetModuleActivationStore = createFetchStore( { baseName: 'setModuleActivation', controlCallback: ( { slug, active } ) => { return API.set( 'core', 'modules', 'activation', { slug, active, } ); }, reducerCallback: ( state ) => { // Updated module activation state is handled by re-fetching module // data instead, so this reducer just sets the below flag. return { ...state, isAwaitingModulesRefresh: true, }; }, argsToParams: ( slug, active ) => { return { slug, active, }; }, validateParams: ( { slug, active } = {} ) => { invariant( slug, 'slug is required.' ); invariant( active !== undefined, 'active is required.' ); }, } ); const baseInitialState = { clientDefinitions: {}, serverDefinitions: undefined, // This value is to indicate that modules data needs to be refreshed after // a module activation update, since the activation is technically complete // before this data has been refreshed. isAwaitingModulesRefresh: false, checkRequirementsResults: {}, }; const baseActions = { /** * Activates a module on the server. * * Activate a module (based on the slug provided). * * @since 1.8.0 * * @param {string} slug Slug of the module to activate. * @return {Object} Object with `{response, error}`. On success, `response.moduleReauthURL` * is set to redirect the user to the corresponding module setup or OAuth * consent screen. */ *activateModule( slug ) { const { response, error } = yield baseActions.setModuleActivation( slug, true ); if ( response?.success === true ) { const moduleReauthURL = yield { payload: { slug }, type: SELECT_MODULE_REAUTH_URL, }; return { response: { ...response, moduleReauthURL }, error, }; } return { response, error }; }, /** * Deactivates a module on the server. * * Deactivate a module (based on the slug provided). * * @since 1.8.0 * * @param {string} slug Slug of the module to activate. * @return {Object} Object with `{response, error}`. */ *deactivateModule( slug ) { const { response, error } = yield baseActions.setModuleActivation( slug, false ); return { response, error }; }, /** * (De)activates a module on the server. * * POSTs to the `core/modules/activation` endpoint to set the `active` status * supplied for the give `slug`. * * @since 1.8.0 * @private * * @param {string} slug Slug of the module to activate/deactivate. * @param {boolean} active `true` to activate; `false` to deactivate. * @return {Object} Object with `{response, error}`. */ setModuleActivation: createValidatedAction( ( slug, active ) => { invariant( slug, 'slug is required.' ); invariant( active !== undefined, 'active is required.' ); }, function* ( slug, active ) { const { response, error } = yield fetchSetModuleActivationStore.actions.fetchSetModuleActivation( slug, active ); if ( response?.success === true ) { // Fetch (or re-fetch) all modules, with their updated status. 
yield fetchGetModulesStore.actions.fetchGetModules(); yield { payload: {}, type: REFETCH_AUTHENTICATION, }; } return { response, error }; } ), /** * Registers a module. * * @since 1.13.0 * @since 1.20.0 Introduced the ability to register settings and setup components. * @since 1.22.0 Introduced the ability to add a checkRequirements function. * @since 1.23.0 Introduced the ability to register an Icon component. * @since 1.24.0 Introduced the ability to explictly define a module store name. * * @param {string} slug Module slug. * @param {Object} [settings] Optional. Module settings. * @param {string} [settings.storeName] Optional. Module storeName. If none is provided we assume no store exists for this module. * @param {string} [settings.name] Optional. Module name. Default is the slug. * @param {string} [settings.description] Optional. Module description. Default empty string. * @param {Array.<string>} [settings.features] Optional. Module features. Default empty array. * @param {WPComponent} [settings.Icon] Optional. React component to render module icon. Default none. * @param {number} [settings.order] Optional. Numeric indicator for module order. Default 10. * @param {string} [settings.homepage] Optional. Module homepage URL. Default empty string. * @param {WPComponent} [settings.SettingsEditComponent] Optional. React component to render the settings edit panel. Default none. * @param {WPComponent} [settings.SettingsViewComponent] Optional. React component to render the settings view panel. Default none. * @param {WPComponent} [settings.SettingsSetupIncompleteComponent] Optional. React component to render the incomplete settings panel. Default none. * @param {WPComponent} [settings.SetupComponent] Optional. React component to render the setup panel. Default none. * @param {Function} [settings.checkRequirements] Optional. Function to check requirements for the module. Throws a WP error object for error or returns on success. * @param {Function} [settings.screenWidgetContext] Optional. Get the registered context name for a given module. */ registerModule: createValidatedAction( ( slug ) => { invariant( slug, 'module slug is required' ); }, function* ( slug, { storeName, name, description, features, Icon, order, homepage, SettingsEditComponent, SettingsViewComponent, SetupComponent, SettingsSetupIncompleteComponent, checkRequirements = () => true, screenWidgetContext, } = {} ) { const settings = { storeName, name, description, features, Icon, order, homepage, SettingsEditComponent, SettingsViewComponent, SetupComponent, SettingsSetupIncompleteComponent, checkRequirements, screenWidgetContext, }; yield { payload: { settings, slug, }, type: REGISTER_MODULE, }; const registry = yield Data.commonActions.getRegistry(); // As we can specify a custom checkRequirements function here, we're invalidating the resolvers for activation checks. yield registry.dispatch( STORE_NAME ).invalidateResolution( 'canActivateModule', [ slug ] ); yield registry.dispatch( STORE_NAME ).invalidateResolution( 'getCheckRequirementsError', [ slug ] ); } ), /** * Receives the check requirements error map for specified modules modules. * * @since 1.22.0 * @private * * @param {string} slug Module slug. * @param {Object} error WordPress Error object containing code, message and data properties. * @return {Object} Action for RECEIVE_CHECK_REQUIREMENTS_ERROR. 
*/ receiveCheckRequirementsError( slug, error ) { invariant( slug, 'slug is required' ); invariant( isPlainObject( error ), 'error is required and must be an object' ); return { payload: { slug, error }, type: RECEIVE_CHECK_REQUIREMENTS_ERROR, }; }, /** * Receives the check requirements success for a module. * * @since 1.22.0 * @private * * @param {string} slug Success for a module slug. * @return {Object} Action for RECEIVE_CHECK_REQUIREMENTS_SUCCESS. */ receiveCheckRequirementsSuccess( slug ) { invariant( slug, 'slug is required' ); return { payload: { slug, }, type: RECEIVE_CHECK_REQUIREMENTS_SUCCESS, }; }, }; export const baseControls = { [ REFETCH_AUTHENTICATION ]: createRegistryControl( ( { dispatch } ) => () => { return dispatch( CORE_USER ).fetchGetAuthentication(); } ), [ SELECT_MODULE_REAUTH_URL ]: createRegistryControl( ( { select } ) => ( { payload } ) => { const { slug } = payload; const storeName = select( STORE_NAME ).getModuleStoreName( slug ); // If a storeName wasn't specified on registerModule we assume there is no store for this module if ( ! storeName ) { return; } const getAdminReauthURL = select( storeName )?.getAdminReauthURL; if ( getAdminReauthURL ) { return getAdminReauthURL(); } return select( CORE_SITE ).getAdminURL( 'googlesitekit-dashboard' ); } ), }; const baseReducer = ( state, { type, payload } ) => { switch ( type ) { case REGISTER_MODULE: { const { slug, settings } = payload; if ( !! state.clientDefinitions[ slug ] ) { global.console.warn( `Could not register module with slug "${ slug }". Module "${ slug }" is already registered.` ); return state; } return { ...state, clientDefinitions: { ...state.clientDefinitions, [ slug ]: settings, }, }; } case RECEIVE_CHECK_REQUIREMENTS_ERROR: { const { slug, error } = payload; return { ...state, checkRequirementsResults: { ...state.checkRequirementsResults, [ slug ]: error, }, }; } case RECEIVE_CHECK_REQUIREMENTS_SUCCESS: { const { slug } = payload; return { ...state, checkRequirementsResults: { ...state.checkRequirementsResults, [ slug ]: true, }, }; } default: { return state; } } }; const baseResolvers = { *getModules() { const registry = yield Data.commonActions.getRegistry(); const existingModules = registry.select( STORE_NAME ).getModules(); if ( ! existingModules ) { yield fetchGetModulesStore.actions.fetchGetModules(); } }, *canActivateModule( slug ) { const registry = yield Data.commonActions.getRegistry(); yield Data.commonActions.await( registry.__experimentalResolveSelect( STORE_NAME ).getModules() ); const module = registry.select( STORE_NAME ).getModule( slug ); if ( ! module ) { return; } const inactiveModules = []; module.dependencies.forEach( ( dependencySlug ) => { const dependedentModule = registry.select( STORE_NAME ).getModule( dependencySlug ); if ( ! dependedentModule?.active ) { inactiveModules.push( dependedentModule.name ); } } ); // If we have inactive dependencies, there's no need to check if we can // activate the module until the dependencies have been activated. if ( inactiveModules.length ) { /* translators: Error message text. 1: A flattened list of module names. 2: A module name. 
*/ const messageTemplate = __( 'You need to set up %1$s to gain access to %2$s.', 'google-site-kit' ); const errorMessage = sprintf( messageTemplate, listFormat( inactiveModules ), module.name ); yield baseActions.receiveCheckRequirementsError( slug, { code: ERROR_CODE_INSUFFICIENT_MODULE_DEPENDENCIES, message: errorMessage, data: { inactiveModules }, } ); } else { try { yield Data.commonActions.await( module.checkRequirements() ); yield baseActions.receiveCheckRequirementsSuccess( slug ); } catch ( error ) { yield baseActions.receiveCheckRequirementsError( slug, error ); } } }, }; const baseSelectors = { /** * Gets the list of modules registered for use with Site Kit. * * A module is a section of Site Kit that relates to a particular service, * like Google Analytics or Google PageSpeed modules. They can provide * admin-only features (like PageSpeed Insights), frontend-only features, * or both (eg. Analytics, which can install Analytics <script> tags in the * frontend, and show dashboards in the WordPress Admin). * * Returns an Object/map of objects, keyed by slug, with the following shape when successful: * ``` * slug: { * "slug": "tagmanager", * "name": "Tag Manager", * "description": "Tag Manager creates an easy to manage way to create tags on your site without updating code.", * "homepage": "https://tagmanager.google.com/", * "internal": false, * "active": false, * "connected": false, * "dependencies": [ * "analytics" * ], * "dependents": [] * } * ``` * * @since 1.8.0 * * @param {Object} state Data store's state. * @return {(Object|undefined)} Modules available on the site. */ getModules( state ) { const { clientDefinitions, serverDefinitions } = state; // Return `undefined` if modules haven't been loaded yet. if ( serverDefinitions === undefined ) { return undefined; } // `normalizeModules` must be called with stable arguments directly from state. // Redefining/spreading these will undermine the memoization! return normalizeModules( serverDefinitions, clientDefinitions ); }, /** * Gets a specific module by slug. * * Returns a specific module by its slug. * Returns `undefined` if state is still loading or if said module doesn't exist. * * @since 1.8.0 * * @param {Object} state Data store's state. * @param {string} slug Module slug. * @return {(Object|undefined)} A specific module object; `undefined` if state is still loading or if said module doesn't exist. */ getModule: createRegistrySelector( ( select ) => ( state, slug ) => { const modules = select( STORE_NAME ).getModules(); // Return `undefined` if modules haven't been loaded yet. if ( modules === undefined ) { return undefined; } // A module with this slug couldn't be found; return `null` to signify the // "not found" state. if ( modules[ slug ] === undefined ) { return null; } // This module exists, so let's return it. return modules[ slug ]; } ), /** * Gets a specific module icon by slug. * * Returns a specific module icon by its slug. * Returns `null` if state is still loading or if said module doesn't exist or doesn't have an icon. * * @since 1.23.0 * * @param {Object} state Data store's state. * @param {string} slug Module slug. * @return {(WPComponent|undefined|null)} A specific module's icon; `undefined` if state is still loading; `null` if said module doesn't exist or doesn't have an icon. */ getModuleIcon: createRegistrySelector( ( select ) => ( state, slug ) => { const module = select( STORE_NAME ).getModule( slug ); // Return `undefined` if module with this slug isn't loaded yet. 
if ( module === undefined ) { return undefined; } // A module with this slug couldn't be found or the icon is not found for the module; return `null` to signify the // "module not found" or "icon not found" state if ( module === null || module.Icon === null ) { return null; } // This module and the icon exists, so let's return it. return module.Icon; } ), /** * Gets module dependency names by slug. * * Returns a list of modules that depend on this module. * Returns `undefined` if state is still loading or if said module doesn't exist. * * @since 1.20.0 * * @param {Object} state Data store's state. * @param {string} slug Module slug. * @return {(Array|undefined)} An array of dependency module names; `undefined` if state is still loading. */ getModuleDependencyNames: createRegistrySelector( ( select ) => ( state, slug ) => { const module = select( STORE_NAME ).getModule( slug ); // Return `undefined` if module with this slug isn't loaded yet. if ( module === undefined ) { return undefined; } // A module with this slug couldn't be found; return `[]` to signify the // "not found" state. if ( module === null ) { return []; } // Module is found, return the names of the dependencies // Modules are already resolved after we getModule() so they can't be undefined. const modules = select( STORE_NAME ).getModules(); return module.dependencies.map( ( dependencySlug ) => modules[ dependencySlug ]?.name || dependencySlug ); } ), /** * Gets module dependant names by slug. * * Returns a list of modules on which this module depends. * Returns `undefined` if state is still loading or if said module doesn't exist. * * @since 1.20.0 * * @param {Object} state Data store's state. * @param {string} slug Module slug. * @return {(Array|undefined)} An array of dependant module names; `undefined` if state is still loading. */ getModuleDependantNames: createRegistrySelector( ( select ) => ( state, slug ) => { const module = select( STORE_NAME ).getModule( slug ); // Return `undefined` if module with this slug isn't loaded yet. if ( module === undefined ) { return undefined; } // A module with this slug couldn't be found; return `[]` to signify the // "not found" state. if ( module === null ) { return []; } // Module is found, return the names of the dependants // Modules are already resolved after we getModule() so they can't be undefined. const modules = select( STORE_NAME ).getModules(); return module.dependants.map( ( dependantSlug ) => modules[ dependantSlug ]?.name || dependantSlug ); } ), /** * Gets module store name by slug. * * Returns the store name if preset or null if there is no store name for this module. * Returns `undefined` if state is still loading or if said module doesn't exist. * * @since 1.24.0 * * @param {string} slug Module slug. * @return {(string|null|undefined)} `string` of the store name if a name has been set for this module. * `null` if no store name was set. * `undefined` if state is still loading. */ getModuleStoreName: createRegistrySelector( ( select ) => ( state, slug ) => { const module = select( STORE_NAME ).getModule( slug ); // Return `undefined` if module with this slug isn't loaded yet. if ( module === undefined ) { return undefined; } // Return null if no store name was set if ( module === null ) { return null; } return module.storeName; } ), /** * Checks a module's activation status. * * Returns `true` if the module exists and is active. * Returns `false` if the module exists but is not active. 
* Returns `undefined` if state is still loading or if no module with that slug exists. * * @since 1.8.0 * * @param {Object} state Data store's state. * @param {string} slug Module slug. * @return {(boolean|null|undefined)} `true` when the module exists and is active. * `undefined` if state is still loading. * `null` if said module doesn't exist. */ isModuleActive: createRegistrySelector( ( select ) => ( state, slug ) => { const module = select( STORE_NAME ).getModule( slug ); // Return `undefined` if modules haven't been loaded yet. if ( module === undefined ) { return undefined; } // A module with this slug couldn't be found; return `null` to signify the // "not found" state. if ( module === null ) { return null; } return module.active; } ), /** * Checks whether a module is connected or not. * * Returns `true` if the module exists, is active and connected. * Returns `false` if the module exists but is either not active or not connected. * Returns `undefined` if state is still loading or if no module with that slug exists. * * @since 1.16.0 * * @param {Object} state Data store's state. * @param {string} slug Module slug. * @return {(boolean|null|undefined)} `true` when the module exists, is active and connected, otherwise `false`. * `undefined` if state is still loading. * `null` if said module doesn't exist. */ isModuleConnected: createRegistrySelector( ( select ) => ( state, slug ) => { const module = select( STORE_NAME ).getModule( slug ); // Return `undefined` if modules haven't been loaded yet. if ( module === undefined ) { return undefined; } // A module with this slug couldn't be found; return `null` to signify the // "not found" state. if ( module === null ) { return null; } return module.active && module.connected; } ), /** * Checks if a module's status is changing. * * Returns `true` if the module exists and is changing its `active` flag. * Returns `false` if the module exists but is not changing its `active` flag. * Returns `undefined` if state is still loading or if no module with that slug exists. * * @since 1.8.0 * * @param {Object} state Data store's state. * @param {string} slug Module slug. * @return {(boolean|undefined)} Activation change status; `undefined` if state is still loading or if no module with that slug exists. */ isDoingSetModuleActivation: createRegistrySelector( ( select ) => ( state, slug ) => { // Return undefined if modules not loaded or invalid slug. if ( ! select( STORE_NAME ).getModule( slug ) ) { return undefined; } // Check if the module is being activated. if ( select( STORE_NAME ).isFetchingSetModuleActivation( slug, true ) ) { return true; } // Check if the module is being deactivated. if ( select( STORE_NAME ).isFetchingSetModuleActivation( slug, false ) ) { return true; } // Check if modules data still needs to be refreshed after activation // update. return state.isAwaitingModulesRefresh; } ), /** * Checks if we can activate a module with a given slug. * * Returns `true` if the module can be activated. * Returns `false` if the module can not be activated. * Returns `undefined` if slug can not be found in state. * * @since 1.22.0 * * @param {Object} state Data store's state. * @param {string} slug Module slug. * @return {(boolean|undefined)} Can activate module status; `undefined` if state is still loading or if no module with that slug exists. 
*/ canActivateModule( state, slug ) { invariant( slug, 'slug is required' ); const moduleRequirements = state.checkRequirementsResults[ slug ]; if ( moduleRequirements === undefined ) { return undefined; } return moduleRequirements === true; }, /** * Gets the module activation error for a given slug. * * Returns `null` if the module can be activated and there is no error. * Returns `object` containing code, message and optional data property if there is an activation error for a slug. * * @since 1.22.0 * * @param {Object} state Data store's state. * @param {string} slug Module slug. * @return {(null|Object)} Activation error for a module slug; `null` if there is no error or an error object if we cannot activate a given module. */ getCheckRequirementsError: createRegistrySelector( ( select ) => ( state, slug ) => { invariant( slug, 'slug is required.' ); // Need to use registry selector here to ensure resolver is invoked. if ( select( STORE_NAME ).canActivateModule( slug ) ) { return null; } return state.checkRequirementsResults[ slug ]; } ), /** * Gets the module's screenWidgetContext. * * Returns `null` if there is no registered context string for the given module. * Returns `string` the registered context string, screenWidgetContext for the given module. * * @since 1.28.0 * * @param {Object} state Data store's state. * @param {string} moduleSlug Module slug. * @return {(null|string)} The module's registered context string, or null. */ getScreenWidgetContext: createRegistrySelector( ( select ) => ( state, moduleSlug ) => { invariant( moduleSlug, 'slug is required.' ); const modules = select( STORE_NAME ).getModules(); if ( ! modules ) { return null; } const screenWidgetContext = modules[ moduleSlug ]?.screenWidgetContext; return screenWidgetContext || null; } ), /** * Gets the module's list of features. * * Returns a list of features of this module. * * @since 1.30.0 * * @param {Object} state Data store's state. * @param {string} slug Module slug. * @return {(Array|undefined)} An array of features for the module; `undefined` if state is still loading. */ getModuleFeatures: createRegistrySelector( ( select ) => ( state, slug ) => { const modules = select( STORE_NAME ).getModules(); // Return `undefined` if modules haven't been loaded yet. if ( modules === undefined ) { return undefined; } return Array.isArray( modules[ slug ]?.features ) ? modules[ slug ].features : []; } ), }; const store = Data.combineStores( fetchGetModulesStore, fetchSetModuleActivationStore, { initialState: baseInitialState, actions: baseActions, controls: baseControls, reducer: baseReducer, resolvers: baseResolvers, selectors: baseSelectors, } ); export const initialState = store.initialState; export const actions = store.actions; export const controls = store.controls; export const reducer = store.reducer; export const resolvers = store.resolvers; export const selectors = store.selectors; export default store;
1
38,313
Not sure why these are up here. Can we move these down to be part of the `@return` tag instead? This is what we usually do (e.g. `getCurrentEntityURL`).
google-site-kit-wp
js
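For illustration, a minimal sketch of the JSDoc style the reviewer is pointing to, with the per-state return values folded into the `@return` tag rather than listed in the description. The selector shown and the exact wrapping are assumptions based on the surrounding file, not the final patch:

```js
/**
 * Checks a module's activation status.
 *
 * @since 1.8.0
 *
 * @param {Object} state Data store's state.
 * @param {string} slug  Module slug.
 * @return {(boolean|null|undefined)} `true` if the module exists and is active;
 *                                    `false` if it exists but is not active;
 *                                    `undefined` if state is still loading;
 *                                    `null` if no module with that slug exists.
 */
isModuleActive: createRegistrySelector( ( select ) => ( state, slug ) => { /* … */ } ),
```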
@@ -34,12 +34,14 @@ module Blacklight::Document # but extensions should call super and modify hash returned, to avoid # unintentionally erasing values provided by other extensions. def to_semantic_values - @semantic_value_hash ||= self.class.field_semantics.each_with_object(Hash.new([])) do |(key, field_name), hash| - value = self[field_name] + @semantic_value_hash ||= self.class.field_semantics.each_with_object(Hash.new([])) do |(key, field_names), hash| + ## + # Handles single string field_name or an array of field_names + value = Array.wrap(field_names).map { |field_name| self[field_name] }.flatten.compact # Make single and multi-values all arrays, so clients # don't have to know. - hash[key] = Array.wrap(value) unless value.nil? + hash[key] = value unless value.empty? end @semantic_value_hash ||= {}
1
# frozen_string_literal: true module Blacklight::Document module SemanticFields extend ActiveSupport::Concern module ClassMethods # Returns array of hashes of registered extensions. Each hash # has a :module_obj key and a :condition_proc key. Usually this # method is only used internally in #apply_extensions, but if you # Class-level method for accessing/setting semantic mappings # for solr stored fields. Can be set by local app, key is # a symbol for a semantic, value is a solr _stored_ field. # # Stored field can be single or multi-value. In some cases # clients may only use the first value from a multi-value field. # # Currently documented semantic tokens, not all may be # used by core BL, but some may be used by plugins present # or future. # :title, :author, :year, :language => User-presentable strings. def field_semantics @field_semantics ||= {} end end # Returns a hash keyed by semantic tokens, value is an array of # strings. (Array to handle multi-value fields). If no value(s) # available, empty array is returned. # # Default implementation here uses field_semantics # to just take values from Solr stored fields. # Extensions can over-ride this method to provide better/different lookup, # but extensions should call super and modify hash returned, to avoid # unintentionally erasing values provided by other extensions. def to_semantic_values @semantic_value_hash ||= self.class.field_semantics.each_with_object(Hash.new([])) do |(key, field_name), hash| value = self[field_name] # Make single and multi-values all arrays, so clients # don't have to know. hash[key] = Array.wrap(value) unless value.nil? end @semantic_value_hash ||= {} end end end
1
7,724
Layout/EmptyLinesAroundBlockBody: Extra empty line detected at block body beginning.
projectblacklight-blacklight
rb
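For reference, a small illustration of the `Layout/EmptyLinesAroundBlockBody` offense the bot is flagging. The block body here is simplified; only the placement of the blank line after the `do` line matters:

```ruby
# offense: extra empty line at the beginning of the block body
field_semantics.each_with_object(Hash.new([])) do |(key, field_names), hash|

  hash[key] = Array.wrap(field_names)
end

# corrected: the block body starts immediately after the `do` line
field_semantics.each_with_object(Hash.new([])) do |(key, field_names), hash|
  hash[key] = Array.wrap(field_names)
end
```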
@@ -0,0 +1,18 @@ +package org.openqa.selenium; + +/** + * Created by James Reed on 11/04/2016. + * Thrown to indicate that a click was attempted on an element but was intercepted by another + * element on top of it + */ +public class InterceptingElementException extends InvalidElementStateException { + + public InterceptingElementException(String message) { + super(message); + } + + public InterceptingElementException(String message, Throwable cause) { + super(message, cause); + } + +}
1
1
13,182
We keep who wrote the code anonymous.
SeleniumHQ-selenium
java
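A sketch of what the class header might look like with the author attribution removed, as the review comment asks; the description text is kept from the original patch:

```java
/**
 * Thrown to indicate that a click was attempted on an element but was intercepted by another
 * element on top of it.
 */
public class InterceptingElementException extends InvalidElementStateException {

  public InterceptingElementException(String message) {
    super(message);
  }

  public InterceptingElementException(String message, Throwable cause) {
    super(message, cause);
  }
}
```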
@@ -25,6 +25,12 @@ func (client *clientFake) NodeRegister(proposal dto_discovery.ServiceProposal) ( return nil } +func (client *clientFake) RegisterIdentity(identity dto_discovery.Identity) (err error) { + log.Info(MYSTERIUM_API_LOG_PREFIX, "Identity registered: ", identity) + + return nil +} + func (client *clientFake) NodeSendStats(nodeKey string, sessionStats []dto.SessionStats) (err error) { log.Info(MYSTERIUM_API_LOG_PREFIX, "Node stats sent: ", nodeKey)
1
package server import ( "github.com/mysterium/node/server/dto" "fmt" log "github.com/cihub/seelog" dto_discovery "github.com/mysterium/node/service_discovery/dto" ) func NewClientFake() Client { return &clientFake{ proposalsByProvider: make(map[string]dto_discovery.ServiceProposal, 0), } } type clientFake struct { proposalsByProvider map[string]dto_discovery.ServiceProposal } func (client *clientFake) NodeRegister(proposal dto_discovery.ServiceProposal) (err error) { client.proposalsByProvider[string(proposal.ProviderId)] = proposal log.Info(MYSTERIUM_API_LOG_PREFIX, "Fake node registered: ", proposal) return nil } func (client *clientFake) NodeSendStats(nodeKey string, sessionStats []dto.SessionStats) (err error) { log.Info(MYSTERIUM_API_LOG_PREFIX, "Node stats sent: ", nodeKey) return nil } func (client *clientFake) SessionCreate(nodeKey string) (session dto.Session, err error) { if proposal, ok := client.proposalsByProvider[nodeKey]; ok { session = dto.Session{ Id: nodeKey + "-session", ServiceProposal: proposal, } return } err = fmt.Errorf("Fake node not found: %s", nodeKey) return } func (client *clientFake) SessionSendStats(sessionId string, sessionStats dto.SessionStats) (err error) { log.Info(MYSTERIUM_API_LOG_PREFIX, "Session stats sent: ", sessionId) return nil }
1
9,654
Mention in the logs that fake registration was used, e.g. `Fake identity registered`.
mysteriumnetwork-node
go
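One way to address the comment, following the wording the fake already uses in `NodeRegister` (`Fake node registered`); a sketch, not the final patch:

```go
func (client *clientFake) RegisterIdentity(identity dto_discovery.Identity) (err error) {
	// make it obvious in the logs that the fake client was used
	log.Info(MYSTERIUM_API_LOG_PREFIX, "Fake identity registered: ", identity)

	return nil
}
```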
@@ -151,10 +151,12 @@ const baseActions = { return; } - registry.dispatch( STORE_NAME ).setPropertyID( propertyID ); - if ( PROPERTY_CREATE === propertyID ) { - registry.dispatch( STORE_NAME ).setProfileID( PROFILE_CREATE ); + registry.dispatch( STORE_NAME ).setSettings( { + propertyID, + profileID: PROFILE_CREATE, + } ); + return; }
1
/** * `modules/analytics` data store: properties. * * Site Kit by Google, Copyright 2020 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * External dependencies */ import invariant from 'invariant'; /** * Internal dependencies */ import API from 'googlesitekit-api'; import Data from 'googlesitekit-data'; import { isValidAccountID, isValidPropertyID, parsePropertyID, isValidPropertySelection } from '../util'; import { STORE_NAME, PROPERTY_CREATE, PROFILE_CREATE } from './constants'; import { createFetchStore } from '../../../googlesitekit/data/create-fetch-store'; const { createRegistrySelector, createRegistryControl } = Data; const fetchGetPropertiesProfilesStore = createFetchStore( { baseName: 'getPropertiesProfiles', controlCallback: ( { accountID } ) => { return API.get( 'modules', 'analytics', 'properties-profiles', { accountID }, { useCache: false, } ); }, reducerCallback: ( state, response, { accountID } ) => { // Actual properties, profiles are set by resolver with custom logic, // hence here we just set a flag. return { ...state, isAwaitingPropertiesProfilesCompletion: { ...state.isAwaitingPropertiesProfilesCompletion, [ accountID ]: true, }, }; }, argsToParams: ( accountID ) => { return { accountID }; }, validateParams: ( { accountID } = {} ) => { invariant( accountID, 'accountID is required.' ); }, } ); const fetchCreatePropertyStore = createFetchStore( { baseName: 'createProperty', controlCallback: ( { accountID } ) => { return API.set( 'modules', 'analytics', 'create-property', { accountID } ); }, reducerCallback: ( state, property, { accountID } ) => { return { ...state, properties: { ...state.properties, [ accountID ]: [ ...( state.properties[ accountID ] || [] ), property, ], }, }; }, argsToParams: ( accountID ) => { return { accountID }; }, validateParams: ( { accountID } = {} ) => { invariant( accountID, 'accountID is required.' ); }, } ); // Actions const RECEIVE_MATCHED_PROPERTY = 'RECEIVE_MATCHED_PROPERTY'; const RECEIVE_GET_PROPERTIES = 'RECEIVE_GET_PROPERTIES'; const RECEIVE_PROPERTIES_PROFILES_COMPLETION = 'RECEIVE_PROPERTIES_PROFILES_COMPLETION'; const WAIT_FOR_PROPERTIES = 'WAIT_FOR_PROPERTIES'; const baseInitialState = { properties: {}, isAwaitingPropertiesProfilesCompletion: {}, matchedProperty: undefined, }; const baseActions = { /** * Creates a new Analytics property. * * Creates a new Analytics property for an existing Google Analytics account. * * @since 1.8.0 * * @param {string} accountID Google Analytics account ID. * @return {Object} Object with `response` and `error`. */ *createProperty( accountID ) { invariant( accountID, 'accountID is required.' ); const { response, error } = yield fetchCreatePropertyStore.actions.fetchCreateProperty( accountID ); return { response, error }; }, /** * Adds a matchedProperty to the store. * * @since 1.8.0 * @private * * @param {Object} matchedProperty Property object. * @return {Object} Redux-style action. */ receiveMatchedProperty( matchedProperty ) { invariant( matchedProperty, 'matchedProperty is required.' 
); return { payload: { matchedProperty }, type: RECEIVE_MATCHED_PROPERTY, }; }, /** * Sets the given property and related fields in the store. * * @since 1.8.0 * @private * * @param {string} propertyID Property ID to select. * @param {string} [internalPropertyID] Internal property ID (if available). * @return {Object} A Generator function. */ selectProperty( propertyID, internalPropertyID = '' ) { invariant( isValidPropertySelection( propertyID ), 'A valid propertyID selection is required.' ); return ( function* () { const registry = yield Data.commonActions.getRegistry(); const accountID = registry.select( STORE_NAME ).getAccountID(); if ( ! isValidAccountID( accountID ) ) { return; } registry.dispatch( STORE_NAME ).setPropertyID( propertyID ); if ( PROPERTY_CREATE === propertyID ) { registry.dispatch( STORE_NAME ).setProfileID( PROFILE_CREATE ); return; } yield baseActions.waitForProperties( accountID ); const property = registry.select( STORE_NAME ).getPropertyByID( propertyID ) || {}; if ( ! internalPropertyID ) { internalPropertyID = property.internalWebPropertyId; // eslint-disable-line sitekit/camelcase-acronyms } registry.dispatch( STORE_NAME ).setInternalWebPropertyID( internalPropertyID || '' ); // Clear any profile ID selection in the case that selection falls to the getProfiles resolver. registry.dispatch( STORE_NAME ).setProfileID( '' ); const profiles = registry.select( STORE_NAME ).getProfiles( accountID, propertyID ); if ( property.defaultProfileId && profiles?.some( ( profile ) => profile.id === property.defaultProfileId ) ) { // eslint-disable-line sitekit/camelcase-acronyms registry.dispatch( STORE_NAME ).setProfileID( property.defaultProfileId ); // eslint-disable-line sitekit/camelcase-acronyms return; } if ( profiles === undefined ) { return; // Selection will happen in in getProfiles resolver. } const matchedProfile = profiles.find( ( { webPropertyId } ) => webPropertyId === propertyID ) || { id: PROFILE_CREATE }; // eslint-disable-line sitekit/camelcase-acronyms registry.dispatch( STORE_NAME ).setProfileID( matchedProfile.id ); }() ); }, receiveGetProperties( properties, { accountID } ) { invariant( Array.isArray( properties ), 'properties must be an array.' ); invariant( accountID, 'accountID is required.' ); return { payload: { properties, accountID }, type: RECEIVE_GET_PROPERTIES, }; }, receivePropertiesProfilesCompletion( accountID ) { invariant( accountID, 'accountID is required.' 
); return { payload: { accountID }, type: RECEIVE_PROPERTIES_PROFILES_COMPLETION, }; }, waitForProperties( accountID ) { return { payload: { accountID }, type: WAIT_FOR_PROPERTIES, }; }, }; const baseControls = { [ WAIT_FOR_PROPERTIES ]: createRegistryControl( ( registry ) => ( { payload: { accountID } } ) => { const arePropertiesLoaded = () => registry.select( STORE_NAME ).getProperties( accountID ) !== undefined; if ( arePropertiesLoaded() ) { return true; } return new Promise( ( resolve ) => { const unsubscribe = registry.subscribe( () => { if ( arePropertiesLoaded() ) { unsubscribe(); resolve(); } } ); } ); } ), }; const baseReducer = ( state, { type, payload } ) => { switch ( type ) { case RECEIVE_MATCHED_PROPERTY: { const { matchedProperty } = payload; return { ...state, matchedProperty, }; } case RECEIVE_GET_PROPERTIES: { const { properties, accountID } = payload; return { ...state, properties: { ...state.properties, [ accountID ]: [ ...properties ], }, }; } case RECEIVE_PROPERTIES_PROFILES_COMPLETION: { const { accountID } = payload; return { ...state, isAwaitingPropertiesProfilesCompletion: { ...state.isAwaitingPropertiesProfilesCompletion, [ accountID ]: false, }, }; } default: { return state; } } }; const baseResolvers = { *getProperties( accountID ) { if ( ! isValidAccountID( accountID ) ) { return; } const registry = yield Data.commonActions.getRegistry(); let properties = registry.select( STORE_NAME ).getProperties( accountID ); // Only fetch properties if there are none in the store for the given account. if ( properties === undefined ) { const { response, error } = yield fetchGetPropertiesProfilesStore.actions.fetchGetPropertiesProfiles( accountID ); const { dispatch } = registry; if ( response ) { dispatch( STORE_NAME ).receiveGetProperties( response.properties, { accountID } ); // eslint-disable-next-line sitekit/camelcase-acronyms if ( response.profiles?.[ 0 ]?.webPropertyId ) { // eslint-disable-next-line sitekit/camelcase-acronyms const propertyID = response.profiles[ 0 ].webPropertyId; dispatch( STORE_NAME ).receiveGetProfiles( response.profiles, { accountID, propertyID } ); } if ( response.matchedProperty ) { dispatch( STORE_NAME ).receiveMatchedProperty( response.matchedProperty ); } ( { properties } = response ); } dispatch( STORE_NAME ).receivePropertiesProfilesCompletion( accountID ); if ( error ) { // Store error manually since getProperties signature differs from fetchGetPropertiesProfiles. yield dispatch( STORE_NAME ).receiveError( error, 'getProperties', [ accountID ] ); return; } } const propertyID = registry.select( STORE_NAME ).getPropertyID(); if ( ! propertyID ) { const property = properties[ 0 ] || { id: PROPERTY_CREATE }; yield baseActions.selectProperty( property.id, property.internalWebPropertyId ); // eslint-disable-line sitekit/camelcase-acronyms } }, }; const baseSelectors = { /** * Gets the property object by the property ID. * * @since 1.8.0 * @private * * @param {Object} state Data store's state. * @param {string} propertyID Property ID. * @return {(Object|undefined)} Property object, or undefined if not present in store. */ getPropertyByID( state, propertyID ) { if ( ! isValidPropertyID( propertyID ) ) { return undefined; } const { accountID } = parsePropertyID( propertyID ); return ( state.properties[ accountID ] || [] ).find( ( { id } ) => id === propertyID ); }, /** * Gets the matched property, if any. * * @since 1.8.0 * @private * * @param {Object} state Data store's state. 
* @return {(Object|undefined)} Matched property if set, otherwise `undefined`. */ getMatchedProperty( state ) { return state.matchedProperty; }, /** * Gets all Google Analytics properties this account can access. * * Returns an array of all analytics properties. * * Returns `undefined` if accounts have not yet loaded. * * @since 1.8.0 * * @param {Object} state Data store's state. * @param {string} accountID The Analytics Account ID to fetch properties for. * @return {(Array.<Object>|undefined)} An array of Analytics properties; `undefined` if not loaded. */ getProperties( state, accountID ) { const { properties } = state; return properties[ accountID ]; }, /** * Checks if a property is being created for an account. * * @since 1.8.0 * * @param {Object} state Data store's state. * @param {string} accountID The Analytics Account ID to check for property creation. * @return {boolean} `true` if creating a property, `false` if not. */ isDoingCreateProperty: createRegistrySelector( ( select ) => ( state, accountID ) => { return select( STORE_NAME ).isFetchingCreateProperty( accountID ); } ), /** * Checks if properties are being fetched for the given account. * * @since 1.8.0 * * @param {Object} state Data store's state. * @param {string} accountID The Analytics Account ID to check for property creation. * @return {boolean} `true` if fetching a properties, `false` if not. */ isDoingGetProperties: createRegistrySelector( ( select ) => ( state, accountID ) => { // Check if dispatch calls right after fetching are still awaiting. if ( accountID && state.isAwaitingPropertiesProfilesCompletion[ accountID ] ) { return true; } return select( STORE_NAME ).isFetchingGetPropertiesProfiles( accountID ); } ), }; const store = Data.combineStores( fetchGetPropertiesProfilesStore, fetchCreatePropertyStore, { initialState: baseInitialState, actions: baseActions, controls: baseControls, reducer: baseReducer, resolvers: baseResolvers, selectors: baseSelectors, } ); export const initialState = store.initialState; export const actions = store.actions; export const controls = store.controls; export const reducer = store.reducer; export const resolvers = store.resolvers; export const selectors = store.selectors; export default store;
1
33,553
I think it would make sense to also set `internalWebPropertyID` to `''` here. Otherwise, it might keep its previous value (let's say a property was selected before, but then the user chooses "Create new property"), but since it always should depend on `propertyID`, that would be incorrect.
google-site-kit-wp
js
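A sketch of the reviewer's suggestion, assuming `setSettings()` accepts `internalWebPropertyID` the same way the dedicated `setInternalWebPropertyID()` action does:

```js
if ( PROPERTY_CREATE === propertyID ) {
	registry.dispatch( STORE_NAME ).setSettings( {
		propertyID,
		internalWebPropertyID: '',
		profileID: PROFILE_CREATE,
	} );

	return;
}
```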
@@ -779,3 +779,18 @@ TEST_CASE("setDoubleBondNeighborDirections()", "[stereochemistry,bug]") { CHECK(MolToSmiles(*m) == "C/C=C\\C"); } } + +TEST_CASE("github #2782: addHs() fails on atoms with 'bad' valences", "[bug]") { + SECTION("basics") { + SmilesParserParams ps; + ps.sanitize = false; + std::unique_ptr<RWMol> m( + static_cast<RWMol *>(SmilesToMol("C=C1=CC=CC=C1", ps))); + REQUIRE(m); + bool strict = false; + m->updatePropertyCache(strict); + CHECK(m->getNumAtoms() == 7); + MolOps::addHs(*m); + CHECK(m->getNumAtoms() == 14); + } +}
1
#define CATCH_CONFIG_MAIN // This tells Catch to provide a main() - only do // this in one cpp file #include "catch.hpp" #include <GraphMol/RDKitBase.h> #include <GraphMol/new_canon.h> #include <GraphMol/RDKitQueries.h> #include <GraphMol/Chirality.h> #include <GraphMol/FileParsers/FileParsers.h> #include <GraphMol/SmilesParse/SmilesParse.h> #include <GraphMol/SmilesParse/SmilesWrite.h> #include <GraphMol/SmilesParse/SmartsWrite.h> using namespace RDKit; #if 1 TEST_CASE("SMILES Parsing works", "[molops]") { std::unique_ptr<RWMol> mol(SmilesToMol("C1CC1")); REQUIRE(mol); REQUIRE(mol->getNumAtoms() == 3); } TEST_CASE("Sanitization tests", "[molops]") { std::unique_ptr<RWMol> mol(SmilesToMol("C1=CC=CC=C1Cc2ccccc2", false, false)); REQUIRE(mol); REQUIRE(mol->getNumAtoms() == 13); SECTION("properties") { mol->updatePropertyCache(); CHECK(mol->getAtomWithIdx(0)->getTotalNumHs() == 1); CHECK(!mol->getAtomWithIdx(0)->getIsAromatic()); CHECK(mol->getAtomWithIdx(10)->getIsAromatic()); SECTION("aromaticity") { unsigned int opThatFailed; MolOps::sanitizeMol(*mol, opThatFailed, MolOps::SANITIZE_SETAROMATICITY); // mol->debugMol(std::cerr); CHECK(mol->getAtomWithIdx(10)->getIsAromatic()); // blocked by #1730 // CHECK(mol->getAtomWithIdx(0)->getIsAromatic()); } SECTION("kekulize") { unsigned int opThatFailed; MolOps::sanitizeMol(*mol, opThatFailed, MolOps::SANITIZE_KEKULIZE); CHECK(!mol->getAtomWithIdx(0)->getIsAromatic()); CHECK(!mol->getAtomWithIdx(10)->getIsAromatic()); } } } TEST_CASE("Github #2062", "[bug, molops]") { SmilesParserParams ps; ps.removeHs = false; ps.sanitize = true; std::unique_ptr<RWMol> mol(SmilesToMol("[C:1][C:2]([H:3])([H])[O:4][H]", ps)); REQUIRE(mol); CHECK(mol->getNumAtoms() == 6); mol->getAtomWithIdx(1)->setProp("intProp", 42); MolOps::mergeQueryHs(*mol); CHECK(mol->getNumAtoms() == 3); SECTION("basics") { CHECK(mol->getAtomWithIdx(1)->getAtomMapNum() == 2); } SECTION("other props") { REQUIRE(mol->getAtomWithIdx(1)->hasProp("intProp")); CHECK(mol->getAtomWithIdx(1)->getProp<int>("intProp") == 42); } } TEST_CASE("Github #2086", "[bug, molops]") { SECTION("reported version") { auto mol = "C1CCCC1"_smiles; REQUIRE(mol); MolOps::addHs(*mol); REQUIRE(mol->getNumAtoms() == 15); mol->removeBond(4, 13); MolOps::removeHs(*mol); REQUIRE(mol->getNumAtoms() == 6); } } TEST_CASE("github #299", "[bug, molops, SSSR]") { SECTION("simplified") { auto mol = "C13%13%14.C124%18.C25%13%15.C368%17.C4679.C75%10%17.C8%11%14%16.C9%11%12%18.C%10%12%15%16"_smiles; REQUIRE(mol); REQUIRE(mol->getNumAtoms() == 9); } SECTION("old example from molopstest") { auto mol = "C123C45C11C44C55C22C33C14C523"_smiles; REQUIRE(mol); REQUIRE(mol->getNumAtoms() == 9); } SECTION("carborane") { std::unique_ptr<RWMol> mol( SmilesToMol("[B]1234[B]567[B]118[B]229[B]33%10[B]454[B]656[B]711[B]822[" "C]933[B]%1045[C]6123", 0, false)); REQUIRE(mol); CHECK(mol->getNumAtoms() == 12); mol->updatePropertyCache(false); MolOps::findSSSR(*mol); REQUIRE(mol->getRingInfo()->isInitialized()); } SECTION("original report from ChEbI") { std::string pathName = getenv("RDBASE"); pathName += "/Code/GraphMol/test_data/"; std::unique_ptr<RWMol> mol( MolFileToMol(pathName + "ChEBI_50252.mol", false)); REQUIRE(mol); CHECK(mol->getNumAtoms() == 80); mol->updatePropertyCache(false); MolOps::findSSSR(*mol); REQUIRE(mol->getRingInfo()->isInitialized()); } } TEST_CASE("github #2224", "[bug, molops, removeHs, query]") { SECTION("the original report") { std::string pathName = getenv("RDBASE"); pathName += "/Code/GraphMol/test_data/"; std::unique_ptr<RWMol> 
mol(MolFileToMol(pathName + "github2224_1.mol")); REQUIRE(mol); REQUIRE(mol->getNumAtoms() == 7); } SECTION("basics") { SmilesParserParams ps; ps.removeHs = false; ps.sanitize = true; std::unique_ptr<ROMol> mol(SmilesToMol("C[H]", ps)); REQUIRE(mol); REQUIRE(mol->getNumAtoms() == 2); { // The H without a query is removed std::unique_ptr<ROMol> m2(MolOps::removeHs(*mol)); CHECK(m2->getNumAtoms() == 1); } { // but if we add a query feature it's not removed RWMol m2(*mol); auto *qa = new QueryAtom(1); m2.replaceAtom(1, qa); m2.getAtomWithIdx(1)->setAtomicNum(1); MolOps::removeHs(m2); CHECK(m2.getNumAtoms() == 2); delete qa; } } } TEST_CASE( "github #2268: Recognize N in three-membered rings as potentially chiral", "[bug,stereo]") { SECTION("basics: N in a 3 ring") { const auto mol = "C[N@]1CC1C"_smiles; REQUIRE(mol); CHECK(mol->getAtomWithIdx(1)->getChiralTag() != Atom::CHI_UNSPECIFIED); } SECTION("basics: N in a 4 ring") { const auto mol = "C[N@]1CCC1C"_smiles; REQUIRE(mol); CHECK(mol->getAtomWithIdx(1)->getChiralTag() == Atom::CHI_UNSPECIFIED); } SECTION("the original molecule") { std::string mb = R"CTAB( Mrv1810 02131915062D 18 20 0 0 1 0 999 V2000 -0.7207 -1.3415 0.0000 N 0 0 1 0 0 0 0 0 0 0 0 0 -0.0583 -0.8416 0.0000 C 0 0 2 0 0 0 0 0 0 0 0 0 -0.0083 -1.7540 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0 -1.3956 -0.8666 0.0000 C 0 0 2 0 0 0 0 0 0 0 0 0 -0.3250 -0.0667 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -2.1955 -0.6499 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -1.1499 -0.0792 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 0.6541 -0.4292 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -2.7830 -1.2291 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0 -1.6081 -1.6623 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -2.4080 0.1500 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0 1.3665 -0.8374 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 0.6416 0.3958 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -3.1996 0.3708 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -3.4121 1.1624 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 1.3498 0.8207 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 2.0790 -0.4167 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 2.0665 0.4083 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 2 1 1 0 0 0 0 1 3 1 1 0 0 0 4 1 1 0 0 0 0 5 2 1 0 0 0 0 4 6 1 0 0 0 0 7 4 1 0 0 0 0 2 8 1 6 0 0 0 9 6 2 0 0 0 0 4 10 1 1 0 0 0 11 6 1 0 0 0 0 12 8 2 0 0 0 0 13 8 1 0 0 0 0 14 11 1 0 0 0 0 15 14 1 0 0 0 0 16 13 2 0 0 0 0 17 12 1 0 0 0 0 18 16 1 0 0 0 0 2 3 1 0 0 0 0 5 7 1 0 0 0 0 17 18 2 0 0 0 0 M END )CTAB"; std::unique_ptr<ROMol> mol(MolBlockToMol(mb)); REQUIRE(mol); CHECK(mol->getAtomWithIdx(0)->getChiralTag() != Atom::CHI_UNSPECIFIED); } } TEST_CASE("github #2244", "[bug, molops, stereo]") { SECTION("the original report") { auto mol = "CC=CC=CC"_smiles; REQUIRE(mol); MolOps::findPotentialStereoBonds(*mol, true); CHECK(mol->getBondWithIdx(1)->getStereo() == Bond::STEREOANY); CHECK(mol->getBondWithIdx(3)->getStereo() == Bond::STEREOANY); mol->getBondWithIdx(3)->setStereo(Bond::STEREONONE); MolOps::findPotentialStereoBonds(*mol, true); CHECK(mol->getBondWithIdx(1)->getStereo() == Bond::STEREOANY); CHECK(mol->getBondWithIdx(3)->getStereo() == Bond::STEREOANY); } } TEST_CASE( "github #2258: heterocycles with exocyclic bonds not failing valence check", "[bug, molops]") { SECTION("the original report") { std::vector<std::string> smiles = {"C=n1ccnc1", "C#n1ccnc1"}; for (auto smi : smiles) { CHECK_THROWS_AS(SmilesToMol(smi), MolSanitizeException); } } } TEST_CASE("github #908: AddHs() using 3D coordinates with 2D conformations", "[bug, molops]") { SECTION("basics: single atom mols") { std::vector<std::string> smiles = {"Cl", "O", "N", "C"}; for (auto smi : smiles) { // std::cerr << smi << 
std::endl; std::unique_ptr<RWMol> mol(SmilesToMol(smi)); REQUIRE(mol); auto conf = new Conformer(1); conf->set3D(false); conf->setAtomPos(0, RDGeom::Point3D(0, 0, 0)); mol->addConformer(conf, true); bool explicitOnly = false; bool addCoords = true; MolOps::addHs(*mol, explicitOnly, addCoords); for (size_t i = 0; i < mol->getNumAtoms(); ++i) { // std::cerr << " " << i << " " << conf->getAtomPos(i) << std::endl; CHECK(conf->getAtomPos(i).z == 0.0); } } } } #endif TEST_CASE( "github #2437: Canon::rankMolAtoms results in crossed double bonds in " "rings", "[bug, molops]") { SECTION("underlying problem") { std::string molb = R"CTAB(testmol Mrv1824 05081910082D 4 4 0 0 0 0 999 V2000 6.9312 -8.6277 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 6.9312 -9.4527 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 7.7562 -8.6277 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 7.7562 -9.4527 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 1 2 1 0 0 0 0 1 3 1 0 0 0 0 3 4 1 0 0 0 0 2 4 2 0 0 0 0 M END )CTAB"; bool sanitize = false; bool removeHs = false; std::unique_ptr<RWMol> mol(MolBlockToMol(molb, sanitize, removeHs)); REQUIRE(mol); mol->updatePropertyCache(); CHECK(mol->getBondWithIdx(3)->getBondType() == Bond::BondType::DOUBLE); CHECK(mol->getBondWithIdx(3)->getBondDir() == Bond::BondDir::NONE); std::vector<unsigned int> ranks; CHECK(!mol->getRingInfo()->isInitialized()); Canon::rankMolAtoms(*mol, ranks); CHECK(!mol->getRingInfo()->isInitialized()); } SECTION("as discovered") { std::string molb = R"CTAB(testmol Mrv1824 05081910082D 4 4 0 0 0 0 999 V2000 6.9312 -8.6277 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 6.9312 -9.4527 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 7.7562 -8.6277 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 7.7562 -9.4527 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 1 2 1 0 0 0 0 1 3 1 0 0 0 0 3 4 1 0 0 0 0 2 4 2 0 0 0 0 M END )CTAB"; bool sanitize = false; bool removeHs = false; std::unique_ptr<RWMol> mol(MolBlockToMol(molb, sanitize, removeHs)); REQUIRE(mol); mol->updatePropertyCache(); CHECK(mol->getBondWithIdx(3)->getBondType() == Bond::BondType::DOUBLE); CHECK(mol->getBondWithIdx(3)->getBondDir() == Bond::BondDir::NONE); auto nmb = MolToMolBlock(*mol); CHECK(nmb.find("2 4 2 3") == std::string::npos); CHECK(nmb.find("2 4 2 0") != std::string::npos); std::vector<unsigned int> ranks; Canon::rankMolAtoms(*mol, ranks); nmb = MolToMolBlock(*mol); CHECK(nmb.find("2 4 2 3") == std::string::npos); CHECK(nmb.find("2 4 2 0") != std::string::npos); } } TEST_CASE( "github #2423: Incorrect assignment of explicit Hs to Al+3 read from mol " "block", "[bug, molops]") { SECTION("basics: single atom mols") { std::string mb = R"CTAB(2300 -OEChem-01301907122D 1 0 0 0 0 0 0 0 0999 V2000 -66.7000 999.0000 0.0000 Al 0 1 0 0 0 0 0 0 0 0 0 0 M CHG 1 1 3 M END)CTAB"; std::unique_ptr<ROMol> mol(MolBlockToMol(mb)); REQUIRE(mol); CHECK(mol->getAtomWithIdx(0)->getFormalCharge() == 3); CHECK(mol->getAtomWithIdx(0)->getTotalNumHs() == 0); } } TEST_CASE("Specialized exceptions for sanitization errors", "[molops]") { SECTION("AtomValenceException") { std::vector<std::pair<std::string, unsigned int>> smiles = { {"C=n1ccnc1", 1}, {"CCO(C)C", 2}}; for (auto pr : smiles) { CHECK_THROWS_AS(SmilesToMol(pr.first), AtomValenceException); try { auto m = SmilesToMol(pr.first); } catch (const AtomValenceException &e) { CHECK(e.getType() == "AtomValenceException"); CHECK(e.getAtomIdx() == pr.second); } } } SECTION("AtomKekulizeException") { std::vector<std::pair<std::string, unsigned int>> smiles = { {"CCcc", 2}, {"C1:c:CC1", 0}}; for (auto pr : smiles) { CHECK_THROWS_AS(SmilesToMol(pr.first), AtomKekulizeException); try { 
auto m = SmilesToMol(pr.first); } catch (const AtomKekulizeException &e) { CHECK(e.getType() == "AtomKekulizeException"); CHECK(e.getAtomIdx() == pr.second); } } } SECTION("KekulizeException") { std::vector<std::pair<std::string, std::vector<unsigned int>>> smiles = { {"c1cccc1", {0, 1, 2, 3, 4}}, {"Cc1cc1", {1, 2, 3}}}; for (auto pr : smiles) { CHECK_THROWS_AS(SmilesToMol(pr.first), KekulizeException); try { auto m = SmilesToMol(pr.first); } catch (const KekulizeException &e) { CHECK(e.getType() == "KekulizeException"); CHECK(e.getAtomIndices() == pr.second); } } } } TEST_CASE("detectChemistryProblems", "[molops]") { SECTION("Basics") { SmilesParserParams ps; ps.sanitize = false; auto m = std::unique_ptr<ROMol>(SmilesToMol("CO(C)CFCc1cc1", ps)); REQUIRE(m); auto res = MolOps::detectChemistryProblems(*m); REQUIRE(res.size() == 3); CHECK(res[0]->getType() == "AtomValenceException"); REQUIRE(dynamic_cast<AtomValenceException *>(res[0].get())); CHECK(dynamic_cast<AtomSanitizeException *>(res[0].get())->getAtomIdx() == 1); CHECK(res[1]->getType() == "AtomValenceException"); REQUIRE(dynamic_cast<AtomSanitizeException *>(res[1].get())); CHECK(dynamic_cast<AtomSanitizeException *>(res[1].get())->getAtomIdx() == 4); CHECK(res[2]->getType() == "KekulizeException"); REQUIRE(dynamic_cast<KekulizeException *>(res[2].get())); CHECK(dynamic_cast<KekulizeException *>(res[2].get())->getAtomIndices() == std::vector<unsigned int>({6, 7, 8})); } SECTION("No problems") { SmilesParserParams ps; ps.sanitize = false; auto m = std::unique_ptr<ROMol>(SmilesToMol("c1ccccc1", ps)); REQUIRE(m); auto res = MolOps::detectChemistryProblems(*m); REQUIRE(res.size() == 0); } } TEST_CASE( "github #2606: Bad valence corrections on Pb, Sn" "[bug, molops]") { SECTION("basics-Pb") { std::string mb = R"CTAB( Mrv1810 08141905562D 5 0 0 0 0 0 999 V2000 -3.6316 -0.4737 0.0000 Pb 0 0 0 0 0 0 0 0 0 0 0 0 -3.6541 0.3609 0.0000 O 0 5 0 0 0 0 0 0 0 0 0 0 -2.4586 -0.5188 0.0000 O 0 5 0 0 0 0 0 0 0 0 0 0 -3.6992 -1.5338 0.0000 O 0 5 0 0 0 0 0 0 0 0 0 0 -4.5789 -0.4286 0.0000 O 0 5 0 0 0 0 0 0 0 0 0 0 M CHG 5 1 4 2 -1 3 -1 4 -1 5 -1 M END )CTAB"; std::unique_ptr<ROMol> mol(MolBlockToMol(mb)); REQUIRE(mol); CHECK(mol->getAtomWithIdx(0)->getFormalCharge() == 4); CHECK(mol->getAtomWithIdx(0)->getTotalNumHs() == 0); } SECTION("basics-Sn") { std::string mb = R"CTAB( Mrv1810 08141905562D 5 0 0 0 0 0 999 V2000 -3.6316 -0.4737 0.0000 Sn 0 0 0 0 0 0 0 0 0 0 0 0 -3.6541 0.3609 0.0000 O 0 5 0 0 0 0 0 0 0 0 0 0 -2.4586 -0.5188 0.0000 O 0 5 0 0 0 0 0 0 0 0 0 0 -3.6992 -1.5338 0.0000 O 0 5 0 0 0 0 0 0 0 0 0 0 -4.5789 -0.4286 0.0000 O 0 5 0 0 0 0 0 0 0 0 0 0 M CHG 5 1 4 2 -1 3 -1 4 -1 5 -1 M END )CTAB"; std::unique_ptr<ROMol> mol(MolBlockToMol(mb)); REQUIRE(mol); CHECK(mol->getAtomWithIdx(0)->getFormalCharge() == 4); CHECK(mol->getAtomWithIdx(0)->getTotalNumHs() == 0); } SECTION("basics-Ge") { std::string mb = R"CTAB( Mrv1810 08141905562D 5 0 0 0 0 0 999 V2000 -3.6316 -0.4737 0.0000 Ge 0 0 0 0 0 0 0 0 0 0 0 0 -3.6541 0.3609 0.0000 O 0 5 0 0 0 0 0 0 0 0 0 0 -2.4586 -0.5188 0.0000 O 0 5 0 0 0 0 0 0 0 0 0 0 -3.6992 -1.5338 0.0000 O 0 5 0 0 0 0 0 0 0 0 0 0 -4.5789 -0.4286 0.0000 O 0 5 0 0 0 0 0 0 0 0 0 0 M CHG 5 1 4 2 -1 3 -1 4 -1 5 -1 M END )CTAB"; std::unique_ptr<ROMol> mol(MolBlockToMol(mb)); REQUIRE(mol); CHECK(mol->getAtomWithIdx(0)->getFormalCharge() == 4); CHECK(mol->getAtomWithIdx(0)->getTotalNumHs() == 0); } } TEST_CASE( "github #2607: Pb, Sn should support valence 2" "[bug, molops]") { SECTION("basics-Pb") { std::string mb = R"CTAB( Mrv1810 
08141905562D 3 0 0 0 0 0 999 V2000 -3.6316 -0.4737 0.0000 Pb 0 0 0 0 0 0 0 0 0 0 0 0 -3.6541 0.3609 0.0000 O 0 5 0 0 0 0 0 0 0 0 0 0 -2.4586 -0.5188 0.0000 O 0 5 0 0 0 0 0 0 0 0 0 0 M CHG 3 1 2 2 -1 3 -1 M END )CTAB"; std::unique_ptr<ROMol> mol(MolBlockToMol(mb)); REQUIRE(mol); CHECK(mol->getAtomWithIdx(0)->getFormalCharge() == 2); CHECK(mol->getAtomWithIdx(0)->getTotalNumHs() == 0); } SECTION("basics-Sn") { std::string mb = R"CTAB( Mrv1810 08141905562D 3 0 0 0 0 0 999 V2000 -3.6316 -0.4737 0.0000 Sn 0 0 0 0 0 0 0 0 0 0 0 0 -3.6541 0.3609 0.0000 O 0 5 0 0 0 0 0 0 0 0 0 0 -2.4586 -0.5188 0.0000 O 0 5 0 0 0 0 0 0 0 0 0 0 M CHG 3 1 2 2 -1 3 -1 M END )CTAB"; std::unique_ptr<ROMol> mol(MolBlockToMol(mb)); REQUIRE(mol); CHECK(mol->getAtomWithIdx(0)->getFormalCharge() == 2); CHECK(mol->getAtomWithIdx(0)->getTotalNumHs() == 0); } } TEST_CASE( "github #2649: Allenes read from mol blocks have crossed bonds assigned" "[bug, stereochemistry]") { SECTION("basics") { std::string mb = R"CTAB(mol Mrv1824 09191901002D 6 5 0 0 0 0 999 V2000 -1.6986 -7.4294 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -2.2522 -6.8245 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -1.1438 -8.0357 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -2.8095 -6.2156 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -0.3374 -7.8470 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -3.6162 -6.3886 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 1 3 2 0 0 0 0 2 1 2 0 0 0 0 3 5 1 0 0 0 0 4 2 2 0 0 0 0 6 4 1 0 0 0 0 M END)CTAB"; std::unique_ptr<ROMol> mol(MolBlockToMol(mb)); REQUIRE(mol); CHECK(mol->getBondWithIdx(0)->getStereo() == Bond::STEREONONE); CHECK(mol->getBondWithIdx(1)->getStereo() == Bond::STEREONONE); CHECK(mol->getBondWithIdx(3)->getStereo() == Bond::STEREONONE); auto outmolb = MolToMolBlock(*mol); // std::cerr<<outmolb<<std::endl; CHECK(outmolb.find("1 3 2 0") != std::string::npos); CHECK(outmolb.find("2 1 2 0") != std::string::npos); CHECK(outmolb.find("4 2 2 0") != std::string::npos); } } TEST_CASE( "GitHub 2712: setBondStereoFromDirections() returning incorrect results" "[stereochemistry]") { SECTION("basics 1a") { std::string mb = R"CTAB( Mrv1810 10141909562D 4 3 0 0 0 0 999 V2000 3.3412 -2.9968 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 2.5162 -2.9968 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 2.1037 -3.7112 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 3.7537 -2.2823 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 1 2 2 0 0 0 0 2 3 1 0 0 0 0 1 4 1 0 0 0 0 M END )CTAB"; bool sanitize = false; std::unique_ptr<ROMol> mol(MolBlockToMol(mb, sanitize)); REQUIRE(mol); CHECK(mol->getBondWithIdx(0)->getBondType() == Bond::DOUBLE); CHECK(mol->getBondWithIdx(0)->getStereo() == Bond::STEREONONE); MolOps::setBondStereoFromDirections(*mol); CHECK(mol->getBondWithIdx(0)->getStereo() == Bond::STEREOTRANS); } SECTION("basics 1b") { std::string mb = R"CTAB( Mrv1810 10141909562D 4 3 0 0 0 0 999 V2000 3.3412 -2.9968 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 2.5162 -2.9968 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 2.1037 -3.7112 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 3.7537 -2.2823 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 1 2 2 0 0 0 0 2 3 1 0 0 0 0 4 1 1 0 0 0 0 M END )CTAB"; bool sanitize = false; std::unique_ptr<ROMol> mol(MolBlockToMol(mb, sanitize)); REQUIRE(mol); CHECK(mol->getBondWithIdx(0)->getBondType() == Bond::DOUBLE); CHECK(mol->getBondWithIdx(0)->getStereo() == Bond::STEREONONE); MolOps::setBondStereoFromDirections(*mol); CHECK(mol->getBondWithIdx(0)->getStereo() == Bond::STEREOTRANS); } SECTION("basics 2a") { std::string mb = R"CTAB( Mrv1810 10141909582D 4 3 0 0 0 0 999 V2000 3.4745 -5.2424 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 2.6495 -5.2424 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 2.2370 -5.9569 
0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 3.8870 -5.9569 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 1 2 2 0 0 0 0 2 3 1 0 0 0 0 1 4 1 0 0 0 0 M END )CTAB"; bool sanitize = false; std::unique_ptr<ROMol> mol(MolBlockToMol(mb, sanitize)); REQUIRE(mol); CHECK(mol->getBondWithIdx(0)->getBondType() == Bond::DOUBLE); CHECK(mol->getBondWithIdx(0)->getStereo() == Bond::STEREONONE); MolOps::setBondStereoFromDirections(*mol); CHECK(mol->getBondWithIdx(0)->getStereo() == Bond::STEREOCIS); } SECTION("basics 2b") { std::string mb = R"CTAB( Mrv1810 10141909582D 4 3 0 0 0 0 999 V2000 3.4745 -5.2424 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 2.6495 -5.2424 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 2.2370 -5.9569 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 3.8870 -5.9569 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 1 2 2 0 0 0 0 2 3 1 0 0 0 0 4 1 1 0 0 0 0 M END )CTAB"; bool sanitize = false; std::unique_ptr<ROMol> mol(MolBlockToMol(mb, sanitize)); REQUIRE(mol); CHECK(mol->getBondWithIdx(0)->getBondType() == Bond::DOUBLE); CHECK(mol->getBondWithIdx(0)->getStereo() == Bond::STEREONONE); MolOps::setBondStereoFromDirections(*mol); CHECK(mol->getBondWithIdx(0)->getStereo() == Bond::STEREOCIS); } } TEST_CASE("removeHs screwing up double bond stereo", "[bug,removeHs]") { SECTION("example1") { std::string molblock = R"CTAB(molblock = """ SciTegic12221702182D 47 51 0 0 0 0 999 V2000 0.2962 6.2611 0.0000 C 0 0 -3.9004 4.4820 0.0000 C 0 0 1.4195 5.2670 0.0000 C 0 0 -3.8201 -7.4431 0.0000 C 0 0 -4.9433 -6.4490 0.0000 C 0 0 -2.3975 -6.9674 0.0000 C 0 0 3.5921 -3.5947 0.0000 C 0 0 -3.1475 2.3700 0.0000 C 0 0 2.1695 -4.0705 0.0000 C 0 0 -2.0242 1.3759 0.0000 C 0 0 -4.6440 -4.9792 0.0000 C 0 0 2.7681 -1.1308 0.0000 C 0 0 -5.8626 1.1332 0.0000 C 0 0 3.0674 0.3391 0.0000 C 0 0 3.6660 3.2787 0.0000 C 0 0 8.1591 -0.6978 0.0000 C 0 0 7.3351 1.7662 0.0000 C 0 0 -6.3876 3.5028 0.0000 C 0 0 -0.6756 -5.0219 0.0000 C 0 0 7.0358 0.2964 0.0000 C 0 0 3.8914 -2.1249 0.0000 C 0 0 -2.0982 -5.4976 0.0000 C 0 0 -4.5701 1.8943 0.0000 C 0 0 1 0 0 0 -6.9859 2.1273 0.0000 C 0 0 1 0 0 0 4.4900 0.8148 0.0000 C 0 0 1.3455 -1.6065 0.0000 C 0 0 4.7893 2.2846 0.0000 C 0 0 1.9442 1.3332 0.0000 C 0 0 1.0462 -3.0763 0.0000 C 0 0 2.2435 2.8030 0.0000 C 0 0 -0.6017 1.8516 0.0000 C 0 0 5.6132 -0.1794 0.0000 C 0 0 0.2223 -0.6124 0.0000 Cl 0 0 9.2823 -1.6919 0.0000 N 0 0 -3.2215 -4.5035 0.0000 N 0 0 6.2119 2.7603 0.0000 N 0 0 5.3139 -1.6492 0.0000 N 0 0 0.5216 0.8575 0.0000 N 0 0 -4.8945 3.3588 0.0000 N 0 0 -8.2913 2.8662 0.0000 O 0 0 -0.3024 3.3214 0.0000 O 0 0 1.1202 3.7971 0.0000 O 0 0 -0.3763 -3.5520 0.0000 O 0 0 -2.8482 3.8398 0.0000 H 0 0 -2.3235 -0.0940 0.0000 H 0 0 -3.9483 0.5292 0.0000 H 0 0 -7.8572 0.9063 0.0000 H 0 0 1 3 1 0 2 39 1 0 3 42 1 0 4 5 2 0 4 6 1 0 5 11 1 0 6 22 2 0 7 9 2 0 7 21 1 0 8 44 1 0 8 10 2 0 8 23 1 0 9 29 1 0 10 45 1 0 10 31 1 0 11 35 2 0 12 21 2 0 12 26 1 0 13 23 1 0 13 24 1 0 14 25 2 0 14 28 1 0 15 27 2 0 15 30 1 0 16 20 1 0 16 34 3 0 17 20 2 0 17 36 1 0 18 24 1 0 18 39 1 0 19 22 1 0 19 43 1 0 20 32 1 0 21 37 1 0 22 35 1 0 23 46 1 6 23 39 1 0 24 47 1 1 24 40 1 0 25 27 1 0 25 32 1 0 26 29 2 0 26 33 1 0 27 36 1 0 28 30 2 0 28 38 1 0 29 43 1 0 30 42 1 0 31 38 2 0 31 41 1 0 32 37 2 3 M END """ )CTAB"; bool sanitize = false; bool removeHs = false; std::unique_ptr<RWMol> m(MolBlockToMol(molblock, sanitize, removeHs)); REQUIRE(m); m->updatePropertyCache(); MolOps::setBondStereoFromDirections(*m); CHECK(m->getBondWithIdx(10)->getBondType() == Bond::DOUBLE); CHECK(m->getBondWithIdx(10)->getStereo() == Bond::STEREOTRANS); REQUIRE(m->getBondWithIdx(10)->getStereoAtoms().size() == 2); 
CHECK(m->getBondWithIdx(10)->getStereoAtoms()[0] == 43); CHECK(m->getBondWithIdx(10)->getStereoAtoms()[1] == 44); MolOps::removeHs(*m); // implicitOnly,updateExplicitCount,sanitize); // m->debugMol(std::cerr); CHECK(m->getBondWithIdx(9)->getBondType() == Bond::DOUBLE); CHECK(m->getBondWithIdx(9)->getStereo() == Bond::STEREOTRANS); REQUIRE(m->getBondWithIdx(9)->getStereoAtoms().size() == 2); CHECK(m->getBondWithIdx(9)->getStereoAtoms()[0] == 22); CHECK(m->getBondWithIdx(9)->getStereoAtoms()[1] == 30); } } TEST_CASE("setDoubleBondNeighborDirections()", "[stereochemistry,bug]") { SECTION("basics") { auto m = "CC=CC"_smiles; REQUIRE(m); m->getBondWithIdx(1)->getStereoAtoms() = {0, 3}; m->getBondWithIdx(1)->setStereo(Bond::STEREOCIS); MolOps::setDoubleBondNeighborDirections(*m); CHECK(m->getBondWithIdx(0)->getBondDir() == Bond::ENDUPRIGHT); CHECK(m->getBondWithIdx(2)->getBondDir() == Bond::ENDDOWNRIGHT); CHECK(MolToSmiles(*m) == "C/C=C\\C"); } }
1
20,063
It might be reasonable to add a check that sanitization still fails here.
rdkit-rdkit
cpp
@@ -602,9 +602,8 @@ public class JobRunner extends EventHandler implements Runnable { } final int attemptNo = this.node.getAttempt(); - logInfo("Finishing job " + this.jobId + " attempt: " + attemptNo + " at " + logInfo("Finishing job " + this.jobId + (this.node.getAttempt() > 0 ? (" retry: " + attemptNo) : "") + " at " + this.node.getEndTime() + " with status " + this.node.getStatus()); - fireEvent(Event.create(this, Type.JOB_FINISHED, new EventData(finalStatus, this.node.getNestedId())), false); finalizeLogFile(attemptNo);
1
/* * Copyright 2012 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.execapp; import azkaban.Constants; import azkaban.event.Event; import azkaban.event.Event.Type; import azkaban.event.EventData; import azkaban.event.EventHandler; import azkaban.execapp.event.BlockingStatus; import azkaban.execapp.event.FlowWatcher; import azkaban.executor.ExecutableFlowBase; import azkaban.executor.ExecutableNode; import azkaban.executor.ExecutorLoader; import azkaban.executor.ExecutorManagerException; import azkaban.executor.Status; import azkaban.flow.CommonJobProperties; import azkaban.jobExecutor.AbstractProcessJob; import azkaban.jobExecutor.JavaProcessJob; import azkaban.jobExecutor.Job; import azkaban.jobtype.JobTypeManager; import azkaban.jobtype.JobTypeManagerException; import azkaban.utils.ExternalLinkUtils; import azkaban.utils.PatternLayoutEscaped; import azkaban.utils.Props; import azkaban.utils.StringUtils; import azkaban.utils.UndefinedPropertyException; import java.io.File; import java.io.FilenameFilter; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.Optional; import java.util.Set; import org.apache.kafka.log4jappender.KafkaLog4jAppender; import org.apache.log4j.Appender; import org.apache.log4j.EnhancedPatternLayout; import org.apache.log4j.FileAppender; import org.apache.log4j.Layout; import org.apache.log4j.Logger; import org.apache.log4j.RollingFileAppender; public class JobRunner extends EventHandler implements Runnable { public static final String AZKABAN_WEBSERVER_URL = "azkaban.webserver.url"; private static final Logger serverLogger = Logger.getLogger(JobRunner.class); private static final Object logCreatorLock = new Object(); private final Layout DEFAULT_LAYOUT = new EnhancedPatternLayout( "%d{dd-MM-yyyy HH:mm:ss z} %c{1} %p - %m\n"); private final Object syncObject = new Object(); private final JobTypeManager jobtypeManager; private final ExecutorLoader loader; private final Props props; private final Props azkabanProps; private final ExecutableNode node; private final File workingDir; private final Layout loggerLayout = this.DEFAULT_LAYOUT; private final String jobId; private final Set<String> pipelineJobs = new HashSet<>(); private Logger logger = null; private Logger flowLogger = null; private Appender jobAppender = null; private Optional<Appender> kafkaAppender = Optional.empty(); private File logFile; private String attachmentFileName; private Job job; private int executionId = -1; // Used by the job to watch and block against another flow private Integer pipelineLevel = null; private FlowWatcher watcher = null; private Set<String> proxyUsers = null; private String jobLogChunkSize; private int jobLogBackupIndex; private long delayStartMs = 0; private boolean killed = false; private BlockingStatus currentBlockStatus = null; public JobRunner(final ExecutableNode node, final File workingDir, final ExecutorLoader loader, final JobTypeManager 
jobtypeManager, final Props azkabanProps) { this.props = node.getInputProps(); this.node = node; this.workingDir = workingDir; this.executionId = node.getParentFlow().getExecutionId(); this.jobId = node.getId(); this.loader = loader; this.jobtypeManager = jobtypeManager; this.azkabanProps = azkabanProps; } public static String createLogFileName(final ExecutableNode node, final int attempt) { final int executionId = node.getExecutableFlow().getExecutionId(); String jobId = node.getId(); if (node.getExecutableFlow() != node.getParentFlow()) { // Posix safe file delimiter jobId = node.getPrintableId("._."); } return attempt > 0 ? "_job." + executionId + "." + attempt + "." + jobId + ".log" : "_job." + executionId + "." + jobId + ".log"; } public static String createLogFileName(final ExecutableNode node) { return JobRunner.createLogFileName(node, node.getAttempt()); } public static String createMetaDataFileName(final ExecutableNode node, final int attempt) { final int executionId = node.getExecutableFlow().getExecutionId(); String jobId = node.getId(); if (node.getExecutableFlow() != node.getParentFlow()) { // Posix safe file delimiter jobId = node.getPrintableId("._."); } return attempt > 0 ? "_job." + executionId + "." + attempt + "." + jobId + ".meta" : "_job." + executionId + "." + jobId + ".meta"; } public static String createMetaDataFileName(final ExecutableNode node) { return JobRunner.createMetaDataFileName(node, node.getAttempt()); } public static String createAttachmentFileName(final ExecutableNode node) { return JobRunner.createAttachmentFileName(node, node.getAttempt()); } public static String createAttachmentFileName(final ExecutableNode node, final int attempt) { final int executionId = node.getExecutableFlow().getExecutionId(); String jobId = node.getId(); if (node.getExecutableFlow() != node.getParentFlow()) { // Posix safe file delimiter jobId = node.getPrintableId("._."); } return attempt > 0 ? "_job." + executionId + "." + attempt + "." + jobId + ".attach" : "_job." + executionId + "." 
+ jobId + ".attach"; } public void setValidatedProxyUsers(final Set<String> proxyUsers) { this.proxyUsers = proxyUsers; } public void setLogSettings(final Logger flowLogger, final String logFileChuckSize, final int numLogBackup) { this.flowLogger = flowLogger; this.jobLogChunkSize = logFileChuckSize; this.jobLogBackupIndex = numLogBackup; } public Props getProps() { return this.props; } public void setPipeline(final FlowWatcher watcher, final int pipelineLevel) { this.watcher = watcher; this.pipelineLevel = pipelineLevel; if (this.pipelineLevel == 1) { this.pipelineJobs.add(this.node.getNestedId()); } else if (this.pipelineLevel == 2) { this.pipelineJobs.add(this.node.getNestedId()); final ExecutableFlowBase parentFlow = this.node.getParentFlow(); if (parentFlow.getEndNodes().contains(this.node.getId())) { if (!parentFlow.getOutNodes().isEmpty()) { final ExecutableFlowBase grandParentFlow = parentFlow.getParentFlow(); for (final String outNode : parentFlow.getOutNodes()) { final ExecutableNode nextNode = grandParentFlow.getExecutableNode(outNode); // If the next node is a nested flow, then we add the nested // starting nodes if (nextNode instanceof ExecutableFlowBase) { final ExecutableFlowBase nextFlow = (ExecutableFlowBase) nextNode; findAllStartingNodes(nextFlow, this.pipelineJobs); } else { this.pipelineJobs.add(nextNode.getNestedId()); } } } } else { for (final String outNode : this.node.getOutNodes()) { final ExecutableNode nextNode = parentFlow.getExecutableNode(outNode); // If the next node is a nested flow, then we add the nested starting // nodes if (nextNode instanceof ExecutableFlowBase) { final ExecutableFlowBase nextFlow = (ExecutableFlowBase) nextNode; findAllStartingNodes(nextFlow, this.pipelineJobs); } else { this.pipelineJobs.add(nextNode.getNestedId()); } } } } } private void findAllStartingNodes(final ExecutableFlowBase flow, final Set<String> pipelineJobs) { for (final String startingNode : flow.getStartNodes()) { final ExecutableNode node = flow.getExecutableNode(startingNode); if (node instanceof ExecutableFlowBase) { findAllStartingNodes((ExecutableFlowBase) node, pipelineJobs); } else { pipelineJobs.add(node.getNestedId()); } } } /** * Returns a list of jobs that this JobRunner will wait upon to finish before starting. It is only * relevant if pipeline is turned on. */ public Set<String> getPipelineWatchedJobs() { return this.pipelineJobs; } public long getDelayStart() { return this.delayStartMs; } public void setDelayStart(final long delayMS) { this.delayStartMs = delayMS; } public ExecutableNode getNode() { return this.node; } public String getJobId() { return this.node.getId(); } public String getLogFilePath() { return this.logFile == null ? null : this.logFile.getPath(); } private void createLogger() { // Create logger synchronized (logCreatorLock) { final String loggerName = System.currentTimeMillis() + "." + this.executionId + "." 
+ this.jobId; this.logger = Logger.getLogger(loggerName); try { attachFileAppender(createFileAppender()); } catch (final IOException e) { removeAppender(this.jobAppender); this.flowLogger.error("Could not open log file in " + this.workingDir + " for job " + this.jobId, e); } if (this.props.getBoolean(Constants.JobProperties.AZKABAN_JOB_LOGGING_KAFKA_ENABLE, false)) { // Only attempt appender construction if required properties are present if (this.azkabanProps .containsKey(Constants.ConfigurationKeys.AZKABAN_SERVER_LOGGING_KAFKA_BROKERLIST) && this.azkabanProps .containsKey(Constants.ConfigurationKeys.AZKABAN_SERVER_LOGGING_KAFKA_TOPIC)) { try { attachKafkaAppender(createKafkaAppender()); } catch (final Exception e) { removeAppender(this.kafkaAppender); this.flowLogger.error("Failed to create Kafka appender for job " + this.jobId, e); } } else { this.flowLogger.info( "Kafka appender not created as brokerlist or topic not provided by executor server"); } } } final String externalViewer = ExternalLinkUtils .getExternalLogViewer(this.azkabanProps, this.jobId, this.props); if (!externalViewer.isEmpty()) { this.logger.info("See logs at: " + externalViewer); } } private void attachFileAppender(final FileAppender appender) { // If present, remove the existing file appender assert (this.jobAppender == null); this.jobAppender = appender; this.logger.addAppender(this.jobAppender); this.logger.setAdditivity(false); this.flowLogger.info("Attached file appender for job " + this.jobId); } private FileAppender createFileAppender() throws IOException { // Set up log files final String logName = createLogFileName(this.node); this.logFile = new File(this.workingDir, logName); final String absolutePath = this.logFile.getAbsolutePath(); // Attempt to create FileAppender final RollingFileAppender fileAppender = new RollingFileAppender(this.loggerLayout, absolutePath, true); fileAppender.setMaxBackupIndex(this.jobLogBackupIndex); fileAppender.setMaxFileSize(this.jobLogChunkSize); this.flowLogger.info("Created file appender for job " + this.jobId); return fileAppender; } private void createAttachmentFile() { final String fileName = createAttachmentFileName(this.node); final File file = new File(this.workingDir, fileName); this.attachmentFileName = file.getAbsolutePath(); } private void attachKafkaAppender(final KafkaLog4jAppender appender) { // This should only be called once assert (!this.kafkaAppender.isPresent()); this.kafkaAppender = Optional.of(appender); this.logger.addAppender(this.kafkaAppender.get()); this.logger.setAdditivity(false); this.flowLogger.info("Attached new Kafka appender for job " + this.jobId); } private KafkaLog4jAppender createKafkaAppender() throws UndefinedPropertyException { final KafkaLog4jAppender kafkaProducer = new KafkaLog4jAppender(); kafkaProducer.setSyncSend(false); kafkaProducer.setBrokerList(this.azkabanProps .getString(Constants.ConfigurationKeys.AZKABAN_SERVER_LOGGING_KAFKA_BROKERLIST)); kafkaProducer.setTopic( this.azkabanProps .getString(Constants.ConfigurationKeys.AZKABAN_SERVER_LOGGING_KAFKA_TOPIC)); final String layoutString = LogUtil.createLogPatternLayoutJsonString(this.props, this.jobId); kafkaProducer.setLayout(new PatternLayoutEscaped(layoutString)); kafkaProducer.activateOptions(); this.flowLogger.info("Created kafka appender for " + this.jobId); return kafkaProducer; } private void removeAppender(final Optional<Appender> appender) { if (appender.isPresent()) { removeAppender(appender.get()); } } private void removeAppender(final Appender appender) { if 
(appender != null) { this.logger.removeAppender(appender); appender.close(); } } private void closeLogger() { if (this.jobAppender != null) { removeAppender(this.jobAppender); } if (this.kafkaAppender.isPresent()) { removeAppender(this.kafkaAppender); } } private void writeStatus() { try { this.node.setUpdateTime(System.currentTimeMillis()); this.loader.updateExecutableNode(this.node); } catch (final ExecutorManagerException e) { this.flowLogger.error("Could not update job properties in db for " + this.jobId, e); } } /** * Used to handle non-ready and special status's (i.e. KILLED). Returns true if they handled * anything. */ private boolean handleNonReadyStatus() { Status nodeStatus = this.node.getStatus(); boolean quickFinish = false; final long time = System.currentTimeMillis(); if (Status.isStatusFinished(nodeStatus)) { quickFinish = true; } else if (nodeStatus == Status.DISABLED) { nodeStatus = changeStatus(Status.SKIPPED, time); quickFinish = true; } else if (this.isKilled()) { nodeStatus = changeStatus(Status.KILLED, time); quickFinish = true; } if (quickFinish) { this.node.setStartTime(time); fireEvent( Event.create(this, Type.JOB_STARTED, new EventData(nodeStatus, this.node.getNestedId()))); this.node.setEndTime(time); fireEvent( Event .create(this, Type.JOB_FINISHED, new EventData(nodeStatus, this.node.getNestedId()))); return true; } return false; } /** * If pipelining is set, will block on another flow's jobs. */ private boolean blockOnPipeLine() { if (this.isKilled()) { return true; } // For pipelining of jobs. Will watch other jobs. if (!this.pipelineJobs.isEmpty()) { String blockedList = ""; final ArrayList<BlockingStatus> blockingStatus = new ArrayList<>(); for (final String waitingJobId : this.pipelineJobs) { final Status status = this.watcher.peekStatus(waitingJobId); if (status != null && !Status.isStatusFinished(status)) { final BlockingStatus block = this.watcher.getBlockingStatus(waitingJobId); blockingStatus.add(block); blockedList += waitingJobId + ","; } } if (!blockingStatus.isEmpty()) { this.logger.info("Pipeline job " + this.jobId + " waiting on " + blockedList + " in execution " + this.watcher.getExecId()); for (final BlockingStatus bStatus : blockingStatus) { this.logger.info("Waiting on pipelined job " + bStatus.getJobId()); this.currentBlockStatus = bStatus; bStatus.blockOnFinishedStatus(); if (this.isKilled()) { this.logger.info("Job was killed while waiting on pipeline. Quiting."); return true; } else { this.logger.info("Pipelined job " + bStatus.getJobId() + " finished."); } } } } this.currentBlockStatus = null; return false; } private boolean delayExecution() { if (this.isKilled()) { return true; } final long currentTime = System.currentTimeMillis(); if (this.delayStartMs > 0) { this.logger.info("Delaying start of execution for " + this.delayStartMs + " milliseconds."); synchronized (this) { try { this.wait(this.delayStartMs); this.logger.info("Execution has been delayed for " + this.delayStartMs + " ms. Continuing with execution."); } catch (final InterruptedException e) { this.logger.error("Job " + this.jobId + " was to be delayed for " + this.delayStartMs + ". Interrupted after " + (System.currentTimeMillis() - currentTime)); } } if (this.isKilled()) { this.logger.info("Job was killed while in delay. 
Quiting."); return true; } } return false; } private void finalizeLogFile(final int attemptNo) { closeLogger(); if (this.logFile == null) { this.flowLogger.info("Log file for job " + this.jobId + " is null"); return; } try { final File[] files = this.logFile.getParentFile().listFiles(new FilenameFilter() { @Override public boolean accept(final File dir, final String name) { return name.startsWith(JobRunner.this.logFile.getName()); } }); Arrays.sort(files, Collections.reverseOrder()); this.loader.uploadLogFile(this.executionId, this.node.getNestedId(), attemptNo, files); } catch (final ExecutorManagerException e) { this.flowLogger.error( "Error writing out logs for job " + this.node.getNestedId(), e); } } private void finalizeAttachmentFile() { if (this.attachmentFileName == null) { this.flowLogger.info("Attachment file for job " + this.jobId + " is null"); return; } try { final File file = new File(this.attachmentFileName); if (!file.exists()) { this.flowLogger.info("No attachment file for job " + this.jobId + " written."); return; } this.loader.uploadAttachmentFile(this.node, file); } catch (final ExecutorManagerException e) { this.flowLogger.error( "Error writing out attachment for job " + this.node.getNestedId(), e); } } /** * The main run thread. */ @Override public void run() { try { doRun(); } catch (final Exception e) { serverLogger.error("Unexpected exception", e); throw e; } } private void doRun() { Thread.currentThread().setName( "JobRunner-" + this.jobId + "-" + this.executionId); // If the job is cancelled, disabled, killed. No log is created in this case if (handleNonReadyStatus()) { return; } createAttachmentFile(); createLogger(); boolean errorFound = false; // Delay execution if necessary. Will return a true if something went wrong. errorFound |= delayExecution(); // For pipelining of jobs. Will watch other jobs. Will return true if // something went wrong. errorFound |= blockOnPipeLine(); // Start the node. this.node.setStartTime(System.currentTimeMillis()); Status finalStatus = this.node.getStatus(); uploadExecutableNode(); if (!errorFound && !isKilled()) { fireEvent(Event.create(this, Type.JOB_STARTED, new EventData(this.node))); final Status prepareStatus = prepareJob(); if (prepareStatus != null) { // Writes status to the db writeStatus(); fireEvent(Event.create(this, Type.JOB_STATUS_CHANGED, new EventData(prepareStatus, this.node.getNestedId()))); finalStatus = runJob(); } else { finalStatus = changeStatus(Status.FAILED); logError("Job run failed preparing the job."); } } this.node.setEndTime(System.currentTimeMillis()); if (isKilled()) { // even if it's killed, there is a chance that the job failed is marked as // failure, // So we set it to KILLED to make sure we know that we forced kill it // rather than // it being a legitimate failure. 
finalStatus = changeStatus(Status.KILLED); } final int attemptNo = this.node.getAttempt(); logInfo("Finishing job " + this.jobId + " attempt: " + attemptNo + " at " + this.node.getEndTime() + " with status " + this.node.getStatus()); fireEvent(Event.create(this, Type.JOB_FINISHED, new EventData(finalStatus, this.node.getNestedId())), false); finalizeLogFile(attemptNo); finalizeAttachmentFile(); writeStatus(); } private void uploadExecutableNode() { try { this.loader.uploadExecutableNode(this.node, this.props); } catch (final ExecutorManagerException e) { this.logger.error("Error writing initial node properties", e); } } private Status prepareJob() throws RuntimeException { // Check pre conditions if (this.props == null || this.isKilled()) { logError("Failing job. The job properties don't exist"); return null; } final Status finalStatus; synchronized (this.syncObject) { if (this.node.getStatus() == Status.FAILED || this.isKilled()) { return null; } if (this.node.getAttempt() > 0) { logInfo("Starting job " + this.jobId + " attempt " + this.node.getAttempt() + " at " + this.node.getStartTime()); } else { logInfo("Starting job " + this.jobId + " at " + this.node.getStartTime()); } // If it's an embedded flow, we'll add the nested flow info to the job // conf if (this.node.getExecutableFlow() != this.node.getParentFlow()) { final String subFlow = this.node.getPrintableId(":"); this.props.put(CommonJobProperties.NESTED_FLOW_PATH, subFlow); } insertJobMetadata(); insertJVMAargs(); this.props.put(CommonJobProperties.JOB_ID, this.jobId); this.props.put(CommonJobProperties.JOB_ATTEMPT, this.node.getAttempt()); this.props.put(CommonJobProperties.JOB_METADATA_FILE, createMetaDataFileName(this.node)); this.props.put(CommonJobProperties.JOB_ATTACHMENT_FILE, this.attachmentFileName); finalStatus = changeStatus(Status.RUNNING); // Ability to specify working directory if (!this.props.containsKey(AbstractProcessJob.WORKING_DIR)) { this.props.put(AbstractProcessJob.WORKING_DIR, this.workingDir.getAbsolutePath()); } if (this.props.containsKey("user.to.proxy")) { final String jobProxyUser = this.props.getString("user.to.proxy"); if (this.proxyUsers != null && !this.proxyUsers.contains(jobProxyUser)) { this.logger.error("User " + jobProxyUser + " has no permission to execute this job " + this.jobId + "!"); return null; } } try { this.job = this.jobtypeManager.buildJobExecutor(this.jobId, this.props, this.logger); } catch (final JobTypeManagerException e) { this.logger.error("Failed to build job type", e); return null; } } return finalStatus; } /** * Add useful JVM arguments so it is easier to map a running Java process to a flow, execution id * and job */ private void insertJVMAargs() { final String flowName = this.node.getParentFlow().getFlowId(); final String jobId = this.node.getId(); String jobJVMArgs = String.format( "-Dazkaban.flowid=%s -Dazkaban.execid=%s -Dazkaban.jobid=%s", flowName, this.executionId, jobId); final String previousJVMArgs = this.props.get(JavaProcessJob.JVM_PARAMS); jobJVMArgs += (previousJVMArgs == null) ? "" : " " + previousJVMArgs; this.logger.info("job JVM args: " + jobJVMArgs); this.props.put(JavaProcessJob.JVM_PARAMS, jobJVMArgs); } /** * Add relevant links to the job properties so that downstream consumers may know what executions * initiated their execution. 
*/ private void insertJobMetadata() { final String baseURL = this.azkabanProps.get(AZKABAN_WEBSERVER_URL); if (baseURL != null) { final String flowName = this.node.getParentFlow().getFlowId(); final String projectName = this.node.getParentFlow().getProjectName(); this.props.put(CommonJobProperties.AZKABAN_URL, baseURL); this.props.put(CommonJobProperties.EXECUTION_LINK, String.format("%s/executor?execid=%d", baseURL, this.executionId)); this.props.put(CommonJobProperties.JOBEXEC_LINK, String.format( "%s/executor?execid=%d&job=%s", baseURL, this.executionId, this.jobId)); this.props.put(CommonJobProperties.ATTEMPT_LINK, String.format( "%s/executor?execid=%d&job=%s&attempt=%d", baseURL, this.executionId, this.jobId, this.node.getAttempt())); this.props.put(CommonJobProperties.WORKFLOW_LINK, String.format( "%s/manager?project=%s&flow=%s", baseURL, projectName, flowName)); this.props.put(CommonJobProperties.JOB_LINK, String.format( "%s/manager?project=%s&flow=%s&job=%s", baseURL, projectName, flowName, this.jobId)); } else { if (this.logger != null) { this.logger.info(AZKABAN_WEBSERVER_URL + " property was not set"); } } // out nodes this.props.put(CommonJobProperties.OUT_NODES, StringUtils.join2(this.node.getOutNodes(), ",")); // in nodes this.props.put(CommonJobProperties.IN_NODES, StringUtils.join2(this.node.getInNodes(), ",")); } private Status runJob() { Status finalStatus = this.node.getStatus(); try { this.job.run(); } catch (final Throwable e) { if (this.props.getBoolean("job.succeed.on.failure", false)) { finalStatus = changeStatus(Status.FAILED_SUCCEEDED); logError("Job run failed, but will treat it like success."); logError(e.getMessage() + " cause: " + e.getCause(), e); } else { if (isKilled() || this.node.getStatus() == Status.KILLED) { finalStatus = Status.KILLED; logError("Job run killed!", e); } else { finalStatus = changeStatus(Status.FAILED); logError("Job run failed!", e); } logError(e.getMessage() + " cause: " + e.getCause()); } } if (this.job != null) { this.node.setOutputProps(this.job.getJobGeneratedProperties()); } // If the job is still running, set the status to Success. if (!Status.isStatusFinished(finalStatus)) { finalStatus = changeStatus(Status.SUCCEEDED); } return finalStatus; } private Status changeStatus(final Status status) { changeStatus(status, System.currentTimeMillis()); return status; } private Status changeStatus(final Status status, final long time) { this.node.setStatus(status); this.node.setUpdateTime(time); return status; } private void fireEvent(final Event event) { fireEvent(event, true); } private void fireEvent(final Event event, final boolean updateTime) { if (updateTime) { this.node.setUpdateTime(System.currentTimeMillis()); } this.fireEventListeners(event); } public void killBySLA() { kill(); this.getNode().setKilledBySLA(true); } public void kill() { synchronized (this.syncObject) { if (Status.isStatusFinished(this.node.getStatus())) { return; } logError("Kill has been called."); this.killed = true; final BlockingStatus status = this.currentBlockStatus; if (status != null) { status.unblock(); } // Cancel code here if (this.job == null) { logError("Job hasn't started yet."); // Just in case we're waiting on the delay synchronized (this) { this.notify(); } return; } try { this.job.cancel(); } catch (final Exception e) { logError(e.getMessage()); logError( "Failed trying to cancel job. 
Maybe it hasn't started running yet or just finished."); } this.changeStatus(Status.KILLED); } } public boolean isKilled() { return this.killed; } public Status getStatus() { return this.node.getStatus(); } private void logError(final String message) { if (this.logger != null) { this.logger.error(message); } } private void logError(final String message, final Throwable t) { if (this.logger != null) { this.logger.error(message, t); } } private void logInfo(final String message) { if (this.logger != null) { this.logger.info(message); } } public File getLogFile() { return this.logFile; } public Logger getLogger() { return this.logger; } }
1
14,251
Why not use attemptNo instead of another call to getAttempt? Even better, consider using a private method to calculate this string. This logic is in two places.
azkaban-azkaban
java
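Editor's note (not part of the dataset record above): the review comment for this record suggests reusing the already-computed attemptNo and pulling the shared formatting into a private method, since the same attempt/retry wording is built in two places. Below is a minimal, hedged sketch of that idea as a self-contained Java snippet; the class and helper names (AttemptSuffixSketch, attemptSuffix) are hypothetical, and in the real code the helper would be a private instance method inside JobRunner.

public class AttemptSuffixSketch {

  // Hypothetical helper: formats the attempt/retry portion of the log message in
  // one place, so the "Starting job" and "Finishing job" call sites can share it.
  private static String attemptSuffix(final int attemptNo) {
    // Only mention the retry count when the job has actually been retried.
    return attemptNo > 0 ? (" retry: " + attemptNo) : "";
  }

  public static void main(final String[] args) {
    final int attemptNo = 2;
    // Both log sites reuse the helper instead of repeating the ternary expression.
    System.out.println("Starting job myJob" + attemptSuffix(attemptNo));
    System.out.println("Finishing job myJob" + attemptSuffix(attemptNo)
        + " with status SUCCEEDED");
  }
}

In JobRunner itself, both logInfo call sites (at job start and job finish) would call such a helper with the attemptNo already read from this.node.getAttempt(), avoiding a second getAttempt() call and keeping the formatting logic in one place.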
@@ -2,7 +2,7 @@ module ProjectAssociations extend ActiveSupport::Concern included do - has_many :links, -> { where(deleted: false) } + has_many :links, -> { where("links.deleted = 'f'") } has_one :permission, as: :target has_many :analyses has_many :analysis_summaries, through: :analyses
1
module ProjectAssociations extend ActiveSupport::Concern included do has_many :links, -> { where(deleted: false) } has_one :permission, as: :target has_many :analyses has_many :analysis_summaries, through: :analyses has_many :taggings, as: :taggable has_many :tags, through: :taggings belongs_to :best_analysis, foreign_key: :best_analysis_id, class_name: :Analysis has_many :aliases, -> { where(deleted: false).where.not(preferred_name_id: nil) } has_many :contributions has_many :positions has_many :stack_entries, -> { where(deleted_at: nil) } has_many :stacks, -> { where(deleted_at: nil).where.not(arel_table[:account_id].eq(nil)) }, through: :stack_entries belongs_to :logo belongs_to :organization has_many :manages, -> { where(deleted_at: nil, deleted_by: nil) }, as: 'target' has_many :managers, through: :manages, source: :account has_many :rss_subscriptions, -> { where(deleted: false) } has_many :rss_feeds, through: :rss_subscriptions has_many :reviews has_many :ratings has_many :kudos has_many :jobs belongs_to :forge, class_name: 'Forge::Base' has_one :koders_status has_many :enlistments, -> { where(deleted: false) } has_many :repositories, through: :enlistments has_many :project_licenses, -> { where(deleted: false) } has_many :licenses, -> { order('lower(licenses.nice_name)') }, through: :project_licenses has_many :duplicates, -> { order(created_at: :desc) }, class_name: 'Duplicate', foreign_key: 'good_project_id' has_one :is_a_duplicate, -> { where.not(resolved: true) }, class_name: 'Duplicate', foreign_key: 'bad_project_id' has_many :named_commits, ->(proj) { where(analysis_id: (proj.best_analysis_id || 0)) } has_many :commit_flags, -> { order(time: :desc).where('commit_flags.sloc_set_id = named_commits.sloc_set_id') }, through: :named_commits accepts_nested_attributes_for :enlistments accepts_nested_attributes_for :project_licenses scope :by_collection, ->(ids, sort, query) { collection_arel(ids, sort, query) } def assign_editor_account_to_associations [aliases, enlistments, project_licenses, links].flatten.each { |obj| obj.editor_account = editor_account } end def rss_articles RssArticle.joins(rss_feed: :rss_subscriptions) .where("rss_subscriptions.project_id = #{id} and rss_subscriptions.deleted = false") .order('time DESC') end class << self def collection_arel(ids = nil, sort = nil, query = nil) if !ids.blank? where(id: ids.split(',')).order(:id) else tsearch(query, respond_to?("by_#{sort}") ? "by_#{sort}" : nil) end end end end end
1
7,979
Yuck. Rails bug.
blackducksoftware-ohloh-ui
rb
@@ -927,6 +927,15 @@ class Packet(six.with_metaclass(Packet_metaclass, BasePacket)): return self.payload.answers(other.payload) return 0 + def layers(self): + """returns a list of layer classes (including subclasses) in this packet""" # noqa: E501 + layers = [] + l = self + while l: + layers.append(l.__class__) + l = l.payload.getlayer(0, _subclass=True) + return layers + def haslayer(self, cls): """true if self has a layer that is an instance of cls. Superseded by "cls in self" syntax.""" # noqa: E501 if self.__class__ == cls or cls in [self.__class__.__name__,
1
# This file is part of Scapy # See http://www.secdev.org/projects/scapy for more information # Copyright (C) Philippe Biondi <[email protected]> # This program is published under a GPLv2 license """ Packet class. Binding mechanism. fuzz() method. """ from __future__ import absolute_import from __future__ import print_function import os import re import time import itertools import copy import subprocess from scapy.fields import StrField, ConditionalField, Emph, PacketListField, \ BitField, MultiEnumField, EnumField, FlagsField from scapy.config import conf from scapy.consts import WINDOWS from scapy.compat import * from scapy.base_classes import BasePacket, Gen, SetGen, Packet_metaclass from scapy.volatile import VolatileValue from scapy.utils import import_hexcap, tex_escape, colgen, get_temp_file, \ issubtype, ContextManagerSubprocess from scapy.error import Scapy_Exception, log_runtime from scapy.extlib import PYX import scapy.modules.six as six try: import pyx except ImportError: pass class RawVal: def __init__(self, val=""): self.val = val def __str__(self): return str(self.val) def __bytes__(self): return raw(self.val) def __repr__(self): return "<RawVal [%r]>" % self.val class Packet(six.with_metaclass(Packet_metaclass, BasePacket)): __slots__ = [ "time", "sent_time", "name", "default_fields", "overload_fields", "overloaded_fields", "fields", "fieldtype", "packetfields", "original", "explicit", "raw_packet_cache", "raw_packet_cache_fields", "_pkt", "post_transforms", # then payload and underlayer "payload", "underlayer", "name", # used for sr() "_answered", # used when sniffing "direction", "sniffed_on", # handle snaplen Vs real length "wirelen", # used while performing advanced dissection to handle padding "_tmp_dissect_pos", ] name = None fields_desc = [] overload_fields = {} payload_guess = [] show_indent = 1 show_summary = True @classmethod def from_hexcap(cls): return cls(import_hexcap()) @classmethod def upper_bonds(self): for fval, upper in self.payload_guess: print("%-20s %s" % (upper.__name__, ", ".join("%-12s" % ("%s=%r" % i) for i in six.iteritems(fval)))) # noqa: E501 @classmethod def lower_bonds(self): for lower, fval in six.iteritems(self._overload_fields): print("%-20s %s" % (lower.__name__, ", ".join("%-12s" % ("%s=%r" % i) for i in six.iteritems(fval)))) # noqa: E501 def _unpickle(self, dlist): """Used to unpack pickling""" self.__init__(b"".join(dlist)) return self def __reduce__(self): """Used by pickling methods""" return (self.__class__, (), (self.build(),)) def __reduce_ex__(self, proto): """Used by pickling methods""" return self.__reduce__() def __getstate__(self): """Mark object as pickable""" return self.__reduce__()[2] def __setstate__(self, state): """Rebuild state using pickable methods""" return self._unpickle(state) def __deepcopy__(self, memo): """Used by copy.deepcopy""" return self.copy() def __init__(self, _pkt=b"", post_transform=None, _internal=0, _underlayer=None, **fields): # noqa: E501 self.time = time.time() self.sent_time = None self.name = (self.__class__.__name__ if self._name is None else self._name) self.default_fields = {} self.overload_fields = self._overload_fields self.overloaded_fields = {} self.fields = {} self.fieldtype = {} self.packetfields = [] self.payload = NoPayload() self.init_fields() self.underlayer = _underlayer self.original = _pkt self.explicit = 0 self.raw_packet_cache = None self.raw_packet_cache_fields = None self.wirelen = None if _pkt: self.dissect(_pkt) if not _internal: self.dissection_done(self) for f, v in 
six.iteritems(fields): self.fields[f] = self.get_field(f).any2i(self, v) if isinstance(post_transform, list): self.post_transforms = post_transform elif post_transform is None: self.post_transforms = [] else: self.post_transforms = [post_transform] def init_fields(self): """ Initialize each fields of the fields_desc dict """ self.do_init_fields(self.fields_desc) def do_init_fields(self, flist): """ Initialize each fields of the fields_desc dict """ for f in flist: self.default_fields[f.name] = copy.deepcopy(f.default) self.fieldtype[f.name] = f if f.holds_packets: self.packetfields.append(f) def dissection_done(self, pkt): """DEV: will be called after a dissection is completed""" self.post_dissection(pkt) self.payload.dissection_done(pkt) def post_dissection(self, pkt): """DEV: is called after the dissection of the whole packet""" pass def get_field(self, fld): """DEV: returns the field instance from the name of the field""" return self.fieldtype[fld] def add_payload(self, payload): if payload is None: return elif not isinstance(self.payload, NoPayload): self.payload.add_payload(payload) else: if isinstance(payload, Packet): self.payload = payload payload.add_underlayer(self) for t in self.aliastypes: if t in payload.overload_fields: self.overloaded_fields = payload.overload_fields[t] break elif isinstance(payload, bytes): self.payload = conf.raw_layer(load=payload) else: raise TypeError("payload must be either 'Packet' or 'bytes', not [%s]" % repr(payload)) # noqa: E501 def remove_payload(self): self.payload.remove_underlayer(self) self.payload = NoPayload() self.overloaded_fields = {} def add_underlayer(self, underlayer): self.underlayer = underlayer def remove_underlayer(self, other): self.underlayer = None def copy(self): """Returns a deep copy of the instance.""" clone = self.__class__() clone.fields = self.copy_fields_dict(self.fields) clone.default_fields = self.copy_fields_dict(self.default_fields) clone.overloaded_fields = self.overloaded_fields.copy() clone.underlayer = self.underlayer clone.explicit = self.explicit clone.raw_packet_cache = self.raw_packet_cache clone.raw_packet_cache_fields = self.copy_fields_dict( self.raw_packet_cache_fields ) clone.wirelen = self.wirelen clone.post_transforms = self.post_transforms[:] clone.payload = self.payload.copy() clone.payload.add_underlayer(clone) clone.time = self.time return clone def getfieldval(self, attr): if attr in self.fields: return self.fields[attr] if attr in self.overloaded_fields: return self.overloaded_fields[attr] if attr in self.default_fields: return self.default_fields[attr] return self.payload.getfieldval(attr) def getfield_and_val(self, attr): if attr in self.fields: return self.get_field(attr), self.fields[attr] if attr in self.overloaded_fields: return self.get_field(attr), self.overloaded_fields[attr] if attr in self.default_fields: return self.get_field(attr), self.default_fields[attr] def __getattr__(self, attr): try: fld, v = self.getfield_and_val(attr) except TypeError: return self.payload.__getattr__(attr) if fld is not None: return fld.i2h(self, v) return v def setfieldval(self, attr, val): if attr in self.default_fields: fld = self.get_field(attr) if fld is None: any2i = lambda x, y: y else: any2i = fld.any2i self.fields[attr] = any2i(self, val) self.explicit = 0 self.raw_packet_cache = None self.raw_packet_cache_fields = None self.wirelen = None elif attr == "payload": self.remove_payload() self.add_payload(val) else: self.payload.setfieldval(attr, val) def __setattr__(self, attr, val): if attr in 
self.__all_slots__: return object.__setattr__(self, attr, val) try: return self.setfieldval(attr, val) except AttributeError: pass return object.__setattr__(self, attr, val) def delfieldval(self, attr): if attr in self.fields: del(self.fields[attr]) self.explicit = 0 # in case a default value must be explicited self.raw_packet_cache = None self.raw_packet_cache_fields = None self.wirelen = None elif attr in self.default_fields: pass elif attr == "payload": self.remove_payload() else: self.payload.delfieldval(attr) def __delattr__(self, attr): if attr == "payload": return self.remove_payload() if attr in self.__all_slots__: return object.__delattr__(self, attr) try: return self.delfieldval(attr) except AttributeError: pass return object.__delattr__(self, attr) def _superdir(self): """ Return a list of slots and methods, including those from subclasses. """ attrs = set() cls = self.__class__ if hasattr(cls, '__all_slots__'): attrs.update(cls.__all_slots__) for bcls in cls.__mro__: if hasattr(bcls, '__dict__'): attrs.update(bcls.__dict__) return attrs def __dir__(self): """ Add fields to tab completion list. """ return sorted(itertools.chain(self._superdir(), self.default_fields)) def __repr__(self): s = "" ct = conf.color_theme for f in self.fields_desc: if isinstance(f, ConditionalField) and not f._evalcond(self): continue if f.name in self.fields: val = f.i2repr(self, self.fields[f.name]) elif f.name in self.overloaded_fields: val = f.i2repr(self, self.overloaded_fields[f.name]) else: continue if isinstance(f, Emph) or f in conf.emph: ncol = ct.emph_field_name vcol = ct.emph_field_value else: ncol = ct.field_name vcol = ct.field_value s += " %s%s%s" % (ncol(f.name), ct.punct("="), vcol(val)) return "%s%s %s %s%s%s" % (ct.punct("<"), ct.layer_name(self.__class__.__name__), s, ct.punct("|"), repr(self.payload), ct.punct(">")) def __str__(self): return str(self.build()) def __bytes__(self): return self.build() def __div__(self, other): if isinstance(other, Packet): cloneA = self.copy() cloneB = other.copy() cloneA.add_payload(cloneB) return cloneA elif isinstance(other, (bytes, str)): return self / conf.raw_layer(load=other) else: return other.__rdiv__(self) __truediv__ = __div__ def __rdiv__(self, other): if isinstance(other, (bytes, str)): return conf.raw_layer(load=other) / self else: raise TypeError __rtruediv__ = __rdiv__ def __mul__(self, other): if isinstance(other, int): return [self] * other else: raise TypeError def __rmul__(self, other): return self.__mul__(other) def __nonzero__(self): return True __bool__ = __nonzero__ def __len__(self): return len(self.__bytes__()) def copy_field_value(self, fieldname, value): return self.get_field(fieldname).do_copy(value) def copy_fields_dict(self, fields): if fields is None: return None return {fname: self.copy_field_value(fname, fval) for fname, fval in six.iteritems(fields)} def self_build(self, field_pos_list=None): """ Create the default layer regarding fields_desc dict :param field_pos_list: """ if self.raw_packet_cache is not None: for fname, fval in six.iteritems(self.raw_packet_cache_fields): if self.getfieldval(fname) != fval: self.raw_packet_cache = None self.raw_packet_cache_fields = None self.wirelen = None break if self.raw_packet_cache is not None: return self.raw_packet_cache p = b"" for f in self.fields_desc: val = self.getfieldval(f.name) if isinstance(val, RawVal): sval = raw(val) p += sval if field_pos_list is not None: field_pos_list.append((f.name, sval.encode("string_escape"), len(p), len(sval))) # noqa: E501 else: p = 
f.addfield(self, p, val) return p def do_build_payload(self): """ Create the default version of the payload layer :return: a string of payload layer """ return self.payload.do_build() def do_build(self): """ Create the default version of the layer :return: a string of the packet with the payload """ if not self.explicit: self = next(iter(self)) pkt = self.self_build() for t in self.post_transforms: pkt = t(pkt) pay = self.do_build_payload() if self.raw_packet_cache is None: return self.post_build(pkt, pay) else: return pkt + pay def build_padding(self): return self.payload.build_padding() def build(self): """ Create the current layer :return: string of the packet with the payload """ p = self.do_build() p += self.build_padding() p = self.build_done(p) return p def post_build(self, pkt, pay): """ DEV: called right after the current layer is build. :param str pkt: the current packet (build by self_buil function) :param str pay: the packet payload (build by do_build_payload function) :return: a string of the packet with the payload """ return pkt + pay def build_done(self, p): return self.payload.build_done(p) def do_build_ps(self): p = b"" pl = [] q = b"" for f in self.fields_desc: if isinstance(f, ConditionalField) and not f._evalcond(self): continue p = f.addfield(self, p, self.getfieldval(f.name)) if isinstance(p, bytes): r = p[len(q):] q = p else: r = b"" pl.append((f, f.i2repr(self, self.getfieldval(f.name)), r)) pkt, lst = self.payload.build_ps(internal=1) p += pkt lst.append((self, pl)) return p, lst def build_ps(self, internal=0): p, lst = self.do_build_ps() # if not internal: # pkt = self # while pkt.haslayer(conf.padding_layer): # pkt = pkt.getlayer(conf.padding_layer) # lst.append( (pkt, [ ("loakjkjd", pkt.load, pkt.load) ] ) ) # p += pkt.load # pkt = pkt.payload return p, lst def psdump(self, filename=None, **kargs): """ psdump(filename=None, layer_shift=0, rebuild=1) Creates an EPS file describing a packet. If filename is not provided a temporary file is created and gs is called. :param filename: the file's filename """ canvas = self.canvas_dump(**kargs) if filename is None: fname = get_temp_file(autoext=".eps") canvas.writeEPSfile(fname) if WINDOWS and conf.prog.psreader is None: os.startfile(fname) else: with ContextManagerSubprocess("psdump()", conf.prog.psreader): subprocess.Popen([conf.prog.psreader, fname]) else: canvas.writeEPSfile(filename) print() def pdfdump(self, filename=None, **kargs): """ pdfdump(filename=None, layer_shift=0, rebuild=1) Creates a PDF file describing a packet. If filename is not provided a temporary file is created and xpdf is called. 
:param filename: the file's filename """ canvas = self.canvas_dump(**kargs) if filename is None: fname = get_temp_file(autoext=".pdf") canvas.writePDFfile(fname) if WINDOWS and conf.prog.pdfreader is None: os.startfile(fname) else: with ContextManagerSubprocess("pdfdump()", conf.prog.pdfreader): # noqa: E501 subprocess.Popen([conf.prog.pdfreader, fname]) else: canvas.writePDFfile(filename) print() def canvas_dump(self, layer_shift=0, rebuild=1): if PYX == 0: raise ImportError("PyX and its dependencies must be installed") canvas = pyx.canvas.canvas() if rebuild: p, t = self.__class__(raw(self)).build_ps() else: p, t = self.build_ps() YTXT = len(t) for n, l in t: YTXT += len(l) YTXT = float(YTXT) YDUMP = YTXT XSTART = 1 XDSTART = 10 y = 0.0 yd = 0.0 XMUL = 0.55 YMUL = 0.4 backcolor = colgen(0.6, 0.8, 1.0, trans=pyx.color.rgb) forecolor = colgen(0.2, 0.5, 0.8, trans=pyx.color.rgb) # backcolor=makecol(0.376, 0.729, 0.525, 1.0) def hexstr(x): return " ".join("%02x" % orb(c) for c in x) def make_dump_txt(x, y, txt): return pyx.text.text(XDSTART + x * XMUL, (YDUMP - y) * YMUL, r"\tt{%s}" % hexstr(txt), [pyx.text.size.Large]) # noqa: E501 def make_box(o): return pyx.box.rect(o.left(), o.bottom(), o.width(), o.height(), relcenter=(0.5, 0.5)) # noqa: E501 def make_frame(lst): if len(lst) == 1: b = lst[0].bbox() b.enlarge(pyx.unit.u_pt) return b.path() else: fb = lst[0].bbox() fb.enlarge(pyx.unit.u_pt) lb = lst[-1].bbox() lb.enlarge(pyx.unit.u_pt) if len(lst) == 2 and fb.left() > lb.right(): return pyx.path.path(pyx.path.moveto(fb.right(), fb.top()), pyx.path.lineto(fb.left(), fb.top()), pyx.path.lineto(fb.left(), fb.bottom()), # noqa: E501 pyx.path.lineto(fb.right(), fb.bottom()), # noqa: E501 pyx.path.moveto(lb.left(), lb.top()), pyx.path.lineto(lb.right(), lb.top()), pyx.path.lineto(lb.right(), lb.bottom()), # noqa: E501 pyx.path.lineto(lb.left(), lb.bottom())) # noqa: E501 else: # XXX gb = lst[1].bbox() if gb != lb: gb.enlarge(pyx.unit.u_pt) kb = lst[-2].bbox() if kb != gb and kb != lb: kb.enlarge(pyx.unit.u_pt) return pyx.path.path(pyx.path.moveto(fb.left(), fb.top()), pyx.path.lineto(fb.right(), fb.top()), pyx.path.lineto(fb.right(), kb.bottom()), # noqa: E501 pyx.path.lineto(lb.right(), kb.bottom()), # noqa: E501 pyx.path.lineto(lb.right(), lb.bottom()), # noqa: E501 pyx.path.lineto(lb.left(), lb.bottom()), # noqa: E501 pyx.path.lineto(lb.left(), gb.top()), pyx.path.lineto(fb.left(), gb.top()), pyx.path.closepath(),) def make_dump(s, shift=0, y=0, col=None, bkcol=None, large=16): c = pyx.canvas.canvas() tlist = [] while s: dmp, s = s[:large - shift], s[large - shift:] txt = make_dump_txt(shift, y, dmp) tlist.append(txt) shift += len(dmp) if shift >= 16: shift = 0 y += 1 if col is None: col = pyx.color.rgb.red if bkcol is None: col = pyx.color.rgb.white c.stroke(make_frame(tlist), [col, pyx.deco.filled([bkcol]), pyx.style.linewidth.Thick]) # noqa: E501 for txt in tlist: c.insert(txt) return c, tlist[-1].bbox(), shift, y last_shift, last_y = 0, 0.0 while t: bkcol = next(backcolor) proto, fields = t.pop() y += 0.5 pt = pyx.text.text(XSTART, (YTXT - y) * YMUL, r"\font\cmssfont=cmss10\cmssfont{%s}" % proto.name, [pyx.text.size.Large]) # noqa: E501 y += 1 ptbb = pt.bbox() ptbb.enlarge(pyx.unit.u_pt * 2) canvas.stroke(ptbb.path(), [pyx.color.rgb.black, pyx.deco.filled([bkcol])]) # noqa: E501 canvas.insert(pt) for fname, fval, fdump in fields: col = next(forecolor) ft = pyx.text.text(XSTART, (YTXT - y) * YMUL, r"\font\cmssfont=cmss10\cmssfont{%s}" % tex_escape(fname.name)) # noqa: E501 if 
isinstance(fval, str): if len(fval) > 18: fval = fval[:18] + "[...]" else: fval = "" vt = pyx.text.text(XSTART + 3, (YTXT - y) * YMUL, r"\font\cmssfont=cmss10\cmssfont{%s}" % tex_escape(fval)) # noqa: E501 y += 1.0 if fdump: dt, target, last_shift, last_y = make_dump(fdump, last_shift, last_y, col, bkcol) # noqa: E501 dtb = dt.bbox() dtb = target vtb = vt.bbox() bxvt = make_box(vtb) bxdt = make_box(dtb) dtb.enlarge(pyx.unit.u_pt) try: if yd < 0: cnx = pyx.connector.curve(bxvt, bxdt, absangle1=0, absangle2=-90) # noqa: E501 else: cnx = pyx.connector.curve(bxvt, bxdt, absangle1=0, absangle2=90) # noqa: E501 except: pass else: canvas.stroke(cnx, [pyx.style.linewidth.thin, pyx.deco.earrow.small, col]) # noqa: E501 canvas.insert(dt) canvas.insert(ft) canvas.insert(vt) last_y += layer_shift return canvas def extract_padding(self, s): """ DEV: to be overloaded to extract current layer's padding. :param str s: the current layer :return: a couple of strings (actual layer, padding) """ return s, None def post_dissect(self, s): """DEV: is called right after the current layer has been dissected""" return s def pre_dissect(self, s): """DEV: is called right before the current layer is dissected""" return s def do_dissect(self, s): s = raw(s) _raw = s self.raw_packet_cache_fields = {} # Temporary value, used by getfield() in some advanced cases (eg: dot11) # noqa: E501 _lr = len(_raw) self._tmp_dissect_pos = 0 # How many bytes have already been dissected for f in self.fields_desc: if not s: break s, fval = f.getfield(self, s) self._tmp_dissect_pos = _lr - len(s) # We need to track fields with mutable values to discard # .raw_packet_cache when needed. if f.islist or f.holds_packets or f.ismutable: self.raw_packet_cache_fields[f.name] = f.do_copy(fval) self.fields[f.name] = fval assert(_raw.endswith(raw(s))) del self._tmp_dissect_pos self.raw_packet_cache = _raw[:-len(s)] if s else _raw self.explicit = 1 return s def do_dissect_payload(self, s): """ Perform the dissection of the layer's payload :param str s: the raw layer """ if s: cls = self.guess_payload_class(s) try: p = cls(s, _internal=1, _underlayer=self) except KeyboardInterrupt: raise except: if conf.debug_dissector: if issubtype(cls, Packet): log_runtime.error("%s dissector failed" % cls.__name__) else: log_runtime.error("%s.guess_payload_class() returned [%s]" % (self.__class__.__name__, repr(cls))) # noqa: E501 if cls is not None: raise p = conf.raw_layer(s, _internal=1, _underlayer=self) self.add_payload(p) def dissect(self, s): s = self.pre_dissect(s) s = self.do_dissect(s) s = self.post_dissect(s) payl, pad = self.extract_padding(s) self.do_dissect_payload(payl) if pad and conf.padding: self.add_payload(conf.padding_layer(pad)) def guess_payload_class(self, payload): """ DEV: Guesses the next payload class from layer bonds. Can be overloaded to use a different mechanism. :param str payload: the layer's payload :return: the payload class """ for t in self.aliastypes: for fval, cls in t.payload_guess: if all(hasattr(self, k) and v == self.getfieldval(k) for k, v in six.iteritems(fval)): return cls return self.default_payload_class(payload) def default_payload_class(self, payload): """ DEV: Returns the default payload class if nothing has been found by the guess_payload_class() method. 
:param str payload: the layer's payload :return: the default payload class define inside the configuration file """ return conf.raw_layer def hide_defaults(self): """Removes fields' values that are the same as default values.""" for k, v in list(self.fields.items()): # use list(): self.fields is modified in the loop # noqa: E501 v = self.fields[k] if k in self.default_fields: if self.default_fields[k] == v: del self.fields[k] self.payload.hide_defaults() def clone_with(self, payload=None, **kargs): pkt = self.__class__() pkt.explicit = 1 pkt.fields = kargs pkt.default_fields = self.copy_fields_dict(self.default_fields) pkt.overloaded_fields = self.overloaded_fields.copy() pkt.time = self.time pkt.underlayer = self.underlayer pkt.post_transforms = self.post_transforms pkt.raw_packet_cache = self.raw_packet_cache pkt.raw_packet_cache_fields = self.copy_fields_dict( self.raw_packet_cache_fields ) pkt.wirelen = self.wirelen if payload is not None: pkt.add_payload(payload) return pkt def __iter__(self): def loop(todo, done, self=self): if todo: eltname = todo.pop() elt = self.getfieldval(eltname) if not isinstance(elt, Gen): if self.get_field(eltname).islist: elt = SetGen([elt]) else: elt = SetGen(elt) for e in elt: done[eltname] = e for x in loop(todo[:], done): yield x else: if isinstance(self.payload, NoPayload): payloads = [None] else: payloads = self.payload for payl in payloads: done2 = done.copy() for k in done2: if isinstance(done2[k], VolatileValue): done2[k] = done2[k]._fix() pkt = self.clone_with(payload=payl, **done2) yield pkt if self.explicit or self.raw_packet_cache is not None: todo = [] done = self.fields else: todo = [k for (k, v) in itertools.chain(six.iteritems(self.default_fields), # noqa: E501 six.iteritems(self.overloaded_fields)) # noqa: E501 if isinstance(v, VolatileValue)] + list(self.fields.keys()) done = {} return loop(todo, done) def __iterlen__(self): """Predict the total length of the iterator""" fields = [key for (key, val) in itertools.chain(six.iteritems(self.default_fields), # noqa: E501 six.iteritems(self.overloaded_fields)) if isinstance(val, VolatileValue)] + list(self.fields.keys()) length = 1 for field in fields: val = self.getfieldval(field) if hasattr(val, "__iterlen__"): length *= val.__iterlen__() elif isinstance(val, tuple) and len(val) == 2 and all(hasattr(z, "__int__") for z in val): # noqa: E501 length *= (val[1] - val[0]) elif isinstance(val, list): len2 = 0 for x in val: if hasattr(x, "__iterlen__"): len2 += x.__iterlen__() elif isinstance(x, tuple) and len(x) == 2 and all(hasattr(z, "__int__") for z in x): # noqa: E501 len2 += (x[1] - x[0]) elif isinstance(x, list): len2 += len(x) else: len2 += 1 length *= len2 or 1 if not isinstance(self.payload, NoPayload): return length * self.payload.__iterlen__() return length def __gt__(self, other): """True if other is an answer from self (self ==> other).""" if isinstance(other, Packet): return other < self elif isinstance(other, bytes): return 1 else: raise TypeError((self, other)) def __lt__(self, other): """True if self is an answer from other (other ==> self).""" if isinstance(other, Packet): return self.answers(other) elif isinstance(other, bytes): return 1 else: raise TypeError((self, other)) def __eq__(self, other): if not isinstance(other, self.__class__): return False for f in self.fields_desc: if f not in other.fields_desc: return False if self.getfieldval(f.name) != other.getfieldval(f.name): return False return self.payload == other.payload def __ne__(self, other): return not 
self.__eq__(other) def hashret(self): """DEV: returns a string that has the same value for a request and its answer.""" # noqa: E501 return self.payload.hashret() def answers(self, other): """DEV: true if self is an answer from other""" if other.__class__ == self.__class__: return self.payload.answers(other.payload) return 0 def haslayer(self, cls): """true if self has a layer that is an instance of cls. Superseded by "cls in self" syntax.""" # noqa: E501 if self.__class__ == cls or cls in [self.__class__.__name__, self._name]: return True for f in self.packetfields: fvalue_gen = self.getfieldval(f.name) if fvalue_gen is None: continue if not f.islist: fvalue_gen = SetGen(fvalue_gen, _iterpacket=0) for fvalue in fvalue_gen: if isinstance(fvalue, Packet): ret = fvalue.haslayer(cls) if ret: return ret return self.payload.haslayer(cls) def getlayer(self, cls, nb=1, _track=None, _subclass=False, **flt): """Return the nb^th layer that is an instance of cls, matching flt values. """ if _subclass: match = lambda cls1, cls2: issubclass(cls1, cls2) else: match = lambda cls1, cls2: cls1 == cls2 if isinstance(cls, int): nb = cls + 1 cls = None if isinstance(cls, str) and "." in cls: ccls, fld = cls.split(".", 1) else: ccls, fld = cls, None if cls is None or match(self.__class__, cls) \ or ccls in [self.__class__.__name__, self._name]: if all(self.getfieldval(fldname) == fldvalue for fldname, fldvalue in six.iteritems(flt)): if nb == 1: if fld is None: return self else: return self.getfieldval(fld) else: nb -= 1 for f in self.packetfields: fvalue_gen = self.getfieldval(f.name) if fvalue_gen is None: continue if not f.islist: fvalue_gen = SetGen(fvalue_gen, _iterpacket=0) for fvalue in fvalue_gen: if isinstance(fvalue, Packet): track = [] ret = fvalue.getlayer(cls, nb=nb, _track=track, _subclass=_subclass, **flt) if ret is not None: return ret nb = track[0] return self.payload.getlayer(cls, nb=nb, _track=_track, _subclass=_subclass, **flt) def firstlayer(self): q = self while q.underlayer is not None: q = q.underlayer return q def __getitem__(self, cls): if isinstance(cls, slice): lname = cls.start if cls.stop: ret = self.getlayer(cls.start, nb=cls.stop, **(cls.step or {})) else: ret = self.getlayer(cls.start, **(cls.step or {})) else: lname = cls ret = self.getlayer(cls) if ret is None: if isinstance(lname, Packet_metaclass): lname = lname.__name__ elif not isinstance(lname, bytes): lname = repr(lname) raise IndexError("Layer [%s] not found" % lname) return ret def __delitem__(self, cls): del(self[cls].underlayer.payload) def __setitem__(self, cls, val): self[cls].underlayer.payload = val def __contains__(self, cls): """"cls in self" returns true if self has a layer which is an instance of cls.""" # noqa: E501 return self.haslayer(cls) def route(self): return (None, None, None) def fragment(self, *args, **kargs): return self.payload.fragment(*args, **kargs) def display(self, *args, **kargs): # Deprecated. Use show() """Deprecated. Use show() method.""" self.show(*args, **kargs) def _show_or_dump(self, dump=False, indent=3, lvl="", label_lvl="", first_call=True): # noqa: E501 """ Internal method that shows or dumps a hierarchical view of a packet. Called by show. 
:param dump: determine if it prints or returns the string value :param int indent: the size of indentation for each layer :param str lvl: additional information about the layer lvl :param str label_lvl: additional information about the layer fields :param first_call: determine if the current function is the first :return: return a hierarchical view if dump, else print it """ if dump: from scapy.themes import AnsiColorTheme ct = AnsiColorTheme() # No color for dump output else: ct = conf.color_theme s = "%s%s %s %s \n" % (label_lvl, ct.punct("###["), ct.layer_name(self.name), ct.punct("]###")) for f in self.fields_desc: if isinstance(f, ConditionalField) and not f._evalcond(self): continue if isinstance(f, Emph) or f in conf.emph: ncol = ct.emph_field_name vcol = ct.emph_field_value else: ncol = ct.field_name vcol = ct.field_value fvalue = self.getfieldval(f.name) if isinstance(fvalue, Packet) or (f.islist and f.holds_packets and isinstance(fvalue, list)): # noqa: E501 s += "%s \\%-10s\\\n" % (label_lvl + lvl, ncol(f.name)) fvalue_gen = SetGen(fvalue, _iterpacket=0) for fvalue in fvalue_gen: s += fvalue._show_or_dump(dump=dump, indent=indent, label_lvl=label_lvl + lvl + " |", first_call=False) # noqa: E501 else: begn = "%s %-10s%s " % (label_lvl + lvl, ncol(f.name), ct.punct("="),) reprval = f.i2repr(self, fvalue) if isinstance(reprval, str): reprval = reprval.replace("\n", "\n" + " " * (len(label_lvl) # noqa: E501 + len(lvl) + len(f.name) + 4)) s += "%s%s\n" % (begn, vcol(reprval)) if self.payload: s += self.payload._show_or_dump(dump=dump, indent=indent, lvl=lvl + (" " * indent * self.show_indent), label_lvl=label_lvl, first_call=False) # noqa: E501 if first_call and not dump: print(s) else: return s def show(self, dump=False, indent=3, lvl="", label_lvl=""): """ Prints or returns (when "dump" is true) a hierarchical view of the packet. :param dump: determine if it prints or returns the string value :param int indent: the size of indentation for each layer :param str lvl: additional information about the layer lvl :param str label_lvl: additional information about the layer fields :return: return a hierarchical view if dump, else print it """ return self._show_or_dump(dump, indent, lvl, label_lvl) def show2(self, dump=False, indent=3, lvl="", label_lvl=""): """ Prints or returns (when "dump" is true) a hierarchical view of an assembled version of the packet, so that automatic fields are calculated (checksums, etc.) :param dump: determine if it prints or returns the string value :param int indent: the size of indentation for each layer :param str lvl: additional information about the layer lvl :param str label_lvl: additional information about the layer fields :return: return a hierarchical view if dump, else print it """ return self.__class__(raw(self)).show(dump, indent, lvl, label_lvl) def sprintf(self, fmt, relax=1): """sprintf(format, [relax=1]) -> str where format is a string that can include directives. A directive begins and ends by % and has the following format %[fmt[r],][cls[:nb].]field%. fmt is a classic printf directive, "r" can be appended for raw substitution (ex: IP.flags=0x18 instead of SA), nb is the number of the layer we want (ex: for IP/IP packets, IP:2.src is the src of the upper IP layer). Special case : "%.time%" is the creation time. Ex : p.sprintf("%.time% %-15s,IP.src% -> %-15s,IP.dst% %IP.chksum% " "%03xr,IP.proto% %r,TCP.flags%") Moreover, the format string can include conditional statements. 
A conditional statement looks like : {layer:string} where layer is a layer name, and string is the string to insert in place of the condition if it is true, i.e. if layer is present. If layer is preceded by a "!", the result is inverted. Conditions can be imbricated. A valid statement can be : p.sprintf("This is a{TCP: TCP}{UDP: UDP}{ICMP:n ICMP} packet") p.sprintf("{IP:%IP.dst% {ICMP:%ICMP.type%}{TCP:%TCP.dport%}}") A side effect is that, to obtain "{" and "}" characters, you must use "%(" and "%)". """ escape = {"%": "%", "(": "{", ")": "}"} # Evaluate conditions while "{" in fmt: i = fmt.rindex("{") j = fmt[i + 1:].index("}") cond = fmt[i + 1:i + j + 1] k = cond.find(":") if k < 0: raise Scapy_Exception("Bad condition in format string: [%s] (read sprintf doc!)" % cond) # noqa: E501 cond, format = cond[:k], cond[k + 1:] res = False if cond[0] == "!": res = True cond = cond[1:] if self.haslayer(cond): res = not res if not res: format = "" fmt = fmt[:i] + format + fmt[i + j + 2:] # Evaluate directives s = "" while "%" in fmt: i = fmt.index("%") s += fmt[:i] fmt = fmt[i + 1:] if fmt and fmt[0] in escape: s += escape[fmt[0]] fmt = fmt[1:] continue try: i = fmt.index("%") sfclsfld = fmt[:i] fclsfld = sfclsfld.split(",") if len(fclsfld) == 1: f = "s" clsfld = fclsfld[0] elif len(fclsfld) == 2: f, clsfld = fclsfld else: raise Scapy_Exception if "." in clsfld: cls, fld = clsfld.split(".") else: cls = self.__class__.__name__ fld = clsfld num = 1 if ":" in cls: cls, num = cls.split(":") num = int(num) fmt = fmt[i + 1:] except: raise Scapy_Exception("Bad format string [%%%s%s]" % (fmt[:25], fmt[25:] and "...")) # noqa: E501 else: if fld == "time": val = time.strftime("%H:%M:%S.%%06i", time.localtime(self.time)) % int((self.time - int(self.time)) * 1000000) # noqa: E501 elif cls == self.__class__.__name__ and hasattr(self, fld): if num > 1: val = self.payload.sprintf("%%%s,%s:%s.%s%%" % (f, cls, num - 1, fld), relax) # noqa: E501 f = "s" elif f[-1] == "r": # Raw field value val = getattr(self, fld) f = f[:-1] if not f: f = "s" else: val = getattr(self, fld) if fld in self.fieldtype: val = self.fieldtype[fld].i2repr(self, val) else: val = self.payload.sprintf("%%%s%%" % sfclsfld, relax) f = "s" s += ("%" + f) % val s += fmt return s def mysummary(self): """DEV: can be overloaded to return a string that summarizes the layer. 
Only one mysummary() is used in a whole packet summary: the one of the upper layer, # noqa: E501 except if a mysummary() also returns (as a couple) a list of layers whose # noqa: E501 mysummary() must be called if they are present.""" return "" def _do_summary(self): found, s, needed = self.payload._do_summary() ret = "" if not found or self.__class__ in needed: ret = self.mysummary() if isinstance(ret, tuple): ret, n = ret needed += n if ret or needed: found = 1 if not ret: ret = self.__class__.__name__ if self.show_summary else "" if self.__class__ in conf.emph: impf = [] for f in self.fields_desc: if f in conf.emph: impf.append("%s=%s" % (f.name, f.i2repr(self, self.getfieldval(f.name)))) # noqa: E501 ret = "%s [%s]" % (ret, " ".join(impf)) if ret and s: ret = "%s / %s" % (ret, s) else: ret = "%s%s" % (ret, s) return found, ret, needed def summary(self, intern=0): """Prints a one line summary of a packet.""" found, s, needed = self._do_summary() return s def lastlayer(self, layer=None): """Returns the uppest layer of the packet""" return self.payload.lastlayer(self) def decode_payload_as(self, cls): """Reassembles the payload and decode it using another packet class""" s = raw(self.payload) self.payload = cls(s, _internal=1, _underlayer=self) pp = self while pp.underlayer is not None: pp = pp.underlayer self.payload.dissection_done(pp) def command(self): """Returns a string representing the command you have to type to obtain the same packet""" # noqa: E501 f = [] for fn, fv in self.fields.items(): fld = self.get_field(fn) if isinstance(fv, Packet): fv = fv.command() elif fld.islist and fld.holds_packets and isinstance(fv, list): fv = "[%s]" % ",".join(map(Packet.command, fv)) elif isinstance(fld, FlagsField): fv = int(fv) else: fv = repr(fv) f.append("%s=%s" % (fn, fv)) c = "%s(%s)" % (self.__class__.__name__, ", ".join(f)) pc = self.payload.command() if pc: c += "/" + pc return c class NoPayload(Packet): def __new__(cls, *args, **kargs): singl = cls.__dict__.get("__singl__") if singl is None: cls.__singl__ = singl = Packet.__new__(cls) Packet.__init__(singl) return singl def __init__(self, *args, **kargs): pass def dissection_done(self, pkt): return def add_payload(self, payload): raise Scapy_Exception("Can't add payload to NoPayload instance") def remove_payload(self): pass def add_underlayer(self, underlayer): pass def remove_underlayer(self, other): pass def copy(self): return self def __repr__(self): return "" def __str__(self): return "" def __bytes__(self): return b"" def __nonzero__(self): return False __bool__ = __nonzero__ def do_build(self): return b"" def build(self): return b"" def build_padding(self): return b"" def build_done(self, p): return p def build_ps(self, internal=0): return b"", [] def getfieldval(self, attr): raise AttributeError(attr) def getfield_and_val(self, attr): raise AttributeError(attr) def setfieldval(self, attr, val): raise AttributeError(attr) def delfieldval(self, attr): raise AttributeError(attr) def hide_defaults(self): pass def __iter__(self): return iter([]) def __eq__(self, other): if isinstance(other, NoPayload): return True return False def hashret(self): return b"" def answers(self, other): return isinstance(other, NoPayload) or isinstance(other, conf.padding_layer) # noqa: E501 def haslayer(self, cls): return 0 def getlayer(self, cls, nb=1, _track=None, **flt): if _track is not None: _track.append(nb) return None def fragment(self, *args, **kargs): raise Scapy_Exception("cannot fragment this packet") def show(self, indent=3, lvl="", 
label_lvl=""): pass def sprintf(self, fmt, relax): if relax: return "??" else: raise Scapy_Exception("Format not found [%s]" % fmt) def _do_summary(self): return 0, "", [] def lastlayer(self, layer): return layer def command(self): return "" #################### # packet classes # #################### class Raw(Packet): name = "Raw" fields_desc = [StrField("load", "")] def answers(self, other): return 1 # s = raw(other) # t = self.load # l = min(len(s), len(t)) # return s[:l] == t[:l] def mysummary(self): cs = conf.raw_summary if cs: if callable(cs): return "Raw %s" % cs(self.load) else: return "Raw %r" % self.load return Packet.mysummary(self) class Padding(Raw): name = "Padding" def self_build(self): return b"" def build_padding(self): return (raw(self.load) if self.raw_packet_cache is None else self.raw_packet_cache) + self.payload.build_padding() conf.raw_layer = Raw conf.padding_layer = Padding if conf.default_l2 is None: conf.default_l2 = Raw ################# # Bind layers # ################# def bind_bottom_up(lower, upper, __fval=None, **fval): """Bind 2 layers for dissection. The upper layer will be chosen for dissection on top of the lower layer, if ALL the passed arguments are validated. If multiple calls are made with the same # noqa: E501 layers, the last one will be used as default. ex: >>> bind_bottom_up(Ether, SNAP, type=0x1234) >>> Ether(b'\xff\xff\xff\xff\xff\xff\xd0P\x99V\xdd\xf9\x124\x00\x00\x00\x00\x00') # noqa: E501 <Ether dst=ff:ff:ff:ff:ff:ff src=d0:50:99:56:dd:f9 type=0x1234 |<SNAP OUI=0x0 code=0x0 |>> # noqa: E501 """ if __fval is not None: fval.update(__fval) lower.payload_guess = lower.payload_guess[:] lower.payload_guess.append((fval, upper)) def bind_top_down(lower, upper, __fval=None, **fval): """Bind 2 layers for building. When the upper layer is added as a payload of the lower layer, all the arguments # noqa: E501 will be applied to them. ex: >>> bind_top_down(Ether, SNAP, type=0x1234) >>> Ether()/SNAP() <Ether type=0x1234 |<SNAP |>> """ if __fval is not None: fval.update(__fval) upper._overload_fields = upper._overload_fields.copy() upper._overload_fields[lower] = fval @conf.commands.register def bind_layers(lower, upper, __fval=None, **fval): """Bind 2 layers on some specific fields' values. It makes the packet being built # noqa: E501 and dissected when the arguments are present. This functions calls both bind_bottom_up and bind_top_down, with all passed arguments. # noqa: E501 Please have a look at their docs: - help(bind_bottom_up) - help(bind_top_down) """ if __fval is not None: fval.update(__fval) bind_top_down(lower, upper, **fval) bind_bottom_up(lower, upper, **fval) def split_bottom_up(lower, upper, __fval=None, **fval): """This call un-links an association that was made using bind_bottom_up. Have a look at help(bind_bottom_up) """ if __fval is not None: fval.update(__fval) def do_filter(xxx_todo_changeme, upper=upper, fval=fval): (f, u) = xxx_todo_changeme return u != upper or any(k not in f or f[k] != v for k, v in six.iteritems(fval)) # noqa: E501 lower.payload_guess = [x for x in lower.payload_guess if do_filter(x)] def split_top_down(lower, upper, __fval=None, **fval): """This call un-links an association that was made using bind_top_down. 
Have a look at help(bind_top_down) """ if __fval is not None: fval.update(__fval) if lower in upper._overload_fields: ofval = upper._overload_fields[lower] if any(k not in ofval or ofval[k] != v for k, v in six.iteritems(fval)): # noqa: E501 return upper._overload_fields = upper._overload_fields.copy() del(upper._overload_fields[lower]) @conf.commands.register def split_layers(lower, upper, __fval=None, **fval): """Split 2 layers previously bound. This call un-links calls bind_top_down and bind_bottom_up. It is the opposite of # noqa: E501 bind_layers. Please have a look at their docs: - help(split_bottom_up) - help(split_top_down) """ if __fval is not None: fval.update(__fval) split_bottom_up(lower, upper, **fval) split_top_down(lower, upper, **fval) @conf.commands.register def ls(obj=None, case_sensitive=False, verbose=False): """List available layers, or infos on a given layer class or name""" is_string = isinstance(obj, six.string_types) if obj is None or is_string: if obj is None: all_layers = sorted(conf.layers, key=lambda x: x.__name__) else: pattern = re.compile(obj, 0 if case_sensitive else re.I) all_layers = sorted((layer for layer in conf.layers if (pattern.search(layer.__name__ or '') or pattern.search(layer.name or ''))), key=lambda x: x.__name__) for layer in all_layers: print("%-10s : %s" % (layer.__name__, layer._name)) else: is_pkt = isinstance(obj, Packet) if issubtype(obj, Packet) or is_pkt: for f in obj.fields_desc: cur_fld = f attrs = [] long_attrs = [] while isinstance(cur_fld, (Emph, ConditionalField)): if isinstance(cur_fld, ConditionalField): attrs.append(cur_fld.__class__.__name__[:4]) cur_fld = cur_fld.fld if verbose and isinstance(cur_fld, EnumField) \ and hasattr(cur_fld, "i2s"): if len(cur_fld.i2s) < 50: long_attrs.extend( "%s: %d" % (strval, numval) for numval, strval in sorted(six.iteritems(cur_fld.i2s)) ) elif isinstance(cur_fld, MultiEnumField): fld_depend = cur_fld.depends_on(obj.__class__ if is_pkt else obj) attrs.append("Depends on %s" % fld_depend.name) if verbose: cur_i2s = cur_fld.i2s_multi.get( cur_fld.depends_on(obj if is_pkt else obj()), {} ) if len(cur_i2s) < 50: long_attrs.extend( "%s: %d" % (strval, numval) for numval, strval in sorted(six.iteritems(cur_i2s)) ) elif verbose and isinstance(cur_fld, FlagsField): names = cur_fld.names long_attrs.append(", ".join(names)) class_name = "%s (%s)" % ( cur_fld.__class__.__name__, ", ".join(attrs)) if attrs else cur_fld.__class__.__name__ if isinstance(cur_fld, BitField): class_name += " (%d bit%s)" % (cur_fld.size, "s" if cur_fld.size > 1 else "") print("%-10s : %-35s =" % (f.name, class_name), end=' ') if is_pkt: print("%-15r" % (getattr(obj, f.name),), end=' ') print("(%r)" % (f.default,)) for attr in long_attrs: print("%-15s%s" % ("", attr)) if is_pkt and not isinstance(obj.payload, NoPayload): print("--") ls(obj.payload) else: print("Not a packet class or name. Type 'ls()' to list packet classes.") # noqa: E501 ############# # Fuzzing # ############# @conf.commands.register def fuzz(p, _inplace=0): """Transform a layer into a fuzzy layer by replacing some default values by random objects""" # noqa: E501 if not _inplace: p = p.copy() q = p while not isinstance(q, NoPayload): for f in q.fields_desc: if isinstance(f, PacketListField): for r in getattr(q, f.name): print("fuzzing", repr(r)) fuzz(r, _inplace=1) elif f.default is not None: if not isinstance(f, ConditionalField) or f._evalcond(q): rnd = f.randval() if rnd is not None: q.default_fields[f.name] = rnd q = q.payload return p
1
13,646
It's usually common practice not to use `l` as a variable (we have disabled this test in scapy because our code has tons of them, but it would have failed PEP8 otherwise), and it would be better not to create any new ones. In general, single-letter variables are not liked that much, especially on Windows where the fonts make 1 and l look alike. How about `lay`?
secdev-scapy
py
@@ -40,6 +40,12 @@ func (s *server) chunkUploadHandler(w http.ResponseWriter, r *http.Request) { return } + w.Header().Set(SwarmTagUidHeader, fmt.Sprint(tag.Uid)) + w.WriteHeader(http.StatusContinue) + if f, ok := w.(http.Flusher); ok { + f.Flush() + } + // Add the tag to the context ctx := sctx.SetTag(r.Context(), tag)
1
// Copyright 2020 The Swarm Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package api import ( "bytes" "errors" "fmt" "io" "io/ioutil" "net/http" "github.com/ethersphere/bee/pkg/netstore" "github.com/ethersphere/bee/pkg/jsonhttp" "github.com/ethersphere/bee/pkg/sctx" "github.com/ethersphere/bee/pkg/storage" "github.com/ethersphere/bee/pkg/swarm" "github.com/ethersphere/bee/pkg/tags" "github.com/gorilla/mux" ) func (s *server) chunkUploadHandler(w http.ResponseWriter, r *http.Request) { addr := mux.Vars(r)["addr"] address, err := swarm.ParseHexAddress(addr) if err != nil { s.Logger.Debugf("chunk upload: parse chunk address %s: %v", addr, err) s.Logger.Error("chunk upload: parse chunk address") jsonhttp.BadRequest(w, "invalid chunk address") return } tag, _, err := s.getOrCreateTag(r.Header.Get(SwarmTagUidHeader)) if err != nil { s.Logger.Debugf("chunk upload: get or create tag: %v", err) s.Logger.Error("chunk upload: get or create tag") jsonhttp.InternalServerError(w, "cannot get or create tag") return } // Add the tag to the context ctx := sctx.SetTag(r.Context(), tag) // Increment the StateSplit here since we dont have a splitter for the file upload tag.Inc(tags.StateSplit) data, err := ioutil.ReadAll(r.Body) if err != nil { if jsonhttp.HandleBodyReadError(err, w) { return } s.Logger.Debugf("chunk upload: read chunk data error: %v, addr %s", err, address) s.Logger.Error("chunk upload: read chunk data error") jsonhttp.InternalServerError(w, "cannot read chunk data") return } seen, err := s.Storer.Put(ctx, requestModePut(r), swarm.NewChunk(address, data)) if err != nil { s.Logger.Debugf("chunk upload: chunk write error: %v, addr %s", err, address) s.Logger.Error("chunk upload: chunk write error") jsonhttp.BadRequest(w, "chunk write error") return } else if len(seen) > 0 && seen[0] { tag.Inc(tags.StateSeen) } // Indicate that the chunk is stored tag.Inc(tags.StateStored) w.Header().Set(SwarmTagUidHeader, fmt.Sprint(tag.Uid)) w.Header().Set("Access-Control-Expose-Headers", SwarmTagUidHeader) jsonhttp.OK(w, nil) } func (s *server) chunkGetHandler(w http.ResponseWriter, r *http.Request) { targets := r.URL.Query().Get("targets") r = r.WithContext(sctx.SetTargets(r.Context(), targets)) addr := mux.Vars(r)["addr"] ctx := r.Context() address, err := swarm.ParseHexAddress(addr) if err != nil { s.Logger.Debugf("chunk: parse chunk address %s: %v", addr, err) s.Logger.Error("chunk: parse chunk address error") jsonhttp.BadRequest(w, "invalid chunk address") return } chunk, err := s.Storer.Get(ctx, storage.ModeGetRequest, address) if err != nil { if errors.Is(err, storage.ErrNotFound) { s.Logger.Trace("chunk: chunk not found. addr %s", address) jsonhttp.NotFound(w, "chunk not found") return } if errors.Is(err, netstore.ErrRecoveryAttempt) { s.Logger.Trace("chunk: chunk recovery initiated. addr %s", address) jsonhttp.Accepted(w, "chunk recovery initiated. retry after sometime.") return } s.Logger.Debugf("chunk: chunk read error: %v ,addr %s", err, address) s.Logger.Error("chunk: chunk read error") jsonhttp.InternalServerError(w, "chunk read error") return } w.Header().Set("Content-Type", "binary/octet-stream") w.Header().Set(TargetsRecoveryHeader, targets) _, _ = io.Copy(w, bytes.NewReader(chunk.Data())) }
1
11,927
DRY these 5 lines into a function call, no?
ethersphere-bee
go
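The "DRY these 5 lines" comment above refers to the header-set / StatusContinue / flush sequence the patch adds to the upload handler. Below is a minimal sketch of the kind of helper it could be folded into; the function name and the uint32 type of the tag UID are assumptions made here, not taken from the bee repository.

```go
package api

import (
	"fmt"
	"net/http"
)

// writeTagContinue exposes the tag UID to the client and flushes an early
// 100 Continue, so every upload handler can do this in a single call.
// SwarmTagUidHeader is the header-name constant already defined in this
// package; the uint32 type of the UID is an assumption.
func writeTagContinue(w http.ResponseWriter, tagUid uint32) {
	w.Header().Set(SwarmTagUidHeader, fmt.Sprint(tagUid))
	w.WriteHeader(http.StatusContinue)
	if f, ok := w.(http.Flusher); ok {
		f.Flush() // silently skipped when the ResponseWriter cannot flush
	}
}
```

The five added lines in chunkUploadHandler would then shrink to a single `writeTagContinue(w, tag.Uid)` call, reusable by the other upload handlers.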
@@ -113,12 +113,13 @@ func Main() int { signal.Notify(term, os.Interrupt, syscall.SIGTERM) select { case <-term: - fmt.Fprint(os.Stdout, "Received SIGTERM, exiting gracefully...") + logger.Log("msg", "Received SIGTERM, exiting gracefully...") l.Close() close(stopc) wg.Wait() - case <-errc: - fmt.Fprintf(os.Stderr, "Unhandled error received. Exiting...") + case err := <-errc: + logger.Log("msg", "Unhandled error received. Exiting...", "err", err) + l.Close() close(stopc) wg.Wait() return 1
1
// Copyright 2016 The prometheus-operator Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "flag" "fmt" "net" "net/http" "os" "os/signal" "sync" "syscall" "github.com/coreos/prometheus-operator/pkg/alertmanager" "github.com/coreos/prometheus-operator/pkg/analytics" "github.com/coreos/prometheus-operator/pkg/api" "github.com/coreos/prometheus-operator/pkg/prometheus" "github.com/go-kit/kit/log" ) var ( cfg prometheus.Config analyticsEnabled bool ) func init() { flagset := flag.NewFlagSet(os.Args[0], flag.ExitOnError) flagset.StringVar(&cfg.Host, "apiserver", "", "API Server addr, e.g. ' - NOT RECOMMENDED FOR PRODUCTION - http://127.0.0.1:8080'. Omit parameter to run in on-cluster mode and utilize the service account token.") flagset.StringVar(&cfg.TLSConfig.CertFile, "cert-file", "", " - NOT RECOMMENDED FOR PRODUCTION - Path to public TLS certificate file.") flagset.StringVar(&cfg.TLSConfig.KeyFile, "key-file", "", "- NOT RECOMMENDED FOR PRODUCTION - Path to private TLS certificate file.") flagset.StringVar(&cfg.TLSConfig.CAFile, "ca-file", "", "- NOT RECOMMENDED FOR PRODUCTION - Path to TLS CA file.") flagset.BoolVar(&cfg.TLSInsecure, "tls-insecure", false, "- NOT RECOMMENDED FOR PRODUCTION - Don't verify API server's CA certificate.") flagset.BoolVar(&analyticsEnabled, "analytics", true, "Send analytical event (Cluster Created/Deleted etc.) to Google Analytics") flagset.Parse(os.Args[1:]) } func Main() int { logger := log.NewContext(log.NewLogfmtLogger(os.Stdout)). With("ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) if analyticsEnabled { analytics.Enable() } po, err := prometheus.New(cfg, logger.With("component", "prometheusoperator")) if err != nil { fmt.Fprint(os.Stderr, err) return 1 } ao, err := alertmanager.New(cfg, logger.With("component", "alertmanageroperator")) if err != nil { fmt.Fprint(os.Stderr, err) return 1 } web, err := api.New(cfg, logger.With("component", "api")) if err != nil { fmt.Fprint(os.Stderr, err) return 1 } mux := http.DefaultServeMux web.Register(mux) l, err := net.Listen("tcp", ":8080") if err != nil { fmt.Fprint(os.Stderr, err) return 1 } stopc := make(chan struct{}) errc := make(chan error) var wg sync.WaitGroup wg.Add(1) go func() { if err := po.Run(stopc); err != nil { errc <- err } wg.Done() }() wg.Add(1) go func() { if err := ao.Run(stopc); err != nil { errc <- err } wg.Done() }() go func() { if err := http.Serve(l, nil); err != nil { errc <- err } }() term := make(chan os.Signal) signal.Notify(term, os.Interrupt, syscall.SIGTERM) select { case <-term: fmt.Fprint(os.Stdout, "Received SIGTERM, exiting gracefully...") l.Close() close(stopc) wg.Wait() case <-errc: fmt.Fprintf(os.Stderr, "Unhandled error received. Exiting...") close(stopc) wg.Wait() return 1 } return 0 } func main() { os.Exit(Main()) }
1
7,461
Should we be trying to close here, or just return directly, @fabxc? Same goes for `stopc`...
prometheus-operator-prometheus-operator
go
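The review question above (close the listener and `stopc` in the error branch, or just return) can also be side-stepped by running the cleanup once via defer, so both select branches only decide the exit code. A rough sketch of that alternative follows; the helper name and its parameter list are invented here, not part of the operator code.

```go
import (
	"net"
	"os"
	"os/signal"
	"sync"
	"syscall"

	"github.com/go-kit/kit/log"
)

// waitForShutdown blocks until SIGTERM or an unhandled operator error and runs
// the shutdown sequence exactly once on the way out, whatever the reason.
func waitForShutdown(logger log.Logger, l net.Listener, stopc chan struct{}, errc <-chan error, wg *sync.WaitGroup) int {
	defer func() {
		l.Close()    // stop serving the HTTP API
		close(stopc) // ask both operators to stop
		wg.Wait()    // wait until they have stopped
	}()

	term := make(chan os.Signal)
	signal.Notify(term, os.Interrupt, syscall.SIGTERM)

	select {
	case <-term:
		logger.Log("msg", "Received SIGTERM, exiting gracefully...")
		return 0
	case err := <-errc:
		logger.Log("msg", "Unhandled error received. Exiting...", "err", err)
		return 1
	}
}
```

Main() would then end with `return waitForShutdown(logger, l, stopc, errc, &wg)`, and neither branch needs to repeat the close/wait sequence.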
@@ -177,7 +177,7 @@ describe Ncr::ApprovalManager do context "for a BA60 or BA61 request" do it "uses BA61 tier1 team approver when org code matches" do - org_letters = %w( 7 J 4 T 1 A C Z ) + org_letters = %w( 1 4 7 A C J T Z ) org_letters.each do |org_letter| org_code = "P11#{org_letter}XXXX" ncr_org = create(:ncr_organization, code: org_code)
1
describe Ncr::ApprovalManager do describe '#setup_approvals_and_observers' do let (:ba61_tier_one) { Ncr::Mailboxes.ba61_tier1_budget } let (:ba61_tier_two) { Ncr::Mailboxes.ba61_tier2_budget } it "creates approvers when not an emergency" do wo = create(:ncr_work_order, expense_type: 'BA61') manager = Ncr::ApprovalManager.new(wo) manager.setup_approvals_and_observers expect(wo.observations.length).to eq(0) expect(wo.approvers).to eq([ wo.approving_official, ba61_tier_one, ba61_tier_two ]) wo.reload expect(wo.completed?).to eq(false) end it "replaces approving official step when approving official changed to system approver delegate" do new_user = create(:user) user = create(:user) work_order = create(:ncr_work_order, approving_official: user) Ncr::ApprovalManager.new(work_order).setup_approvals_and_observers create(:user_delegate, assignee: new_user, assigner: work_order.proposal.individual_steps.last.user) work_order.update(approving_official: new_user) Ncr::ApprovalManager.new(work_order).setup_approvals_and_observers expect(work_order.proposal.individual_steps.count).to eq 3 expect(work_order.proposal.steps.where(user: user)).not_to be_present expect(work_order.proposal.steps.where(user: new_user)).to be_present end it "reuses existing approvals" do wo = create(:ncr_work_order, expense_type: 'BA61') manager = Ncr::ApprovalManager.new(wo) manager.setup_approvals_and_observers first_approval = wo.individual_steps.first wo.reload.setup_approvals_and_observers expect(wo.individual_steps.first).to eq(first_approval) end it "creates observers when in an emergency" do wo = create(:ncr_work_order, expense_type: 'BA61', emergency: true) manager = Ncr::ApprovalManager.new(wo) manager.setup_approvals_and_observers expect(wo.observers).to match_array([ wo.approving_official, ba61_tier_one, ba61_tier_two ].uniq) expect(wo.steps.length).to eq(0) wo.clear_association_cache expect(wo.completed?).to eq(true) end it "accounts for approver transitions when nothing's completed" do email = "[email protected]" approving_official = create(:user, email_address: email) organization = create(:whsc_organization) ba80_budget = Ncr::Mailboxes.ba80_budget wo = create( :ncr_work_order, approving_official: approving_official, expense_type: "BA61", ) manager = Ncr::ApprovalManager.new(wo) manager.setup_approvals_and_observers expect(wo.approvers).to eq [ approving_official, ba61_tier_one, ba61_tier_two ] wo.update(ncr_organization: organization) manager.setup_approvals_and_observers expect(wo.reload.approvers).to eq [ approving_official, ba61_tier_two ] approving_official_2 = create(:user, email_address: "[email protected]") wo.update(approving_official: approving_official_2) manager.setup_approvals_and_observers expect(wo.reload.approvers).to eq [ approving_official_2, ba61_tier_two ] wo.update(approving_official: approving_official) wo.update(expense_type: "BA80") manager.setup_approvals_and_observers expect(wo.reload.approvers).to eq [ approving_official, ba80_budget ] end it "unsets the approval status" do ba80_budget = Ncr::Mailboxes.ba80_budget wo = create(:ba80_ncr_work_order) manager = Ncr::ApprovalManager.new(wo) manager.setup_approvals_and_observers expect(wo.approvers).to eq [ wo.approving_official, ba80_budget ] wo.individual_steps.first.complete! wo.individual_steps.second.complete! 
expect(wo.reload.completed?).to be true wo.update(expense_type: 'BA61') manager.setup_approvals_and_observers expect(wo.reload.pending?).to be true end it "does not re-add observers on emergencies" do wo = create(:ncr_work_order, expense_type: 'BA61', emergency: true) manager = Ncr::ApprovalManager.new(wo) manager.setup_approvals_and_observers expect(wo.steps).to be_empty expect(wo.observers.count).to be 3 manager.setup_approvals_and_observers wo.reload expect(wo.steps).to be_empty expect(wo.observers.count).to be 3 end it "handles the delegate then update scenario" do wo = create(:ba80_ncr_work_order) manager = Ncr::ApprovalManager.new(wo) manager.setup_approvals_and_observers delegate_user = create(:user) wo.approvers.second.add_delegate(delegate_user) wo.individual_steps.second.update(completer: delegate_user) wo.individual_steps.first.complete! wo.individual_steps.second.complete! manager.setup_approvals_and_observers wo.reload expect(wo.completed?).to be true expect(wo.individual_steps.second.completer).to eq delegate_user end end describe '#system_approvers' do context "for a BA61 request" do let (:ba61_tier_one) { Ncr::Mailboxes.ba61_tier1_budget } let (:ba61_tier_two) { Ncr::Mailboxes.ba61_tier2_budget } it "skips the Tier 1 budget approver for WHSC" do ncr_organization = create(:whsc_organization) work_order = create( :ncr_work_order, expense_type: "BA61", ncr_organization: ncr_organization ) manager = Ncr::ApprovalManager.new(work_order) expect(manager.system_approvers).to eq([ ba61_tier_two ]) end it "includes the Tier 1 budget approver for an unknown organization" do work_order = create(:ncr_work_order, expense_type: "BA61") manager = Ncr::ApprovalManager.new(work_order) expect(manager.system_approvers).to eq([ ba61_tier_one, ba61_tier_two ]) end end context "for a BA60 or BA61 request" do it "uses BA61 tier1 team approver when org code matches" do org_letters = %w( 7 J 4 T 1 A C Z ) org_letters.each do |org_letter| org_code = "P11#{org_letter}XXXX" ncr_org = create(:ncr_organization, code: org_code) ba60_work_order = create(:ba60_ncr_work_order, ncr_organization: ncr_org) ba61_work_order = create(:ba61_ncr_work_order, ncr_organization: ncr_org) ba80_work_order = create(:ba80_ncr_work_order, ncr_organization: ncr_org) ba60_work_order.setup_approvals_and_observers ba61_work_order.setup_approvals_and_observers ba80_work_order.setup_approvals_and_observers expect(ba60_work_order.budget_approvals.first.user_id).to eq(Ncr::Mailboxes.ba61_tier1_budget_team.id) expect(ba61_work_order.budget_approvals.first.user_id).to eq(Ncr::Mailboxes.ba61_tier1_budget_team.id) expect(ba80_work_order.budget_approvals.first.user_id).to_not eq(Ncr::Mailboxes.ba61_tier1_budget_team.id) end end end context "for a BA80 request" do it "uses the general budget email" do ba80_budget = Ncr::Mailboxes.ba80_budget work_order = create(:ba80_ncr_work_order) manager = Ncr::ApprovalManager.new(work_order) expect(manager.system_approvers).to eq([ba80_budget]) end it "uses the OOL budget email for their org code" do budget = Ncr::Mailboxes.ool_ba80_budget ool_organization = create(:ool_organization) work_order = create(:ba80_ncr_work_order, ncr_organization: ool_organization) manager = Ncr::ApprovalManager.new(work_order) expect(manager.system_approvers).to eq([budget]) end end end end
1
17,160
just re-ordered for clarity
18F-C2
rb
@@ -69,13 +69,13 @@ func NewVolumeInfo(URL string, volname string, namespace string) (volInfo *Volum } if resp != nil && resp.StatusCode != 200 { if resp.StatusCode == 500 { - fmt.Printf("Volume: %s not found at M_API server\n", volname) + fmt.Printf("Volume: %s not found at namespace: %q\n", volname, namespace) err = util.InternalServerError } else if resp.StatusCode == 503 { fmt.Println("M_API server not reachable") err = util.ServerUnavailable } else if resp.StatusCode == 404 { - fmt.Printf("Volume: %s not found at M_API server\n", volname) + fmt.Printf("Volume: %s not found at namespace: %q error: %s\n", volname, namespace, http.StatusText(resp.StatusCode)) err = util.PageNotFound } fmt.Printf("Received an error from M_API server: statuscode: %d", resp.StatusCode)
1
/* Copyright 2017 The OpenEBS Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package command import ( "encoding/json" "errors" "flag" "fmt" "net/http" "strings" "time" "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" "github.com/openebs/maya/pkg/util" "github.com/spf13/cobra" ) // VolumeInfo stores the volume information type VolumeInfo struct { Volume v1alpha1.CASVolume } const ( // VolumeAPIPath is the api path to get volume information VolumeAPIPath = "/latest/volumes/" controllerStatusOk = "running" volumeStatusOK = "Running" // JivaStorageEngine is constant for jiva engine JivaStorageEngine CASType = "jiva" // CstorStorageEngine is constant for cstor engine CstorStorageEngine CASType = "cstor" timeout = 5 * time.Second ) // CASType is engine type type CASType string // NewVolumeInfo fetches and fills CASVolume structure from URL given to it func NewVolumeInfo(URL string, volname string, namespace string) (volInfo *VolumeInfo, err error) { url := URL req, err := http.NewRequest("GET", url, nil) if err != nil { return } req.Header.Set("namespace", namespace) c := &http.Client{ Timeout: timeout, } resp, err := c.Do(req) if err != nil { fmt.Printf("Can't get a response, error found: %v", err) return } if resp != nil && resp.StatusCode != 200 { if resp.StatusCode == 500 { fmt.Printf("Volume: %s not found at M_API server\n", volname) err = util.InternalServerError } else if resp.StatusCode == 503 { fmt.Println("M_API server not reachable") err = util.ServerUnavailable } else if resp.StatusCode == 404 { fmt.Printf("Volume: %s not found at M_API server\n", volname) err = util.PageNotFound } fmt.Printf("Received an error from M_API server: statuscode: %d", resp.StatusCode) err = fmt.Errorf("Received an error from M_API server: statuscode: %d", resp.StatusCode) return } defer resp.Body.Close() casVol := v1alpha1.CASVolume{} err = json.NewDecoder(resp.Body).Decode(&casVol) if err != nil { fmt.Printf("Response decode failed: error '%+v'", err) return } if casVol.Status.Reason == "pending" { fmt.Println("VOLUME status Unknown to M_API server") err = fmt.Errorf("VOLUME status Unknown to M_API server") return } volInfo = &VolumeInfo{ Volume: casVol, } return } // GetCASType returns the CASType of the volume in lowercase func (volInfo *VolumeInfo) GetCASType() string { if len(volInfo.Volume.Spec.CasType) == 0 { return string(JivaStorageEngine) } return strings.ToLower(volInfo.Volume.Spec.CasType) } // GetClusterIP returns the ClusterIP of the cluster func (volInfo *VolumeInfo) GetClusterIP() string { if val, ok := volInfo.Volume.ObjectMeta.Annotations["openebs.io/cluster-ips"]; ok { return val } else if val, ok := volInfo.Volume.ObjectMeta.Annotations["vsm.openebs.io/cluster-ips"]; ok { return val } return "" } // GetControllerStatus returns the status of the volume controller func (volInfo *VolumeInfo) GetControllerStatus() string { if val, ok := volInfo.Volume.ObjectMeta.Annotations["openebs.io/controller-status"]; ok { return val } else if val, ok := 
volInfo.Volume.ObjectMeta.Annotations["vsm.openebs.io/controller-status"]; ok { return val } return "" } // GetIQN returns the IQN of the volume func (volInfo *VolumeInfo) GetIQN() string { return volInfo.Volume.Spec.Iqn } // GetVolumeName returns the volume name func (volInfo *VolumeInfo) GetVolumeName() string { return volInfo.Volume.ObjectMeta.Name } // GetTargetPortal returns the TargetPortal of the volume func (volInfo *VolumeInfo) GetTargetPortal() string { return volInfo.Volume.Spec.TargetPortal } // GetVolumeSize returns the capacity of the volume func (volInfo *VolumeInfo) GetVolumeSize() string { return volInfo.Volume.Spec.Capacity } // GetReplicaCount returns the volume replica count func (volInfo *VolumeInfo) GetReplicaCount() string { return volInfo.Volume.Spec.Replicas } // GetReplicaStatus returns the replica status of the volume replica func (volInfo *VolumeInfo) GetReplicaStatus() string { if val, ok := volInfo.Volume.ObjectMeta.Annotations["openebs.io/replica-status"]; ok { return val } else if val, ok := volInfo.Volume.ObjectMeta.Annotations["vsm.openebs.io/replica-status"]; ok { return val } return "" } // GetReplicaIP returns the IP of volume replica func (volInfo *VolumeInfo) GetReplicaIP() string { if val, ok := volInfo.Volume.ObjectMeta.Annotations["openebs.io/replica-ips"]; ok { return val } else if val, ok := volInfo.Volume.ObjectMeta.Annotations["vsm.openebs.io/replica-ips"]; ok { return val } return "" } var ( volumeCommandHelpText = ` The following commands helps in operating a Volume such as create, list, and so on. Usage: mayactl volume <subcommand> [options] [args] Examples: # Create a Volume: $ mayactl volume create --volname <vol> --size <size> # List Volumes: $ mayactl volume list # Delete a Volume: $ mayactl volume delete --volname <vol> # Delete a Volume created in 'test' namespace: $ mayactl volume delete --volname <vol> --namespace test # Statistics of a Volume: $ mayactl volume stats --volname <vol> # Statistics of a Volume created in 'test' namespace: $ mayactl volume stats --volname <vol> --namespace test # Info of a Volume: $ mayactl volume info --volname <vol> # Info of a Volume created in 'test' namespace: $ mayactl volume info --volname <vol> --namespace test ` options = &CmdVolumeOptions{ namespace: "default", } ) // CmdVolumeOptions stores information of volume being operated type CmdVolumeOptions struct { volName string sourceVolumeName string snapshotName string size string namespace string json string } // NewCmdVolume provides options for managing OpenEBS Volume func NewCmdVolume() *cobra.Command { cmd := &cobra.Command{ Use: "volume", Short: "Provides operations related to a Volume", Long: volumeCommandHelpText, } cmd.AddCommand( NewCmdVolumeCreate(), NewCmdVolumesList(), NewCmdVolumeDelete(), NewCmdVolumeStats(), NewCmdVolumeInfo(), ) cmd.PersistentFlags().StringVarP(&options.namespace, "namespace", "n", options.namespace, "namespace name, required if volume is not in the default namespace") cmd.PersistentFlags().AddGoFlagSet(flag.CommandLine) flag.CommandLine.Parse([]string{}) return cmd } // Validate verifies whether a volume name,source name or snapshot name is provided or not followed by // stats command. It returns nil and proceeds to execute the command if there is // no error and returns an error if it is missing. func (c *CmdVolumeOptions) Validate(cmd *cobra.Command, snapshotnameverify, sourcenameverify, volnameverify bool) error { if snapshotnameverify { if len(c.snapshotName) == 0 { return errors.New("--snapname is missing. 
Please provide a snapshotname") } } if sourcenameverify { if len(c.sourceVolumeName) == 0 { return errors.New("--sourcevol is missing. Please specify a sourcevolumename") } } if volnameverify { if len(c.volName) == 0 { return errors.New("--volname is missing. Please specify a unique volumename") } } return nil }
1
9,290
Can you also fix this to print the IP address used to connect? Rename "M_API server" --> "maya apiservice".
openebs-maya
go
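The review comment above asks for two things: print the address that was used to connect and say "maya apiservice" instead of "M_API server". Here is a hypothetical rewording of the status-check block inside NewVolumeInfo; the message text is a guess, `url` is the local variable that already holds the apiserver address (and therefore its IP), and whether to keep the original trailing generic error wrapper is left to the author.

```go
if resp != nil && resp.StatusCode != http.StatusOK {
	switch resp.StatusCode {
	case http.StatusInternalServerError:
		fmt.Printf("Volume %q not found in namespace %q (maya apiservice at %s)\n", volname, namespace, url)
		err = util.InternalServerError
	case http.StatusNotFound:
		fmt.Printf("Volume %q not found in namespace %q (maya apiservice at %s): %s\n",
			volname, namespace, url, http.StatusText(resp.StatusCode))
		err = util.PageNotFound
	case http.StatusServiceUnavailable:
		fmt.Printf("maya apiservice not reachable at %s\n", url)
		err = util.ServerUnavailable
	default:
		err = fmt.Errorf("maya apiservice at %s returned status code %d", url, resp.StatusCode)
	}
	return
}
```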
@@ -205,6 +205,18 @@ func decodeUTF8(s string, index uintptr) (rune, uintptr) { } } +// indexByte returns the index of the first instance of c in the byte slice b, +// or -1 if c is not present in the byte slice. +//go:linkname indexByte internal/bytealg.IndexByte +func indexByte(b []byte, c byte) int { + for i, x := range b { + if x == c { + return i + } + } + return -1 +} + // indexByteString returns the index of the first instance of c in s, or -1 if c // is not present in s. //go:linkname indexByteString internal/bytealg.IndexByteString
1
package runtime // This file implements functions related to Go strings. import ( "unsafe" ) // The underlying struct for the Go string type. type _string struct { ptr *byte length uintptr } // The iterator state for a range over a string. type stringIterator struct { byteindex uintptr } // Return true iff the strings match. //go:nobounds func stringEqual(x, y string) bool { if len(x) != len(y) { return false } for i := 0; i < len(x); i++ { if x[i] != y[i] { return false } } return true } // Return true iff x < y. //go:nobounds func stringLess(x, y string) bool { l := len(x) if m := len(y); m < l { l = m } for i := 0; i < l; i++ { if x[i] < y[i] { return true } if x[i] > y[i] { return false } } return len(x) < len(y) } // Add two strings together. func stringConcat(x, y _string) _string { if x.length == 0 { return y } else if y.length == 0 { return x } else { length := x.length + y.length buf := alloc(length) memcpy(buf, unsafe.Pointer(x.ptr), x.length) memcpy(unsafe.Pointer(uintptr(buf)+x.length), unsafe.Pointer(y.ptr), y.length) return _string{ptr: (*byte)(buf), length: length} } } // Create a string from a []byte slice. func stringFromBytes(x struct { ptr *byte len uintptr cap uintptr }) _string { buf := alloc(x.len) memcpy(buf, unsafe.Pointer(x.ptr), x.len) return _string{ptr: (*byte)(buf), length: x.len} } // Convert a string to a []byte slice. func stringToBytes(x _string) (slice struct { ptr *byte len uintptr cap uintptr }) { buf := alloc(x.length) memcpy(buf, unsafe.Pointer(x.ptr), x.length) slice.ptr = (*byte)(buf) slice.len = x.length slice.cap = x.length return } // Convert a []rune slice to a string. func stringFromRunes(runeSlice []rune) (s _string) { // Count the number of characters that will be in the string. for _, r := range runeSlice { _, numBytes := encodeUTF8(r) s.length += numBytes } // Allocate memory for the string. s.ptr = (*byte)(alloc(s.length)) // Encode runes to UTF-8 and store the resulting bytes in the string. index := uintptr(0) for _, r := range runeSlice { array, numBytes := encodeUTF8(r) for _, c := range array[:numBytes] { *(*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(s.ptr)) + index)) = c index++ } } return } // Convert a string to []rune slice. func stringToRunes(s string) []rune { var n = 0 for range s { n++ } var r = make([]rune, n) n = 0 for _, e := range s { r[n] = e n++ } return r } // Create a string from a Unicode code point. func stringFromUnicode(x rune) _string { array, length := encodeUTF8(x) // Array will be heap allocated. // The heap most likely doesn't work with blocks below 4 bytes, so there's // no point in allocating a smaller buffer for the string here. return _string{ptr: (*byte)(unsafe.Pointer(&array)), length: length} } // Iterate over a string. // Returns (ok, key, value). func stringNext(s string, it *stringIterator) (bool, int, rune) { if len(s) <= int(it.byteindex) { return false, 0, 0 } i := int(it.byteindex) r, length := decodeUTF8(s, it.byteindex) it.byteindex += length return true, i, r } // Convert a Unicode code point into an array of bytes and its length. func encodeUTF8(x rune) ([4]byte, uintptr) { // https://stackoverflow.com/questions/6240055/manually-converting-unicode-codepoints-into-utf-8-and-utf-16 // Note: this code can probably be optimized (in size and speed). 
switch { case x <= 0x7f: return [4]byte{byte(x), 0, 0, 0}, 1 case x <= 0x7ff: b1 := 0xc0 | byte(x>>6) b2 := 0x80 | byte(x&0x3f) return [4]byte{b1, b2, 0, 0}, 2 case x <= 0xffff: b1 := 0xe0 | byte(x>>12) b2 := 0x80 | byte((x>>6)&0x3f) b3 := 0x80 | byte((x>>0)&0x3f) return [4]byte{b1, b2, b3, 0}, 3 case x <= 0x10ffff: b1 := 0xf0 | byte(x>>18) b2 := 0x80 | byte((x>>12)&0x3f) b3 := 0x80 | byte((x>>6)&0x3f) b4 := 0x80 | byte((x>>0)&0x3f) return [4]byte{b1, b2, b3, b4}, 4 default: // Invalid Unicode code point. return [4]byte{0xef, 0xbf, 0xbd, 0}, 3 } } // Decode a single UTF-8 character from a string. //go:nobounds func decodeUTF8(s string, index uintptr) (rune, uintptr) { remaining := uintptr(len(s)) - index // must be >= 1 before calling this function x := s[index] switch { case x&0x80 == 0x00: // 0xxxxxxx return rune(x), 1 case x&0xe0 == 0xc0: // 110xxxxx if remaining < 2 { return 0xfffd, 1 } return (rune(x&0x1f) << 6) | (rune(s[index+1]) & 0x3f), 2 case x&0xf0 == 0xe0: // 1110xxxx if remaining < 3 { return 0xfffd, 1 } return (rune(x&0x0f) << 12) | ((rune(s[index+1]) & 0x3f) << 6) | (rune(s[index+2]) & 0x3f), 3 case x&0xf8 == 0xf0: // 11110xxx if remaining < 4 { return 0xfffd, 1 } return (rune(x&0x07) << 18) | ((rune(s[index+1]) & 0x3f) << 12) | ((rune(s[index+2]) & 0x3f) << 6) | (rune(s[index+3]) & 0x3f), 4 default: return 0xfffd, 1 } } // indexByteString returns the index of the first instance of c in s, or -1 if c // is not present in s. //go:linkname indexByteString internal/bytealg.IndexByteString func indexByteString(s string, c byte) int { for i := 0; i < len(s); i++ { if s[i] == c { return i } } return -1 } // countString copies the implementation from // https://github.com/golang/go/blob/67f181bfd84dfd5942fe9a29d8a20c9ce5eb2fea/src/internal/bytealg/count_generic.go#L1 //go:linkname countString internal/bytealg.CountString func countString(s string, c byte) int { n := 0 for i := 0; i < len(s); i++ { if s[i] == c { n++ } } return n }
1
8,089
This fails on Go 1.11 because `indexByte` is already defined in strings_go111.go. I would suggest simply renaming one of the two, for example you could rename the other one to `strings_indexByte` (that will be one of the things we'll drop when we drop Go 1.11 support so it's better to make that one look ugly instead of the function that we will likely keep for a longer time).
tinygo-org-tinygo
go
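The review comment above suggests renaming the Go 1.11 copy of `indexByte` rather than the new one added by the patch. A sketch of what the renamed shim in strings_go111.go might look like is below; the build constraint, the linkname target, and the string-based signature are all assumptions about that file's current contents and should be checked against the real source before applying.

```go
// +build go1.11,!go1.12

package runtime

import _ "unsafe" // for go:linkname

// strings_indexByte keeps the old Go 1.11 shim, but under a name that no
// longer collides with the new internal/bytealg.IndexByte implementation.
// Only the local identifier changes; the linkname target (and therefore
// every caller that resolves the symbol through it) stays the same.
//go:linkname strings_indexByte strings.IndexByte
func strings_indexByte(s string, c byte) int {
	for i := 0; i < len(s); i++ {
		if s[i] == c {
			return i
		}
	}
	return -1
}
```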
@@ -868,7 +868,7 @@ CreateParameter(1) Parameter #1 Direction=Input Parameter #1 Name=@level Parameter #1 MockDbType=Int32 -Parameter #1 Value=""{0}"" +Parameter #1 Value={0} Add Parameter Parameter #1 CreateParameter(2) Parameter #2 Direction=Input
1
// // Copyright (c) 2004-2021 Jaroslaw Kowalski <[email protected]>, Kim Christensen, Julian Verdurmen // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * Neither the name of Jaroslaw Kowalski nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF // THE POSSIBILITY OF SUCH DAMAGE. // namespace NLog.Database.Tests { using System; using System.Collections; using System.Collections.Generic; #if !NETSTANDARD using System.Configuration; #endif using System.Data; using System.Data.Common; using System.Globalization; using System.IO; using NLog.Config; using NLog.Targets; using Xunit; #if MONO using Mono.Data.Sqlite; using System.Data.SqlClient; #elif NETSTANDARD using Microsoft.Data.SqlClient; using Microsoft.Data.Sqlite; #else using System.Data.SqlClient; using System.Data.SQLite; #endif public class DatabaseTargetTests { public DatabaseTargetTests() { LogManager.ThrowExceptions = true; } #if !NETSTANDARD static DatabaseTargetTests() { var data = (DataSet)ConfigurationManager.GetSection("system.data"); var providerFactories = data.Tables["DBProviderFactories"]; providerFactories.Rows.Add("MockDb Provider", "MockDb Provider", "MockDb", typeof(MockDbFactory).AssemblyQualifiedName); providerFactories.AcceptChanges(); } #endif [Fact] public void SimpleDatabaseTest() { MockDbConnection.ClearLog(); DatabaseTarget dt = new DatabaseTarget() { CommandText = "INSERT INTO FooBar VALUES('${message}')", ConnectionString = "FooBar", DBProvider = typeof(MockDbConnection).AssemblyQualifiedName, }; new LogFactory().Setup().LoadConfiguration(cfg => cfg.Configuration.AddRuleForAllLevels(dt)); Assert.Same(typeof(MockDbConnection), dt.ConnectionType); List<Exception> exceptions = new List<Exception>(); dt.WriteAsyncLogEvent(new LogEventInfo(LogLevel.Info, "MyLogger", "msg1").WithContinuation(exceptions.Add)); dt.WriteAsyncLogEvent(new LogEventInfo(LogLevel.Info, "MyLogger", "msg2").WithContinuation(exceptions.Add)); dt.WriteAsyncLogEvent(new LogEventInfo(LogLevel.Info, "MyLogger", "msg3").WithContinuation(exceptions.Add)); foreach (var ex in exceptions) { Assert.Null(ex); } string expectedLog = @"Open('FooBar'). ExecuteNonQuery: INSERT INTO FooBar VALUES('msg1') Close() Dispose() Open('FooBar'). 
ExecuteNonQuery: INSERT INTO FooBar VALUES('msg2') Close() Dispose() Open('FooBar'). ExecuteNonQuery: INSERT INTO FooBar VALUES('msg3') Close() Dispose() "; AssertLog(expectedLog); } [Fact] public void SimpleBatchedDatabaseTest() { MockDbConnection.ClearLog(); DatabaseTarget dt = new DatabaseTarget() { CommandText = "INSERT INTO FooBar VALUES('${message}')", ConnectionString = "FooBar", DBProvider = typeof(MockDbConnection).AssemblyQualifiedName, }; new LogFactory().Setup().LoadConfiguration(cfg => cfg.Configuration.AddRuleForAllLevels(dt)); Assert.Same(typeof(MockDbConnection), dt.ConnectionType); List<Exception> exceptions = new List<Exception>(); var events = new[] { new LogEventInfo(LogLevel.Info, "MyLogger", "msg1").WithContinuation(exceptions.Add), new LogEventInfo(LogLevel.Info, "MyLogger", "msg2").WithContinuation(exceptions.Add), new LogEventInfo(LogLevel.Info, "MyLogger", "msg3").WithContinuation(exceptions.Add), }; dt.WriteAsyncLogEvents(events); foreach (var ex in exceptions) { Assert.Null(ex); } string expectedLog = @"Open('FooBar'). ExecuteNonQuery: INSERT INTO FooBar VALUES('msg1') ExecuteNonQuery: INSERT INTO FooBar VALUES('msg2') ExecuteNonQuery: INSERT INTO FooBar VALUES('msg3') Close() Dispose() "; AssertLog(expectedLog); } [Fact] public void KeepConnectionOpenTest() { MockDbConnection.ClearLog(); DatabaseTarget dt = new DatabaseTarget() { CommandText = "INSERT INTO FooBar VALUES('${message}')", ConnectionString = "FooBar", DBProvider = typeof(MockDbConnection).AssemblyQualifiedName, KeepConnection = true, }; var logFactory = new LogFactory().Setup().LoadConfiguration(cfg => cfg.Configuration.AddRuleForAllLevels(dt)).LogFactory; Assert.Same(typeof(MockDbConnection), dt.ConnectionType); List<Exception> exceptions = new List<Exception>(); dt.WriteAsyncLogEvent(new LogEventInfo(LogLevel.Info, "MyLogger", "msg1").WithContinuation(exceptions.Add)); dt.WriteAsyncLogEvent(new LogEventInfo(LogLevel.Info, "MyLogger", "msg2").WithContinuation(exceptions.Add)); dt.WriteAsyncLogEvent(new LogEventInfo(LogLevel.Info, "MyLogger", "msg3").WithContinuation(exceptions.Add)); foreach (var ex in exceptions) { Assert.Null(ex); } string expectedLog = @"Open('FooBar'). ExecuteNonQuery: INSERT INTO FooBar VALUES('msg1') ExecuteNonQuery: INSERT INTO FooBar VALUES('msg2') ExecuteNonQuery: INSERT INTO FooBar VALUES('msg3') "; AssertLog(expectedLog); MockDbConnection.ClearLog(); logFactory.Shutdown(); expectedLog = @"Close() Dispose() "; AssertLog(expectedLog); } [Fact] public void KeepConnectionOpenBatchedTest() { MockDbConnection.ClearLog(); DatabaseTarget dt = new DatabaseTarget() { CommandText = "INSERT INTO FooBar VALUES('${message}')", ConnectionString = "FooBar", DBProvider = typeof(MockDbConnection).AssemblyQualifiedName, KeepConnection = true, }; var logFactory = new LogFactory().Setup().LoadConfiguration(cfg => cfg.Configuration.AddRuleForAllLevels(dt)).LogFactory; Assert.Same(typeof(MockDbConnection), dt.ConnectionType); var exceptions = new List<Exception>(); var events = new[] { new LogEventInfo(LogLevel.Info, "MyLogger", "msg1").WithContinuation(exceptions.Add), new LogEventInfo(LogLevel.Info, "MyLogger", "msg2").WithContinuation(exceptions.Add), new LogEventInfo(LogLevel.Info, "MyLogger", "msg3").WithContinuation(exceptions.Add), }; dt.WriteAsyncLogEvents(events); foreach (var ex in exceptions) { Assert.Null(ex); } string expectedLog = @"Open('FooBar'). 
ExecuteNonQuery: INSERT INTO FooBar VALUES('msg1') ExecuteNonQuery: INSERT INTO FooBar VALUES('msg2') ExecuteNonQuery: INSERT INTO FooBar VALUES('msg3') "; AssertLog(expectedLog); MockDbConnection.ClearLog(); logFactory.Shutdown(); expectedLog = @"Close() Dispose() "; AssertLog(expectedLog); } [Fact] public void KeepConnectionOpenBatchedIsolationLevelTest() { MockDbConnection.ClearLog(); DatabaseTarget dt = new DatabaseTarget() { CommandText = "INSERT INTO FooBar VALUES('${message}')", ConnectionString = "FooBar", DBProvider = typeof(MockDbConnection).AssemblyQualifiedName, KeepConnection = true, IsolationLevel = IsolationLevel.ReadCommitted, }; var logFactory = new LogFactory().Setup().LoadConfiguration(cfg => cfg.Configuration.AddRuleForAllLevels(dt)).LogFactory; Assert.Same(typeof(MockDbConnection), dt.ConnectionType); var exceptions = new List<Exception>(); var events = new[] { new LogEventInfo(LogLevel.Info, "MyLogger", "msg1").WithContinuation(exceptions.Add), new LogEventInfo(LogLevel.Info, "MyLogger", "msg2").WithContinuation(exceptions.Add), new LogEventInfo(LogLevel.Info, "MyLogger", "msg3").WithContinuation(exceptions.Add), }; dt.WriteAsyncLogEvents(events); foreach (var ex in exceptions) { Assert.Null(ex); } string expectedLog = @"Open('FooBar'). DbTransaction.Begin(ReadCommitted) ExecuteNonQuery (DbTransaction=Active): INSERT INTO FooBar VALUES('msg1') ExecuteNonQuery (DbTransaction=Active): INSERT INTO FooBar VALUES('msg2') ExecuteNonQuery (DbTransaction=Active): INSERT INTO FooBar VALUES('msg3') DbTransaction.Commit() DbTransaction.Dispose() "; AssertLog(expectedLog); MockDbConnection.ClearLog(); logFactory.Shutdown(); expectedLog = @"Close() Dispose() "; AssertLog(expectedLog); } [Fact] public void KeepConnectionOpenTest2() { MockDbConnection.ClearLog(); DatabaseTarget dt = new DatabaseTarget() { CommandText = "INSERT INTO FooBar VALUES('${message}')", ConnectionString = "Database=${logger}", DBProvider = typeof(MockDbConnection).AssemblyQualifiedName, KeepConnection = true, }; var logFactory = new LogFactory().Setup().LoadConfiguration(cfg => cfg.Configuration.AddRuleForAllLevels(dt)).LogFactory; Assert.Same(typeof(MockDbConnection), dt.ConnectionType); List<Exception> exceptions = new List<Exception>(); dt.WriteAsyncLogEvent(new LogEventInfo(LogLevel.Info, "MyLogger", "msg1").WithContinuation(exceptions.Add)); dt.WriteAsyncLogEvent(new LogEventInfo(LogLevel.Info, "MyLogger", "msg2").WithContinuation(exceptions.Add)); dt.WriteAsyncLogEvent(new LogEventInfo(LogLevel.Info, "MyLogger2", "msg3").WithContinuation(exceptions.Add)); dt.WriteAsyncLogEvent(new LogEventInfo(LogLevel.Info, "MyLogger", "msg4").WithContinuation(exceptions.Add)); foreach (var ex in exceptions) { Assert.Null(ex); } string expectedLog = @"Open('Database=MyLogger'). ExecuteNonQuery: INSERT INTO FooBar VALUES('msg1') ExecuteNonQuery: INSERT INTO FooBar VALUES('msg2') Close() Dispose() Open('Database=MyLogger2'). ExecuteNonQuery: INSERT INTO FooBar VALUES('msg3') Close() Dispose() Open('Database=MyLogger'). 
ExecuteNonQuery: INSERT INTO FooBar VALUES('msg4') "; AssertLog(expectedLog); MockDbConnection.ClearLog(); logFactory.Shutdown(); expectedLog = @"Close() Dispose() "; AssertLog(expectedLog); } [Fact] public void KeepConnectionOpenBatchedTest2() { MockDbConnection.ClearLog(); DatabaseTarget dt = new DatabaseTarget() { CommandText = "INSERT INTO FooBar VALUES('${message}')", ConnectionString = "Database=${logger}", DBProvider = typeof(MockDbConnection).AssemblyQualifiedName, KeepConnection = true, }; var logFactory = new LogFactory().Setup().LoadConfiguration(cfg => cfg.Configuration.AddRuleForAllLevels(dt)).LogFactory; Assert.Same(typeof(MockDbConnection), dt.ConnectionType); // when we pass multiple log events in an array, the target will bucket-sort them by // connection string and group all commands for the same connection string together // to minimize number of db open/close operations // in this case msg1, msg2 and msg4 will be written together to MyLogger database // and msg3 will be written to MyLogger2 database List<Exception> exceptions = new List<Exception>(); var events = new[] { new LogEventInfo(LogLevel.Info, "MyLogger", "msg1").WithContinuation(exceptions.Add), new LogEventInfo(LogLevel.Info, "MyLogger", "msg2").WithContinuation(exceptions.Add), new LogEventInfo(LogLevel.Info, "MyLogger2", "msg3").WithContinuation(exceptions.Add), new LogEventInfo(LogLevel.Info, "MyLogger", "msg4").WithContinuation(exceptions.Add), }; dt.WriteAsyncLogEvents(events); foreach (var ex in exceptions) { Assert.Null(ex); } string expectedLog = @"Open('Database=MyLogger'). ExecuteNonQuery: INSERT INTO FooBar VALUES('msg1') ExecuteNonQuery: INSERT INTO FooBar VALUES('msg2') ExecuteNonQuery: INSERT INTO FooBar VALUES('msg4') Close() Dispose() Open('Database=MyLogger2'). ExecuteNonQuery: INSERT INTO FooBar VALUES('msg3') "; AssertLog(expectedLog); MockDbConnection.ClearLog(); logFactory.Shutdown(); expectedLog = @"Close() Dispose() "; AssertLog(expectedLog); } [Fact] public void InstallParameterTest() { MockDbConnection.ClearLog(); DatabaseCommandInfo installDbCommand = new DatabaseCommandInfo { Text = $"INSERT INTO dbo.SomeTable(SomeColumn) SELECT @paramOne WHERE NOT EXISTS(SELECT 1 FROM dbo.SomeOtherTable WHERE SomeColumn = @paramOne);" }; installDbCommand.Parameters.Add(new DatabaseParameterInfo("paramOne", "SomeValue")); DatabaseTarget dt = new DatabaseTarget() { DBProvider = typeof(MockDbConnection).AssemblyQualifiedName, KeepConnection = true, CommandText = "not_important" }; dt.InstallDdlCommands.Add(installDbCommand); new LogFactory().Setup().LoadConfiguration(cfg => cfg.Configuration.AddRuleForAllLevels(dt)); Assert.Same(typeof(MockDbConnection), dt.ConnectionType); dt.Install(new InstallationContext()); string expectedLog = @"Open('Server=.;Trusted_Connection=SSPI;'). 
CreateParameter(0) Parameter #0 Direction=Input Parameter #0 Name=paramOne Parameter #0 Value=""SomeValue"" Add Parameter Parameter #0 ExecuteNonQuery: INSERT INTO dbo.SomeTable(SomeColumn) SELECT @paramOne WHERE NOT EXISTS(SELECT 1 FROM dbo.SomeOtherTable WHERE SomeColumn = @paramOne); Close() Dispose() "; AssertLog(expectedLog); } [Fact] public void ParameterTest() { MockDbConnection.ClearLog(); DatabaseTarget dt = new DatabaseTarget() { CommandText = "INSERT INTO FooBar VALUES(@msg, @lvl, @lg)", DBProvider = typeof(MockDbConnection).AssemblyQualifiedName, KeepConnection = true, Parameters = { new DatabaseParameterInfo("msg", "${message}"), new DatabaseParameterInfo("lvl", "${level}"), new DatabaseParameterInfo("lg", "${logger}") } }; var logFactory = new LogFactory().Setup().LoadConfiguration(cfg => cfg.Configuration.AddRuleForAllLevels(dt)).LogFactory; Assert.Same(typeof(MockDbConnection), dt.ConnectionType); // when we pass multiple log events in an array, the target will bucket-sort them by // connection string and group all commands for the same connection string together // to minimize number of db open/close operations // in this case msg1, msg2 and msg4 will be written together to MyLogger database // and msg3 will be written to MyLogger2 database List<Exception> exceptions = new List<Exception>(); var events = new[] { new LogEventInfo(LogLevel.Info, "MyLogger", "msg1").WithContinuation(exceptions.Add), new LogEventInfo(LogLevel.Debug, "MyLogger2", "msg3").WithContinuation(exceptions.Add), }; dt.WriteAsyncLogEvents(events); foreach (var ex in exceptions) { Assert.Null(ex); } string expectedLog = @"Open('Server=.;Trusted_Connection=SSPI;'). CreateParameter(0) Parameter #0 Direction=Input Parameter #0 Name=msg Parameter #0 Value=""msg1"" Add Parameter Parameter #0 CreateParameter(1) Parameter #1 Direction=Input Parameter #1 Name=lvl Parameter #1 Value=""Info"" Add Parameter Parameter #1 CreateParameter(2) Parameter #2 Direction=Input Parameter #2 Name=lg Parameter #2 Value=""MyLogger"" Add Parameter Parameter #2 ExecuteNonQuery: INSERT INTO FooBar VALUES(@msg, @lvl, @lg) CreateParameter(0) Parameter #0 Direction=Input Parameter #0 Name=msg Parameter #0 Value=""msg3"" Add Parameter Parameter #0 CreateParameter(1) Parameter #1 Direction=Input Parameter #1 Name=lvl Parameter #1 Value=""Debug"" Add Parameter Parameter #1 CreateParameter(2) Parameter #2 Direction=Input Parameter #2 Name=lg Parameter #2 Value=""MyLogger2"" Add Parameter Parameter #2 ExecuteNonQuery: INSERT INTO FooBar VALUES(@msg, @lvl, @lg) "; AssertLog(expectedLog); MockDbConnection.ClearLog(); logFactory.Shutdown(); expectedLog = @"Close() Dispose() "; AssertLog(expectedLog); } [Theory] [InlineData(null, true, @"""2""")] [InlineData(null, false, @"""2""")] [InlineData(DbType.Int32, true, "2")] [InlineData(DbType.Int32, false, "2")] [InlineData(DbType.Object, true, @"""2""")] [InlineData(DbType.Object, false, "Info")] public void LevelParameterTest(DbType? dbType, bool noRawValue, string expectedValue) { string lvlLayout = noRawValue ? 
"${level:format=Ordinal:norawvalue=true}" : "${level:format=Ordinal}"; MockDbConnection.ClearLog(); DatabaseTarget dt = new DatabaseTarget() { CommandText = "INSERT INTO FooBar VALUES(@lvl, @msg)", DBProvider = typeof(MockDbConnection).AssemblyQualifiedName, KeepConnection = true, Parameters = { new DatabaseParameterInfo("lvl", lvlLayout) { DbType = dbType?.ToString() }, new DatabaseParameterInfo("msg", "${message}") } }; var logFactory = new LogFactory().Setup().LoadConfiguration(cfg => cfg.Configuration.AddRuleForAllLevels(dt)).LogFactory; Assert.Same(typeof(MockDbConnection), dt.ConnectionType); List<Exception> exceptions = new List<Exception>(); var events = new[] { new LogEventInfo(LogLevel.Info, "MyLogger", "msg1").WithContinuation(exceptions.Add), }; dt.WriteAsyncLogEvents(events); foreach (var ex in exceptions) { Assert.Null(ex); } string expectedLog = string.Format(@"Open('Server=.;Trusted_Connection=SSPI;'). CreateParameter(0) Parameter #0 Direction=Input Parameter #0 Name=lvl{0} Parameter #0 Value={1} Add Parameter Parameter #0 CreateParameter(1) Parameter #1 Direction=Input Parameter #1 Name=msg Parameter #1 Value=""msg1"" Add Parameter Parameter #1 ExecuteNonQuery: INSERT INTO FooBar VALUES(@lvl, @msg) ", dbType.HasValue ? $"\r\nParameter #0 DbType={dbType.Value}" : "", expectedValue); AssertLog(expectedLog); MockDbConnection.ClearLog(); logFactory.Shutdown(); expectedLog = @"Close() Dispose() "; AssertLog(expectedLog); } [Theory] [InlineData("${counter}", DbType.Int16, (short)1)] [InlineData("${counter}", DbType.Int32, 1)] [InlineData("${counter}", DbType.Int64, (long)1)] [InlineData("${counter:norawvalue=true}", DbType.Int16, (short)1)] //fallback [InlineData("${counter}", DbType.VarNumeric, 1, false, true)] [InlineData("${counter}", DbType.AnsiString, "1")] [InlineData("${level}", DbType.AnsiString, "Debug")] [InlineData("${level}", DbType.Int32, 1)] [InlineData("${level}", DbType.UInt16, (ushort)1)] [InlineData("${event-properties:boolprop}", DbType.Boolean, true)] [InlineData("${event-properties:intprop}", DbType.Int32, 123)] [InlineData("${event-properties:intprop}", DbType.AnsiString, "123")] [InlineData("${event-properties:intprop}", DbType.AnsiStringFixedLength, "123")] [InlineData("${event-properties:intprop}", DbType.String, "123")] [InlineData("${event-properties:intprop}", DbType.StringFixedLength, "123")] [InlineData("${event-properties:almostAsIntProp}", DbType.Int16, (short)124)] [InlineData("${event-properties:almostAsIntProp:norawvalue=true}", DbType.Int16, (short)124)] [InlineData("${event-properties:almostAsIntProp}", DbType.Int32, 124)] [InlineData("${event-properties:almostAsIntProp}", DbType.Int64, (long)124)] [InlineData("${event-properties:almostAsIntProp}", DbType.AnsiString, " 124 ")] [InlineData("${event-properties:emptyprop}", DbType.AnsiString, "")] [InlineData("${event-properties:emptyprop}", DbType.AnsiString, null, true)] [InlineData("${event-properties:NullRawValue}", DbType.AnsiString, "")] [InlineData("${event-properties:NullRawValue}", DbType.Int32, 0)] [InlineData("${event-properties:NullRawValue}", DbType.AnsiString, null, true)] [InlineData("${event-properties:NullRawValue}", DbType.Int32, null, true)] [InlineData("${event-properties:NullRawValue}", DbType.Guid, null, true)] [InlineData("", DbType.AnsiString, null, true)] [InlineData("", DbType.Int32, null, true)] [InlineData("", DbType.Guid, null, true)] public void GetParameterValueTest(string layout, DbType dbtype, object expected, bool allowDbNull = false, bool convertToDecimal = 
false) { // Arrange var logEventInfo = new LogEventInfo(LogLevel.Debug, "logger1", "message 2"); logEventInfo.Properties["intprop"] = 123; logEventInfo.Properties["boolprop"] = true; logEventInfo.Properties["emptyprop"] = ""; logEventInfo.Properties["almostAsIntProp"] = " 124 "; logEventInfo.Properties["dateprop"] = new DateTime(2018, 12, 30, 13, 34, 56); var parameterName = "@param1"; var databaseParameterInfo = new DatabaseParameterInfo { DbType = dbtype.ToString(), Layout = layout, Name = parameterName, AllowDbNull = allowDbNull, }; databaseParameterInfo.SetDbType(new MockDbConnection().CreateCommand().CreateParameter()); // Act var result = new DatabaseTarget().GetDatabaseParameterValue(logEventInfo, databaseParameterInfo); //Assert if (convertToDecimal) { //fix that we can't pass decimals into attributes (InlineData) expected = (decimal)(int)expected; } Assert.Equal(expected ?? DBNull.Value, result); } [Theory] [MemberData(nameof(ConvertFromStringTestCases))] public void GetParameterValueFromStringTest(string value, DbType dbType, object expected, string format = null, CultureInfo cultureInfo = null, bool? allowDbNull = null) { var culture = System.Threading.Thread.CurrentThread.CurrentCulture; try { System.Threading.Thread.CurrentThread.CurrentCulture = new CultureInfo("NL-nl"); // Arrange var databaseParameterInfo = new DatabaseParameterInfo("@test", value) { Format = format, DbType = dbType.ToString(), Culture = cultureInfo, AllowDbNull = allowDbNull ?? false, }; databaseParameterInfo.SetDbType(new MockDbConnection().CreateCommand().CreateParameter()); // Act var result = new DatabaseTarget().GetDatabaseParameterValue(LogEventInfo.CreateNullEvent(), databaseParameterInfo); // Assert Assert.Equal(expected, result); } finally { // Restore System.Threading.Thread.CurrentThread.CurrentCulture = culture; } } public static IEnumerable<object[]> ConvertFromStringTestCases() { yield return new object[] { "true", DbType.Boolean, true }; yield return new object[] { "True", DbType.Boolean, true }; yield return new object[] { "1,2", DbType.VarNumeric, (decimal)1.2 }; yield return new object[] { "1,2", DbType.Currency, (decimal)1.2 }; yield return new object[] { "1,2", DbType.Decimal, (decimal)1.2 }; yield return new object[] { "1,2", DbType.Double, (double)1.2 }; yield return new object[] { "1,2", DbType.Single, (Single)1.2 }; yield return new object[] { "2:30", DbType.Time, new TimeSpan(0, 2, 30, 0), }; yield return new object[] { "2018-12-23 22:56", DbType.DateTime, new DateTime(2018, 12, 23, 22, 56, 0), }; yield return new object[] { "2018-12-23 22:56", DbType.DateTime2, new DateTime(2018, 12, 23, 22, 56, 0), }; yield return new object[] { "23-12-2018 22:56", DbType.DateTime, new DateTime(2018, 12, 23, 22, 56, 0), "dd-MM-yyyy HH:mm" }; yield return new object[] { new DateTime(2018, 12, 23, 22, 56, 0).ToString(CultureInfo.InvariantCulture), DbType.DateTime, new DateTime(2018, 12, 23, 22, 56, 0), null, CultureInfo.InvariantCulture }; yield return new object[] { "2018-12-23", DbType.Date, new DateTime(2018, 12, 23, 0, 0, 0), }; yield return new object[] { "2018-12-23 +2:30", DbType.DateTimeOffset, new DateTimeOffset(2018, 12, 23, 0, 0, 0, new TimeSpan(2, 30, 0)) }; yield return new object[] { "23-12-2018 22:56 +2:30", DbType.DateTimeOffset, new DateTimeOffset(2018, 12, 23, 22, 56, 0, new TimeSpan(2, 30, 0)), "dd-MM-yyyy HH:mm zzz" }; yield return new object[] { "3888CCA3-D11D-45C9-89A5-E6B72185D287", DbType.Guid, Guid.Parse("3888CCA3-D11D-45C9-89A5-E6B72185D287") }; yield return new object[] { 
"3888CCA3D11D45C989A5E6B72185D287", DbType.Guid, Guid.Parse("3888CCA3-D11D-45C9-89A5-E6B72185D287") }; yield return new object[] { "3888CCA3D11D45C989A5E6B72185D287", DbType.Guid, Guid.Parse("3888CCA3-D11D-45C9-89A5-E6B72185D287"), "N" }; yield return new object[] { "3", DbType.Byte, (byte)3 }; yield return new object[] { "3", DbType.SByte, (sbyte)3 }; yield return new object[] { "3", DbType.Int16, (short)3 }; yield return new object[] { " 3 ", DbType.Int16, (short)3 }; yield return new object[] { "3", DbType.Int32, 3 }; yield return new object[] { "3", DbType.Int64, (long)3 }; yield return new object[] { "3", DbType.UInt16, (ushort)3 }; yield return new object[] { "3", DbType.UInt32, (uint)3 }; yield return new object[] { "3", DbType.UInt64, (ulong)3 }; yield return new object[] { "3", DbType.AnsiString, "3" }; yield return new object[] { "${db-null}", DbType.DateTime, DBNull.Value }; yield return new object[] { "${event-properties:userid}", DbType.Int32, 0 }; yield return new object[] { "${date:universalTime=true:format=yyyy-MM:norawvalue=true}", DbType.DateTime, DateTime.SpecifyKind(DateTime.UtcNow.Date.AddDays(-DateTime.UtcNow.Day + 1), DateTimeKind.Unspecified) }; yield return new object[] { "${shortdate:universalTime=true}", DbType.DateTime, DateTime.UtcNow.Date, null, null, true }; yield return new object[] { "${shortdate:universalTime=true}", DbType.DateTime, DateTime.UtcNow.Date, null, null, false }; yield return new object[] { "${shortdate:universalTime=true}", DbType.String, DateTime.UtcNow.Date.ToString("yyyy-MM-dd"), null, null, true }; yield return new object[] { "${shortdate:universalTime=true}", DbType.String, DateTime.UtcNow.Date.ToString("yyyy-MM-dd"), null, null, false }; } [Fact] public void ParameterFacetTest() { MockDbConnection.ClearLog(); DatabaseTarget dt = new DatabaseTarget() { CommandText = "INSERT INTO FooBar VALUES(@msg, @lvl, @lg)", DBProvider = typeof(MockDbConnection).AssemblyQualifiedName, KeepConnection = true, Parameters = { new DatabaseParameterInfo("msg", "${message}") { Precision = 3, Scale = 7, Size = 9, }, new DatabaseParameterInfo("lvl", "${level}") { Scale = 7 }, new DatabaseParameterInfo("lg", "${logger}") { Precision = 0 }, } }; var logFactory = new LogFactory().Setup().LoadConfiguration(cfg => cfg.Configuration.AddRuleForAllLevels(dt)).LogFactory; Assert.Same(typeof(MockDbConnection), dt.ConnectionType); // when we pass multiple log events in an array, the target will bucket-sort them by // connection string and group all commands for the same connection string together // to minimize number of db open/close operations // in this case msg1, msg2 and msg4 will be written together to MyLogger database // and msg3 will be written to MyLogger2 database var exceptions = new List<Exception>(); var events = new[] { new LogEventInfo(LogLevel.Info, "MyLogger", "msg1").WithContinuation(exceptions.Add), new LogEventInfo(LogLevel.Debug, "MyLogger2", "msg3").WithContinuation(exceptions.Add), }; dt.WriteAsyncLogEvents(events); logFactory.Shutdown(); foreach (var ex in exceptions) { Assert.Null(ex); } string expectedLog = @"Open('Server=.;Trusted_Connection=SSPI;'). 
CreateParameter(0) Parameter #0 Direction=Input Parameter #0 Name=msg Parameter #0 Size=9 Parameter #0 Precision=3 Parameter #0 Scale=7 Parameter #0 Value=""msg1"" Add Parameter Parameter #0 CreateParameter(1) Parameter #1 Direction=Input Parameter #1 Name=lvl Parameter #1 Scale=7 Parameter #1 Value=""Info"" Add Parameter Parameter #1 CreateParameter(2) Parameter #2 Direction=Input Parameter #2 Name=lg Parameter #2 Value=""MyLogger"" Add Parameter Parameter #2 ExecuteNonQuery: INSERT INTO FooBar VALUES(@msg, @lvl, @lg) CreateParameter(0) Parameter #0 Direction=Input Parameter #0 Name=msg Parameter #0 Size=9 Parameter #0 Precision=3 Parameter #0 Scale=7 Parameter #0 Value=""msg3"" Add Parameter Parameter #0 CreateParameter(1) Parameter #1 Direction=Input Parameter #1 Name=lvl Parameter #1 Scale=7 Parameter #1 Value=""Debug"" Add Parameter Parameter #1 CreateParameter(2) Parameter #2 Direction=Input Parameter #2 Name=lg Parameter #2 Value=""MyLogger2"" Add Parameter Parameter #2 ExecuteNonQuery: INSERT INTO FooBar VALUES(@msg, @lvl, @lg) Close() Dispose() "; AssertLog(expectedLog); } [Fact] public void ParameterDbTypePropertyNameTest() { MockDbConnection.ClearLog(); var dbProvider = typeof(MockDbConnection).AssemblyQualifiedName; var logFactory = new LogFactory().Setup().SetupExtensions(ext => ext.RegisterAssembly(typeof(DatabaseTarget).Assembly)).LoadConfigurationFromXml($@" <nlog> <targets> <target name='dt' type='Database'> <DBProvider>{dbProvider}</DBProvider> <ConnectionString>FooBar</ConnectionString> <CommandText>INSERT INTO FooBar VALUES(@message,@level,@date)</CommandText> <parameter name='@message' layout='${{message}}'/> <parameter name='@level' dbType=' MockDbType.int32 ' layout='${{level:format=Ordinal}}'/> <parameter name='@date' dbType='MockDbType.DateTime' format='yyyy-MM-dd HH:mm:ss.fff' layout='${{date:format=yyyy-MM-dd HH\:mm\:ss.fff}}'/> </target> </targets> </nlog>").LogFactory; DatabaseTarget dt = logFactory.Configuration.FindTargetByName("dt") as DatabaseTarget; Assert.NotNull(dt); List<Exception> exceptions = new List<Exception>(); var alogEvent = new LogEventInfo(LogLevel.Info, "MyLogger", "msg1").WithContinuation(exceptions.Add); dt.WriteAsyncLogEvent(alogEvent); dt.WriteAsyncLogEvent(alogEvent); foreach (var ex in exceptions) { Assert.Null(ex); } string expectedLog = @"Open('FooBar'). 
CreateParameter(0) Parameter #0 Direction=Input Parameter #0 Name=@message Parameter #0 Value=""msg1"" Add Parameter Parameter #0 CreateParameter(1) Parameter #1 Direction=Input Parameter #1 Name=@level Parameter #1 MockDbType=Int32 Parameter #1 Value=""{0}"" Add Parameter Parameter #1 CreateParameter(2) Parameter #2 Direction=Input Parameter #2 Name=@date Parameter #2 MockDbType=DateTime Parameter #2 Value={1} Add Parameter Parameter #2 ExecuteNonQuery: INSERT INTO FooBar VALUES(@message,@level,@date) Close() Dispose() "; expectedLog = string.Format(expectedLog + expectedLog, LogLevel.Info.Ordinal, alogEvent.LogEvent.TimeStamp.ToString(CultureInfo.InvariantCulture)); AssertLog(expectedLog); } [Fact] public void ConnectionStringBuilderTest1() { DatabaseTarget dt; dt = new DatabaseTarget(); Assert.Equal("Server=.;Trusted_Connection=SSPI;", GetConnectionString(dt)); dt = new DatabaseTarget(); dt.DBHost = "${logger}"; Assert.Equal("Server=Logger1;Trusted_Connection=SSPI;", GetConnectionString(dt)); dt = new DatabaseTarget(); dt.DBHost = "HOST1"; dt.DBDatabase = "${logger}"; Assert.Equal("Server=HOST1;Trusted_Connection=SSPI;Database=Logger1", GetConnectionString(dt)); dt = new DatabaseTarget(); dt.DBHost = "HOST1"; dt.DBDatabase = "${logger}"; dt.DBUserName = "user1"; dt.DBPassword = "password1"; Assert.Equal("Server=HOST1;User id=user1;Password=password1;Database=Logger1", GetConnectionString(dt)); dt = new DatabaseTarget(); dt.ConnectionString = "customConnectionString42"; dt.DBHost = "HOST1"; dt.DBDatabase = "${logger}"; dt.DBUserName = "user1"; dt.DBPassword = "password1"; Assert.Equal("customConnectionString42", GetConnectionString(dt)); } [Fact] public void DatabaseExceptionTest1() { MockDbConnection.ClearLog(); var exceptions = new List<Exception>(); var db = new DatabaseTarget(); db.CommandText = "not important"; db.ConnectionString = "cannotconnect"; db.DBProvider = typeof(MockDbConnection).AssemblyQualifiedName; var logFactory = new LogFactory().Setup().LoadConfiguration(cfg => cfg.Configuration.AddRuleForAllLevels(db)).LogFactory; try { LogManager.ThrowExceptions = false; db.WriteAsyncLogEvent(LogEventInfo.CreateNullEvent().WithContinuation(exceptions.Add)); } finally { LogManager.ThrowExceptions = true; logFactory.Shutdown(); } Assert.Single(exceptions); Assert.NotNull(exceptions[0]); Assert.Equal("Cannot open fake database.", exceptions[0].Message); Assert.Equal("Open('cannotconnect').\r\n", MockDbConnection.Log); } [Fact] public void DatabaseExceptionTest2() { MockDbConnection.ClearLog(); var exceptions = new List<Exception>(); var db = new DatabaseTarget(); db.CommandText = "not important"; db.ConnectionString = "cannotexecute"; db.KeepConnection = true; db.DBProvider = typeof(MockDbConnection).AssemblyQualifiedName; var logFactory = new LogFactory().Setup().LoadConfiguration(cfg => cfg.Configuration.AddRuleForAllLevels(db)).LogFactory; try { LogManager.ThrowExceptions = false; db.WriteAsyncLogEvent(LogEventInfo.CreateNullEvent().WithContinuation(exceptions.Add)); db.WriteAsyncLogEvent(LogEventInfo.CreateNullEvent().WithContinuation(exceptions.Add)); db.WriteAsyncLogEvent(LogEventInfo.CreateNullEvent().WithContinuation(exceptions.Add)); } finally { LogManager.ThrowExceptions = true; logFactory.Shutdown(); } Assert.Equal(3, exceptions.Count); Assert.NotNull(exceptions[0]); Assert.NotNull(exceptions[1]); Assert.NotNull(exceptions[2]); Assert.Equal("Failure during ExecuteNonQuery", exceptions[0].Message); Assert.Equal("Failure during ExecuteNonQuery", exceptions[1].Message); 
Assert.Equal("Failure during ExecuteNonQuery", exceptions[2].Message); string expectedLog = @"Open('cannotexecute'). ExecuteNonQuery: not important Close() Dispose() Open('cannotexecute'). ExecuteNonQuery: not important Close() Dispose() Open('cannotexecute'). ExecuteNonQuery: not important Close() Dispose() "; AssertLog(expectedLog); } [Fact] public void DatabaseBatchExceptionTest() { MockDbConnection.ClearLog(); var exceptions = new List<Exception>(); var db = new DatabaseTarget(); db.CommandText = "not important"; db.ConnectionString = "cannotexecute"; db.KeepConnection = true; db.DBProvider = typeof(MockDbConnection).AssemblyQualifiedName; var logFactory = new LogFactory().Setup().LoadConfiguration(cfg => cfg.Configuration.AddRuleForAllLevels(db)).LogFactory; try { LogManager.ThrowExceptions = false; var events = new[] { LogEventInfo.CreateNullEvent().WithContinuation(exceptions.Add), LogEventInfo.CreateNullEvent().WithContinuation(exceptions.Add), LogEventInfo.CreateNullEvent().WithContinuation(exceptions.Add), }; db.WriteAsyncLogEvents(events); } finally { LogManager.ThrowExceptions = true; logFactory.Shutdown(); } Assert.Equal(3, exceptions.Count); Assert.NotNull(exceptions[0]); Assert.NotNull(exceptions[1]); Assert.NotNull(exceptions[2]); Assert.Equal("Failure during ExecuteNonQuery", exceptions[0].Message); Assert.Equal("Failure during ExecuteNonQuery", exceptions[1].Message); Assert.Equal("Failure during ExecuteNonQuery", exceptions[2].Message); string expectedLog = @"Open('cannotexecute'). ExecuteNonQuery: not important Close() Dispose() Open('cannotexecute'). ExecuteNonQuery: not important Close() Dispose() Open('cannotexecute'). ExecuteNonQuery: not important Close() Dispose() "; AssertLog(expectedLog); } [Fact] public void DatabaseBatchIsolationLevelExceptionTest() { MockDbConnection.ClearLog(); var exceptions = new List<Exception>(); var db = new DatabaseTarget(); db.CommandText = "not important"; db.ConnectionString = "cannotexecute"; db.KeepConnection = true; db.IsolationLevel = IsolationLevel.Serializable; db.DBProvider = typeof(MockDbConnection).AssemblyQualifiedName; var logFactory = new LogFactory().Setup().LoadConfiguration(cfg => cfg.Configuration.AddRuleForAllLevels(db)).LogFactory; try { LogManager.ThrowExceptions = false; var events = new[] { LogEventInfo.CreateNullEvent().WithContinuation(exceptions.Add), LogEventInfo.CreateNullEvent().WithContinuation(exceptions.Add), LogEventInfo.CreateNullEvent().WithContinuation(exceptions.Add), }; db.WriteAsyncLogEvents(events); } finally { LogManager.ThrowExceptions = true; logFactory.Shutdown(); } Assert.Equal(3, exceptions.Count); Assert.NotNull(exceptions[0]); Assert.NotNull(exceptions[1]); Assert.NotNull(exceptions[2]); Assert.Equal("Failure during ExecuteNonQuery", exceptions[0].Message); Assert.Equal("Failure during ExecuteNonQuery", exceptions[1].Message); Assert.Equal("Failure during ExecuteNonQuery", exceptions[2].Message); string expectedLog = @"Open('cannotexecute'). 
DbTransaction.Begin(Serializable) ExecuteNonQuery (DbTransaction=Active): not important DbTransaction.Rollback() DbTransaction.Dispose() Close() Dispose() "; AssertLog(expectedLog); } [Fact] public void DatabaseExceptionTest3() { MockDbConnection.ClearLog(); var exceptions = new List<Exception>(); var db = new DatabaseTarget(); db.CommandText = "not important"; db.ConnectionString = "cannotexecute"; db.KeepConnection = true; db.DBProvider = typeof(MockDbConnection).AssemblyQualifiedName; var logFactory = new LogFactory().Setup().LoadConfiguration(cfg => cfg.Configuration.AddRuleForAllLevels(db)).LogFactory; try { LogManager.ThrowExceptions = false; db.WriteAsyncLogEvents( LogEventInfo.CreateNullEvent().WithContinuation(exceptions.Add), LogEventInfo.CreateNullEvent().WithContinuation(exceptions.Add), LogEventInfo.CreateNullEvent().WithContinuation(exceptions.Add)); } finally { LogManager.ThrowExceptions = true; logFactory.Shutdown(); } Assert.Equal(3, exceptions.Count); Assert.NotNull(exceptions[0]); Assert.NotNull(exceptions[1]); Assert.NotNull(exceptions[2]); Assert.Equal("Failure during ExecuteNonQuery", exceptions[0].Message); Assert.Equal("Failure during ExecuteNonQuery", exceptions[1].Message); Assert.Equal("Failure during ExecuteNonQuery", exceptions[2].Message); string expectedLog = @"Open('cannotexecute'). ExecuteNonQuery: not important Close() Dispose() Open('cannotexecute'). ExecuteNonQuery: not important Close() Dispose() Open('cannotexecute'). ExecuteNonQuery: not important Close() Dispose() "; AssertLog(expectedLog); } #if !MONO && !NETSTANDARD [Fact] public void ConnectionStringNameInitTest() { var dt = new DatabaseTarget { ConnectionStringName = "MyConnectionString", CommandText = "notimportant", }; Assert.Same(ConfigurationManager.ConnectionStrings, dt.ConnectionStringsSettings); dt.ConnectionStringsSettings = new ConnectionStringSettingsCollection() { new ConnectionStringSettings("MyConnectionString", "cs1", "MockDb"), }; new LogFactory().Setup().LoadConfiguration(cfg => cfg.Configuration.AddRuleForAllLevels(dt)); Assert.Same(MockDbFactory.Instance, dt.ProviderFactory); Assert.Equal("cs1", dt.ConnectionString.Render(LogEventInfo.CreateNullEvent())); } [Fact] public void ConnectionStringNameNegativeTest_if_ThrowConfigExceptions() { LogManager.ThrowConfigExceptions = true; var dt = new DatabaseTarget { ConnectionStringName = "MyConnectionString", CommandText = "notimportant", ConnectionStringsSettings = new ConnectionStringSettingsCollection(), }; try { new LogFactory().Setup().LoadConfiguration(cfg => cfg.Configuration.AddRuleForAllLevels(dt)); Assert.True(false, "Exception expected."); } catch (NLogConfigurationException configurationException) { Assert.Equal( "Connection string 'MyConnectionString' is not declared in <connectionStrings /> section.", configurationException.Message); } } [Fact] public void ProviderFactoryInitTest() { var dt = new DatabaseTarget(); dt.DBProvider = "MockDb"; dt.CommandText = "Notimportant"; new LogFactory().Setup().LoadConfiguration(cfg => cfg.Configuration.AddRuleForAllLevels(dt)); Assert.Same(MockDbFactory.Instance, dt.ProviderFactory); dt.OpenConnection("myConnectionString", null); Assert.Equal(1, MockDbConnection2.OpenCount); Assert.Equal("myConnectionString", MockDbConnection2.LastOpenConnectionString); } #endif [Fact] public void AccessTokenShouldBeSet() { // Arrange var accessToken = "123"; MockDbConnection.ClearLog(); var databaseTarget = new DatabaseTarget { DBProvider = typeof(MockDbConnection).AssemblyQualifiedName, CommandText = 
"command1", }; databaseTarget.ConnectionProperties.Add(new DatabaseObjectPropertyInfo() { Name = "AccessToken", Layout = accessToken }); new LogFactory().Setup().LoadConfiguration(cfg => cfg.Configuration.AddRuleForAllLevels(databaseTarget)); // Act var connection1 = databaseTarget.OpenConnection(".", null); var connection2 = databaseTarget.OpenConnection(".", null); // Twice because we use compiled method on 2nd attempt // Assert var sqlConnection1 = Assert.IsType<MockDbConnection>(connection1); Assert.Equal(accessToken, sqlConnection1.AccessToken); // Verify dynamic setter method invoke assigns correctly var sqlConnection2 = Assert.IsType<MockDbConnection>(connection2); Assert.Equal(accessToken, sqlConnection2.AccessToken); // Verify compiled method also assigns correctly } [Fact] public void AccessTokenWithInvalidTypeCannotBeSet() { // Arrange MockDbConnection.ClearLog(); var databaseTarget = new DatabaseTarget { DBProvider = typeof(MockDbConnection).AssemblyQualifiedName, CommandText = "command1", }; databaseTarget.ConnectionProperties.Add(new DatabaseObjectPropertyInfo() { Name = "AccessToken", Layout = "abc", PropertyType = typeof(int) }); var logFactory = new LogFactory().Setup().LoadConfiguration(cfg => cfg.Configuration.AddRuleForAllLevels(databaseTarget)).LogFactory; // Act + Assert Assert.Throws<InvalidCastException>(() => logFactory.GetCurrentClassLogger().Info("Hello")); } [Fact] public void CommandTimeoutShouldBeSet() { // Arrange var commandTimeout = "123"; MockDbConnection.ClearLog(); var databaseTarget = new DatabaseTarget { DBProvider = typeof(MockDbConnection).AssemblyQualifiedName, CommandText = "command1", }; databaseTarget.CommandProperties.Add(new DatabaseObjectPropertyInfo() { Name = "CommandTimeout", Layout = commandTimeout, PropertyType = typeof(int) }); new LogFactory().Setup().LoadConfiguration(cfg => cfg.Configuration.AddRuleForAllLevels(databaseTarget)); // Act var connection = databaseTarget.OpenConnection(".", null); var command1 = databaseTarget.CreateDbCommand(LogEventInfo.CreateNullEvent(), connection); var command2 = databaseTarget.CreateDbCommand(LogEventInfo.CreateNullEvent(), connection); // Twice because we use compiled method on 2nd attempt // Assert var sqlCommand1 = Assert.IsType<MockDbCommand>(command1); Assert.Equal(commandTimeout, sqlCommand1.CommandTimeout.ToString()); // Verify dynamic setter method invoke assigns correctly var sqlCommand2 = Assert.IsType<MockDbCommand>(command2); Assert.Equal(commandTimeout, sqlCommand2.CommandTimeout.ToString()); // Verify compiled method also assigns correctly } [Fact] public void SqlServerShorthandNotationTest() { foreach (string provName in new[] { "microsoft", "msde", "mssql", "sqlserver" }) { var dt = new DatabaseTarget() { Name = "myTarget", DBProvider = provName, ConnectionString = "notimportant", CommandText = "notimportant", }; new LogFactory().Setup().LoadConfiguration(cfg => cfg.Configuration.AddRuleForAllLevels(dt)); Assert.Equal(typeof(SqlConnection), dt.ConnectionType); } } #if !NETSTANDARD [Fact] public void OleDbShorthandNotationTest() { var dt = new DatabaseTarget() { Name = "myTarget", DBProvider = "oledb", ConnectionString = "notimportant", CommandText = "notimportant", }; new LogFactory().Setup().LoadConfiguration(cfg => cfg.Configuration.AddRuleForAllLevels(dt)); Assert.Equal(typeof(System.Data.OleDb.OleDbConnection), dt.ConnectionType); } [Fact] public void OdbcShorthandNotationTest() { var dt = new DatabaseTarget() { Name = "myTarget", DBProvider = "odbc", ConnectionString = 
"notimportant", CommandText = "notimportant", }; new LogFactory().Setup().LoadConfiguration(cfg => cfg.Configuration.AddRuleForAllLevels(dt)); Assert.Equal(typeof(System.Data.Odbc.OdbcConnection), dt.ConnectionType); } #endif [Fact] public void SQLite_InstallAndLogMessageProgrammatically() { SQLiteTest sqlLite = new SQLiteTest("TestLogProgram.sqlite"); // delete database if it for some reason already exists sqlLite.TryDropDatabase(); LogManager.ThrowExceptions = true; try { sqlLite.CreateDatabase(); var connectionString = sqlLite.GetConnectionString(); DatabaseTarget testTarget = new DatabaseTarget("TestSqliteTarget"); testTarget.ConnectionString = connectionString; testTarget.DBProvider = GetSQLiteDbProvider(); testTarget.InstallDdlCommands.Add(new DatabaseCommandInfo() { CommandType = CommandType.Text, Text = $@" CREATE TABLE NLogTestTable ( Id int PRIMARY KEY, Message varchar(100) NULL)" }); using (var context = new InstallationContext()) { testTarget.Install(context); } // check so table is created var tableName = sqlLite.IssueScalarQuery("SELECT name FROM sqlite_master WHERE type = 'table' AND name = 'NLogTestTable'"); Assert.Equal("NLogTestTable", tableName); testTarget.CommandText = "INSERT INTO NLogTestTable (Message) VALUES (@message)"; testTarget.Parameters.Add(new DatabaseParameterInfo("@message", new NLog.Layouts.SimpleLayout("${message}"))); // setup logging var config = new LoggingConfiguration(); config.AddTarget("dbTarget", testTarget); var rule = new LoggingRule("*", LogLevel.Debug, testTarget); config.LoggingRules.Add(rule); // try to log LogManager.Configuration = config; var logger = LogManager.GetLogger("testLog"); logger.Debug("Test debug message"); logger.Error("Test error message"); // will return long var logcount = sqlLite.IssueScalarQuery("SELECT count(1) FROM NLogTestTable"); Assert.Equal((long)2, logcount); } finally { sqlLite.TryDropDatabase(); } } private string GetSQLiteDbProvider() { #if MONO return "Mono.Data.Sqlite.SqliteConnection, Mono.Data.Sqlite"; #elif NETSTANDARD return "Microsoft.Data.Sqlite.SqliteConnection, Microsoft.Data.Sqlite"; #else return "System.Data.SQLite.SQLiteConnection, System.Data.SQLite"; #endif } [Fact] public void SQLite_InstallAndLogMessage() { SQLiteTest sqlLite = new SQLiteTest("TestLogXml.sqlite"); // delete database just in case sqlLite.TryDropDatabase(); LogManager.ThrowExceptions = true; try { sqlLite.CreateDatabase(); var connectionString = sqlLite.GetConnectionString(); string dbProvider = GetSQLiteDbProvider(); // Create log with xml config var logFactory = new LogFactory().Setup().SetupExtensions(ext => ext.RegisterAssembly(typeof(DatabaseTarget).Assembly)).LoadConfigurationFromXml(@" <nlog xmlns='http://www.nlog-project.org/schemas/NLog.xsd' xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance' throwExceptions='true'> <targets> <target name='database' xsi:type='Database' dbProvider=""" + dbProvider + @""" connectionstring=""" + connectionString + @""" commandText='insert into NLogSqlLiteTest (Message) values (@message);'> <parameter name='@message' layout='${message}' /> <install-command ignoreFailures=""false"" text=""CREATE TABLE NLogSqlLiteTest ( Id int PRIMARY KEY, Message varchar(100) NULL );""/> </target> </targets> <rules> <logger name='*' writeTo='database' /> </rules> </nlog>").LogFactory; //install InstallationContext context = new InstallationContext(); logFactory.Configuration.Install(context); // check so table is created var tableName = sqlLite.IssueScalarQuery("SELECT name FROM sqlite_master WHERE type = 
'table' AND name = 'NLogSqlLiteTest'"); Assert.Equal("NLogSqlLiteTest", tableName); // start to log var logger = logFactory.GetLogger("SQLite"); logger.Debug("Test"); logger.Error("Test2"); logger.Info("Final test row"); // returns long var logcount = sqlLite.IssueScalarQuery("SELECT count(1) FROM NLogSqlLiteTest"); Assert.Equal((long)3, logcount); } finally { sqlLite.TryDropDatabase(); } } [Fact] public void SQLite_InstallTest() { SQLiteTest sqlLite = new SQLiteTest("TestInstallXml.sqlite"); // delete database just in case sqlLite.TryDropDatabase(); LogManager.ThrowExceptions = true; try { sqlLite.CreateDatabase(); var connectionString = sqlLite.GetConnectionString(); string dbProvider = GetSQLiteDbProvider(); // Create log with xml config var logFactory = new LogFactory().Setup().SetupExtensions(ext => ext.RegisterAssembly(typeof(DatabaseTarget).Assembly)).LoadConfigurationFromXml(@" <nlog xmlns='http://www.nlog-project.org/schemas/NLog.xsd' xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance' throwExceptions='true'> <targets> <target name='database' xsi:type='Database' dbProvider=""" + dbProvider + @""" connectionstring=""" + connectionString + @""" commandText='not_important'> <install-command ignoreFailures=""false"" text=""CREATE TABLE NLogSqlLiteTestAppNames ( Id int PRIMARY KEY, Name varchar(100) NULL ); INSERT INTO NLogSqlLiteTestAppNames(Id, Name) VALUES (1, @appName);""> <parameter name='@appName' layout='MyApp' /> </install-command> </target> </targets> <rules> <logger name='*' writeTo='database' /> </rules> </nlog>").LogFactory; //install InstallationContext context = new InstallationContext(); logFactory.Configuration.Install(context); // check so table is created var tableName = sqlLite.IssueScalarQuery("SELECT name FROM sqlite_master WHERE type = 'table' AND name = 'NLogSqlLiteTestAppNames'"); Assert.Equal("NLogSqlLiteTestAppNames", tableName); // returns long var logcount = sqlLite.IssueScalarQuery("SELECT count(*) FROM NLogSqlLiteTestAppNames"); Assert.Equal((long)1, logcount); // check if entry was correct var entryValue = sqlLite.IssueScalarQuery("SELECT Name FROM NLogSqlLiteTestAppNames WHERE ID = 1"); Assert.Equal("MyApp", entryValue); } finally { sqlLite.TryDropDatabase(); } } [Fact] public void SQLite_InstallProgramaticallyTest() { SQLiteTest sqlLite = new SQLiteTest("TestInstallProgram.sqlite"); // delete database just in case sqlLite.TryDropDatabase(); LogManager.ThrowExceptions = true; try { sqlLite.CreateDatabase(); var connectionString = sqlLite.GetConnectionString(); string dbProvider = GetSQLiteDbProvider(); DatabaseTarget testTarget = new DatabaseTarget("TestSqliteTargetInstallProgram"); testTarget.ConnectionString = connectionString; testTarget.DBProvider = dbProvider; DatabaseCommandInfo installDbCommand = new DatabaseCommandInfo { Text = "CREATE TABLE NLogSqlLiteTestAppNames (Id int PRIMARY KEY, Name varchar(100) NULL); " + "INSERT INTO NLogSqlLiteTestAppNames(Id, Name) SELECT 1, @paramOne WHERE NOT EXISTS(SELECT 1 FROM NLogSqlLiteTestAppNames WHERE Name = @paramOne);" }; installDbCommand.Parameters.Add(new DatabaseParameterInfo("@paramOne", "MyApp")); testTarget.InstallDdlCommands.Add(installDbCommand); //install InstallationContext context = new InstallationContext(); testTarget.Install(context); // check so table is created var tableName = sqlLite.IssueScalarQuery("SELECT name FROM sqlite_master WHERE type = 'table' AND name = 'NLogSqlLiteTestAppNames'"); Assert.Equal("NLogSqlLiteTestAppNames", tableName); // returns long var logcount = 
sqlLite.IssueScalarQuery("SELECT count(*) FROM NLogSqlLiteTestAppNames"); Assert.Equal((long)1, logcount); // check if entry was correct var entryValue = sqlLite.IssueScalarQuery("SELECT Name FROM NLogSqlLiteTestAppNames WHERE ID = 1"); Assert.Equal("MyApp", entryValue); } finally { sqlLite.TryDropDatabase(); } } private LogFactory SetupSqliteConfigWithInvalidInstallCommand(string databaseName) { var nlogXmlConfig = @" <nlog xmlns='http://www.nlog-project.org/schemas/NLog.xsd' xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance' throwExceptions='false'> <targets> <target name='database' xsi:type='Database' dbProvider='{0}' connectionstring='{1}' commandText='insert into RethrowingInstallExceptionsTable (Message) values (@message);'> <parameter name='@message' layout='${{message}}' /> <install-command text='THIS IS NOT VALID SQL;' /> </target> </targets> <rules> <logger name='*' writeTo='database' /> </rules> </nlog>"; // Use an in memory SQLite database // See https://www.sqlite.org/inmemorydb.html #if NETSTANDARD var connectionString = "Data Source=:memory:"; #else var connectionString = "Uri=file::memory:;Version=3"; #endif return new LogFactory().Setup().SetupExtensions(ext => ext.RegisterAssembly(typeof(DatabaseTarget).Assembly)).LoadConfigurationFromXml(String.Format(nlogXmlConfig, GetSQLiteDbProvider(), connectionString)).LogFactory; } [Fact] public void NotRethrowingInstallExceptions() { try { LogManager.ThrowExceptions = false; var logFactory = SetupSqliteConfigWithInvalidInstallCommand("not_rethrowing_install_exceptions"); // Default InstallationContext should not rethrow exceptions InstallationContext context = new InstallationContext(); Assert.False(context.IgnoreFailures, "Failures should not be ignored by default"); Assert.False(context.ThrowExceptions, "Exceptions should not be thrown by default"); var exRecorded = Record.Exception(() => logFactory.Configuration.Install(context)); Assert.Null(exRecorded); } finally { LogManager.ThrowExceptions = true; } } [Fact] public void RethrowingInstallExceptions() { try { LogManager.ThrowExceptions = false; var logFactory = SetupSqliteConfigWithInvalidInstallCommand("rethrowing_install_exceptions"); InstallationContext context = new InstallationContext() { ThrowExceptions = true }; Assert.True(context.ThrowExceptions); // Sanity check #if MONO || NETSTANDARD Assert.Throws<SqliteException>(() => logFactory.Configuration.Install(context)); #else Assert.Throws<SQLiteException>(() => logFactory.Configuration.Install(context)); #endif } finally { LogManager.ThrowExceptions = true; } } [Fact] public void SqlServer_NoTargetInstallException() { if (IsLinux()) { Console.WriteLine("skipping test SqlServer_NoTargetInstallException because we are running in Travis"); return; } bool isAppVeyor = IsAppVeyor(); SqlServerTest.TryDropDatabase(isAppVeyor); try { SqlServerTest.CreateDatabase(isAppVeyor); var connectionString = SqlServerTest.GetConnectionString(isAppVeyor); DatabaseTarget testTarget = new DatabaseTarget("TestDbTarget"); testTarget.ConnectionString = connectionString; testTarget.InstallDdlCommands.Add(new DatabaseCommandInfo() { CommandType = CommandType.Text, Text = $@" IF EXISTS (SELECT * FROM INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA = 'dbo' AND TABLE_NAME = 'NLogTestTable') RETURN CREATE TABLE [Dbo].[NLogTestTable] ( [ID] [int] IDENTITY(1,1) NOT NULL, [MachineName] [nvarchar](200) NULL)" }); using (var context = new InstallationContext()) { testTarget.Install(context); } var tableCatalog = SqlServerTest.IssueScalarQuery(isAppVeyor, 
@"SELECT TABLE_NAME FROM NLogTest.INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE = 'BASE TABLE' AND TABLE_NAME = 'NLogTestTable' "); //check if table exists Assert.Equal("NLogTestTable", tableCatalog); } finally { SqlServerTest.TryDropDatabase(isAppVeyor); } } [Fact] public void SqlServer_InstallAndLogMessage() { if (IsLinux()) { Console.WriteLine("skipping test SqlServer_InstallAndLogMessage because we are running in Travis"); return; } bool isAppVeyor = IsAppVeyor(); SqlServerTest.TryDropDatabase(isAppVeyor); try { SqlServerTest.CreateDatabase(isAppVeyor); var connectionString = SqlServerTest.GetConnectionString(IsAppVeyor()); var logFactory = new LogFactory().Setup().SetupExtensions(ext => ext.RegisterAssembly(typeof(DatabaseTarget).Assembly)).LoadConfigurationFromXml(@" <nlog xmlns='http://www.nlog-project.org/schemas/NLog.xsd' xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance' throwExceptions='true'> <targets> <target name='database' xsi:type='Database' connectionstring=""" + connectionString + @""" commandText='insert into dbo.NLogSqlServerTest (Uid, LogDate) values (@uid, @logdate);'> <parameter name='@uid' layout='${event-properties:uid}' /> <parameter name='@logdate' layout='${date}' /> <install-command ignoreFailures=""false"" text=""CREATE TABLE dbo.NLogSqlServerTest ( Id int NOT NULL IDENTITY(1,1) PRIMARY KEY CLUSTERED, Uid uniqueidentifier NULL, LogDate date NULL );""/> </target> </targets> <rules> <logger name='*' writeTo='database' /> </rules> </nlog>").LogFactory; //install InstallationContext context = new InstallationContext(); logFactory.Configuration.Install(context); var tableCatalog = SqlServerTest.IssueScalarQuery(isAppVeyor, @"SELECT TABLE_CATALOG FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'Dbo' AND TABLE_NAME = 'NLogSqlServerTest'"); //check if table exists Assert.Equal("NLogTest", tableCatalog); var logger = logFactory.GetLogger("A"); var target = logFactory.Configuration.FindTargetByName<DatabaseTarget>("database"); var uid = new Guid("e7c648b4-3508-4df2-b001-753148659d6d"); var logEvent = new LogEventInfo(LogLevel.Info, null, null); logEvent.Properties["uid"] = uid; logger.Log(logEvent); var count = SqlServerTest.IssueScalarQuery(isAppVeyor, "SELECT count(1) FROM dbo.NLogSqlServerTest"); Assert.Equal(1, count); var result = SqlServerTest.IssueScalarQuery(isAppVeyor, "SELECT Uid FROM dbo.NLogSqlServerTest"); Assert.Equal(uid, result); var result2 = SqlServerTest.IssueScalarQuery(isAppVeyor, "SELECT LogDate FROM dbo.NLogSqlServerTest"); Assert.Equal(DateTime.Today, result2); } finally { SqlServerTest.TryDropDatabase(isAppVeyor); } } #if !NETSTANDARD [Fact] public void GetProviderNameFromAppConfig() { LogManager.ThrowExceptions = true; var databaseTarget = new DatabaseTarget() { Name = "myTarget", ConnectionStringName = "test_connectionstring_with_providerName", CommandText = "notimportant", }; databaseTarget.ConnectionStringsSettings = new ConnectionStringSettingsCollection() { new ConnectionStringSettings("test_connectionstring_without_providerName", "some connectionstring"), new ConnectionStringSettings("test_connectionstring_with_providerName", "some connectionstring", "System.Data.SqlClient"), }; new LogFactory().Setup().LoadConfiguration(cfg => cfg.Configuration.AddRuleForAllLevels(databaseTarget)); Assert.NotNull(databaseTarget.ProviderFactory); Assert.Equal(typeof(SqlClientFactory), databaseTarget.ProviderFactory.GetType()); } [Fact] public void DontRequireProviderNameInAppConfig() { LogManager.ThrowExceptions = true; var databaseTarget = new 
DatabaseTarget() { Name = "myTarget", ConnectionStringName = "test_connectionstring_without_providerName", CommandText = "notimportant", DBProvider = "System.Data.SqlClient" }; databaseTarget.ConnectionStringsSettings = new ConnectionStringSettingsCollection() { new ConnectionStringSettings("test_connectionstring_without_providerName", "some connectionstring"), new ConnectionStringSettings("test_connectionstring_with_providerName", "some connectionstring", "System.Data.SqlClient"), }; new LogFactory().Setup().LoadConfiguration(cfg => cfg.Configuration.AddRuleForAllLevels(databaseTarget)); Assert.NotNull(databaseTarget.ProviderFactory); Assert.Equal(typeof(SqlClientFactory), databaseTarget.ProviderFactory.GetType()); } [Fact] public void GetProviderNameFromConnectionString() { LogManager.ThrowExceptions = true; var databaseTarget = new DatabaseTarget() { Name = "myTarget", ConnectionStringName = "test_connectionstring_with_providerName", CommandText = "notimportant", }; databaseTarget.ConnectionStringsSettings = new ConnectionStringSettingsCollection() { new ConnectionStringSettings("test_connectionstring_with_providerName", "metadata=res://*/Model.csdl|res://*/Model.ssdl|res://*/Model.msl;provider=System.Data.SqlClient;provider connection string=\"data source=192.168.0.100;initial catalog=TEST_DB;user id=myUser;password=SecretPassword;multipleactiveresultsets=True;application name=EntityFramework\"", "System.Data.EntityClient"), }; new LogFactory().Setup().LoadConfiguration(cfg => cfg.Configuration.AddRuleForAllLevels(databaseTarget)); Assert.NotNull(databaseTarget.ProviderFactory); Assert.Equal(typeof(SqlClientFactory), databaseTarget.ProviderFactory.GetType()); Assert.Equal("data source=192.168.0.100;initial catalog=TEST_DB;user id=myUser;password=SecretPassword;multipleactiveresultsets=True;application name=EntityFramework", ((NLog.Layouts.SimpleLayout)databaseTarget.ConnectionString).FixedText); } #endif [Theory] [InlineData("localhost", "MyDatabase", "user", "password", "Server=localhost;User id=user;Password=password;Database=MyDatabase")] [InlineData("localhost", null, "user", "password", "Server=localhost;User id=user;Password=password;")] [InlineData("localhost", "MyDatabase", "user", "'password'", "Server=localhost;User id=user;Password='password';Database=MyDatabase")] [InlineData("localhost", "MyDatabase", "user", "\"password\"", "Server=localhost;User id=user;Password=\"password\";Database=MyDatabase")] [InlineData("localhost", "MyDatabase", "user", "pa;ssword", "Server=localhost;User id=user;Password='pa;ssword';Database=MyDatabase")] [InlineData("localhost", "MyDatabase", "user", "pa'ssword", "Server=localhost;User id=user;Password=\"pa'ssword\";Database=MyDatabase")] [InlineData("localhost", "MyDatabase", "user", "pa'\"ssword", "Server=localhost;User id=user;Password=\"pa'\"\"ssword\";Database=MyDatabase")] [InlineData("localhost", "MyDatabase", "user", "pa\"ssword", "Server=localhost;User id=user;Password='pa\"ssword';Database=MyDatabase")] [InlineData("localhost", "MyDatabase", "user", "", "Server=localhost;User id=user;Password=;Database=MyDatabase")] [InlineData("localhost", "MyDatabase", null, "password", "Server=localhost;Trusted_Connection=SSPI;Database=MyDatabase")] public void DatabaseConnectionStringTest(string host, string database, string username, string password, string expected) { // Arrange var databaseTarget = new NonLoggingDatabaseTarget() { CommandText = "DoSomething", Name = "myTarget", DBHost = host, DBDatabase = database, DBUserName = username, 
DBPassword = password }; var logEventInfo = LogEventInfo.CreateNullEvent(); // Act var result = databaseTarget.GetRenderedConnectionString(logEventInfo); // Assert Assert.Equal(expected, result); } [Theory] [InlineData("password", "password")] [InlineData("", "")] [InlineData("password'", "\"password'\"")] public void DatabaseConnectionStringViaVariableTest(string password, string expectedPassword) { // Arrange var databaseTarget = new NonLoggingDatabaseTarget() { CommandText = "DoSomething", Name = "myTarget", DBHost = "localhost", DBDatabase = "MyDatabase", DBUserName = "user", DBPassword = "${event-properties:myPassword}" }; var logEventInfo = LogEventInfo.Create(LogLevel.Debug, "logger1", "message1"); logEventInfo.Properties["myPassword"] = password; // Act var result = databaseTarget.GetRenderedConnectionString(logEventInfo); // Assert var expected = $"Server=localhost;User id=user;Password={expectedPassword};Database=MyDatabase"; Assert.Equal(expected, result); } private static void AssertLog(string expectedLog) { Assert.Equal(expectedLog.Replace("\r", ""), MockDbConnection.Log.Replace("\r", "")); } private string GetConnectionString(DatabaseTarget dt) { MockDbConnection.ClearLog(); dt.DBProvider = typeof(MockDbConnection).AssemblyQualifiedName; dt.CommandText = "NotImportant"; var logFactory = new LogFactory().Setup().LoadConfiguration(cfg => cfg.Configuration.AddRuleForAllLevels(dt)).LogFactory; var exceptions = new List<Exception>(); dt.WriteAsyncLogEvent(new LogEventInfo(LogLevel.Info, "Logger1", "msg1").WithContinuation(exceptions.Add)); logFactory.Shutdown(); return MockDbConnection.LastConnectionString; } public class MockDbConnection : IDbConnection { public static string Log { get; private set; } public static string LastConnectionString { get; private set; } public MockDbConnection() { } public MockDbConnection(string connectionString) { ConnectionString = connectionString; } public IDbTransaction BeginTransaction(IsolationLevel il) { AddToLog("DbTransaction.Begin({0})", il); return new MockDbTransaction(this, il); } public IDbTransaction BeginTransaction() { return BeginTransaction(IsolationLevel.ReadCommitted); } public void ChangeDatabase(string databaseName) { throw new NotImplementedException(); } public void Close() { AddToLog("Close()"); } public string ConnectionString { get; set; } public int ConnectionTimeout => throw new NotImplementedException(); public IDbCommand CreateCommand() { return new MockDbCommand() { Connection = this }; } public string Database => throw new NotImplementedException(); public void Open() { LastConnectionString = ConnectionString; AddToLog("Open('{0}').", ConnectionString); if (ConnectionString == "cannotconnect") { throw new ApplicationException("Cannot open fake database."); } } public ConnectionState State => throw new NotImplementedException(); public string AccessToken { get; set; } public void Dispose() { AddToLog("Dispose()"); } public static void ClearLog() { Log = string.Empty; } public void AddToLog(string message, params object[] args) { if (args.Length > 0) { message = string.Format(CultureInfo.InvariantCulture, message, args); } Log += message + "\r\n"; } } private class NonLoggingDatabaseTarget : DatabaseTarget { public string GetRenderedConnectionString(LogEventInfo logEventInfo) { return base.BuildConnectionString(logEventInfo); } } private class MockDbCommand : IDbCommand { private int paramCount; private IDataParameterCollection parameters; public MockDbCommand() { parameters = new MockParameterCollection(this); } 
public void Cancel() { throw new NotImplementedException(); } public string CommandText { get; set; } public int CommandTimeout { get; set; } public CommandType CommandType { get; set; } public IDbConnection Connection { get; set; } public IDbTransaction Transaction { get; set; } public IDbDataParameter CreateParameter() { ((MockDbConnection)Connection).AddToLog("CreateParameter({0})", paramCount); return new MockDbParameter(this, paramCount++); } public int ExecuteNonQuery() { if (Transaction != null) ((MockDbConnection)Connection).AddToLog("ExecuteNonQuery (DbTransaction={0}): {1}", Transaction.Connection != null ? "Active" : "Disposed", CommandText); else ((MockDbConnection)Connection).AddToLog("ExecuteNonQuery: {0}", CommandText); if (Connection.ConnectionString == "cannotexecute") { throw new ApplicationException("Failure during ExecuteNonQuery"); } return 0; } public IDataReader ExecuteReader(CommandBehavior behavior) { throw new NotImplementedException(); } public IDataReader ExecuteReader() { throw new NotImplementedException(); } public object ExecuteScalar() { throw new NotImplementedException(); } public IDataParameterCollection Parameters => parameters; public void Prepare() { throw new NotImplementedException(); } public UpdateRowSource UpdatedRowSource { get => throw new NotImplementedException(); set => throw new NotImplementedException(); } public void Dispose() { Transaction = null; Connection = null; } } private class MockDbParameter : IDbDataParameter { private readonly MockDbCommand mockDbCommand; private readonly int paramId; private string parameterName; private object parameterValue; private DbType parameterType; public MockDbParameter(MockDbCommand mockDbCommand, int paramId) { this.mockDbCommand = mockDbCommand; this.paramId = paramId; } public DbType DbType { get { return parameterType; } set { ((MockDbConnection)mockDbCommand.Connection).AddToLog("Parameter #{0} DbType={1}", paramId, value); parameterType = value; } } public DbType MockDbType { get { return parameterType; } set { ((MockDbConnection)mockDbCommand.Connection).AddToLog("Parameter #{0} MockDbType={1}", paramId, value); parameterType = value; } } public ParameterDirection Direction { get => throw new NotImplementedException(); set => ((MockDbConnection)mockDbCommand.Connection).AddToLog("Parameter #{0} Direction={1}", paramId, value); } public bool IsNullable => throw new NotImplementedException(); public string ParameterName { get => parameterName; set { ((MockDbConnection)mockDbCommand.Connection).AddToLog("Parameter #{0} Name={1}", paramId, value); parameterName = value; } } public string SourceColumn { get => throw new NotImplementedException(); set => throw new NotImplementedException(); } public DataRowVersion SourceVersion { get => throw new NotImplementedException(); set => throw new NotImplementedException(); } public object Value { get => parameterValue; set { object valueOutput = value is string valueString ? 
$"\"{valueString}\"" : value; ((MockDbConnection)mockDbCommand.Connection).AddToLog("Parameter #{0} Value={1}", paramId, valueOutput); parameterValue = value; } } public byte Precision { get => throw new NotImplementedException(); set => ((MockDbConnection)mockDbCommand.Connection).AddToLog("Parameter #{0} Precision={1}", paramId, value); } public byte Scale { get => throw new NotImplementedException(); set => ((MockDbConnection)mockDbCommand.Connection).AddToLog("Parameter #{0} Scale={1}", paramId, value); } public int Size { get => throw new NotImplementedException(); set => ((MockDbConnection)mockDbCommand.Connection).AddToLog("Parameter #{0} Size={1}", paramId, value); } public override string ToString() { return "Parameter #" + paramId; } } private class MockParameterCollection : IDataParameterCollection { private readonly MockDbCommand command; public MockParameterCollection(MockDbCommand command) { this.command = command; } public IEnumerator GetEnumerator() { throw new NotImplementedException(); } public void CopyTo(Array array, int index) { throw new NotImplementedException(); } public int Count => throw new NotImplementedException(); public object SyncRoot => throw new NotImplementedException(); public bool IsSynchronized => throw new NotImplementedException(); public int Add(object value) { ((MockDbConnection)command.Connection).AddToLog("Add Parameter {0}", value); return 0; } public bool Contains(object value) { throw new NotImplementedException(); } public void Clear() { throw new NotImplementedException(); } public int IndexOf(object value) { throw new NotImplementedException(); } public void Insert(int index, object value) { throw new NotImplementedException(); } public void Remove(object value) { throw new NotImplementedException(); } public void RemoveAt(int index) { throw new NotImplementedException(); } object IList.this[int index] { get => throw new NotImplementedException(); set => throw new NotImplementedException(); } public bool IsReadOnly => throw new NotImplementedException(); public bool IsFixedSize => throw new NotImplementedException(); public bool Contains(string parameterName) { throw new NotImplementedException(); } public int IndexOf(string parameterName) { throw new NotImplementedException(); } public void RemoveAt(string parameterName) { throw new NotImplementedException(); } object IDataParameterCollection.this[string parameterName] { get => throw new NotImplementedException(); set => throw new NotImplementedException(); } } public class MockDbTransaction : IDbTransaction { public IDbConnection Connection { get; private set; } public IsolationLevel IsolationLevel { get; } public MockDbTransaction(IDbConnection connection, IsolationLevel isolationLevel) { Connection = connection; IsolationLevel = isolationLevel; } public void Commit() { if (Connection is null) throw new NotSupportedException(); ((MockDbConnection)Connection).AddToLog("DbTransaction.Commit()"); } public void Dispose() { ((MockDbConnection)Connection).AddToLog("DbTransaction.Dispose()"); Connection = null; } public void Rollback() { if (Connection is null) throw new NotSupportedException(); ((MockDbConnection)Connection).AddToLog("DbTransaction.Rollback()"); } } public class MockDbFactory : DbProviderFactory { public static readonly MockDbFactory Instance = new MockDbFactory(); public override DbConnection CreateConnection() { return new MockDbConnection2(); } } public class MockDbConnection2 : DbConnection { public static int OpenCount { get; private set; } public static string 
LastOpenConnectionString { get; private set; } protected override DbTransaction BeginDbTransaction(IsolationLevel isolationLevel) { throw new NotImplementedException(); } public override void ChangeDatabase(string databaseName) { throw new NotImplementedException(); } public override void Close() { throw new NotImplementedException(); } public override string ConnectionString { get; set; } protected override DbCommand CreateDbCommand() { throw new NotImplementedException(); } public override string DataSource => throw new NotImplementedException(); public override string Database => throw new NotImplementedException(); public override void Open() { LastOpenConnectionString = ConnectionString; OpenCount++; } public override string ServerVersion => throw new NotImplementedException(); public override ConnectionState State => throw new NotImplementedException(); } private class SQLiteTest { private string dbName = "NLogTest.sqlite"; private string connectionString; public SQLiteTest(string dbName) { this.dbName = dbName; #if NETSTANDARD connectionString = "Data Source=" + this.dbName; #else connectionString = "Data Source=" + this.dbName + ";Version=3;"; #endif } public string GetConnectionString() { return connectionString; } public void CreateDatabase() { if (DatabaseExists()) { TryDropDatabase(); } SQLiteHandler.CreateDatabase(dbName); } public bool DatabaseExists() { return File.Exists(dbName); } public void TryDropDatabase() { try { if (DatabaseExists()) { File.Delete(dbName); } } catch { } } public void IssueCommand(string commandString) { using (DbConnection connection = SQLiteHandler.GetConnection(connectionString)) { connection.Open(); using (DbCommand command = SQLiteHandler.CreateCommand(commandString, connection)) { command.ExecuteNonQuery(); } } } public object IssueScalarQuery(string commandString) { using (DbConnection connection = SQLiteHandler.GetConnection(connectionString)) { connection.Open(); using (DbCommand command = SQLiteHandler.CreateCommand(commandString, connection)) { var scalar = command.ExecuteScalar(); return scalar; } } } } private static class SQLiteHandler { public static void CreateDatabase(string dbName) { #if NETSTANDARD // Using ConnectionString Mode=ReadWriteCreate #elif MONO SqliteConnection.CreateFile(dbName); #else SQLiteConnection.CreateFile(dbName); #endif } public static DbConnection GetConnection(string connectionString) { #if NETSTANDARD return new SqliteConnection(connectionString + ";Mode=ReadWriteCreate;"); #elif MONO return new SqliteConnection(connectionString); #else return new SQLiteConnection(connectionString); #endif } public static DbCommand CreateCommand(string commandString, DbConnection connection) { #if MONO || NETSTANDARD return new SqliteCommand(commandString, (SqliteConnection)connection); #else return new SQLiteCommand(commandString, (SQLiteConnection)connection); #endif } } private static class SqlServerTest { static SqlServerTest() { } public static string GetConnectionString(bool isAppVeyor) { string connectionString = string.Empty; #if !NETSTANDARD connectionString = ConfigurationManager.AppSettings["SqlServerTestConnectionString"]; #endif if (String.IsNullOrWhiteSpace(connectionString)) { connectionString = isAppVeyor ? 
AppVeyorConnectionStringNLogTest : LocalConnectionStringNLogTest; } return connectionString; } /// <summary> /// AppVeyor connectionstring for SQL 2019, see https://www.appveyor.com/docs/services-databases/ /// </summary> private const string AppVeyorConnectionStringMaster = @"Server=(local)\SQL2019;Database=master;User ID=sa;Password=Password12!"; private const string AppVeyorConnectionStringNLogTest = @"Server=(local)\SQL2019;Database=NLogTest;User ID=sa;Password=Password12!"; private const string LocalConnectionStringMaster = @"Data Source=(localdb)\MSSQLLocalDB; Database=master; Integrated Security=True;"; private const string LocalConnectionStringNLogTest = @"Data Source=(localdb)\MSSQLLocalDB; Database=NLogTest; Integrated Security=True;"; public static void CreateDatabase(bool isAppVeyor) { var connectionString = GetMasterConnectionString(isAppVeyor); IssueCommand(IsAppVeyor(), "CREATE DATABASE NLogTest", connectionString); } public static bool NLogTestDatabaseExists(bool isAppVeyor) { var connectionString = GetMasterConnectionString(isAppVeyor); var dbId = IssueScalarQuery(IsAppVeyor(), "select db_id('NLogTest')", connectionString); return dbId != null && dbId != DBNull.Value; } private static string GetMasterConnectionString(bool isAppVeyor) { return isAppVeyor ? AppVeyorConnectionStringMaster : LocalConnectionStringMaster; } public static void IssueCommand(bool isAppVeyor, string commandString, string connectionString = null) { using (var connection = new SqlConnection(connectionString ?? GetConnectionString(isAppVeyor))) { connection.Open(); if (connectionString is null) connection.ChangeDatabase("NLogTest"); using (var command = new SqlCommand(commandString, connection)) { command.ExecuteNonQuery(); } } } public static object IssueScalarQuery(bool isAppVeyor, string commandString, string connectionString = null) { using (var connection = new SqlConnection(connectionString ?? GetConnectionString(isAppVeyor))) { connection.Open(); if (connectionString is null) connection.ChangeDatabase("NLogTest"); using (var command = new SqlCommand(commandString, connection)) { var scalar = command.ExecuteScalar(); return scalar; } } } /// <summary> /// Try dropping. IF fail, not exception /// </summary> public static bool TryDropDatabase(bool isAppVeyor) { try { if (NLogTestDatabaseExists(isAppVeyor)) { var connectionString = GetMasterConnectionString(isAppVeyor); IssueCommand(isAppVeyor, "ALTER DATABASE [NLogTest] SET SINGLE_USER WITH ROLLBACK IMMEDIATE; DROP DATABASE NLogTest;", connectionString); return true; } return false; } catch (Exception) { //ignore return false; } } } protected static bool IsAppVeyor() { var val = Environment.GetEnvironmentVariable("APPVEYOR"); return val != null && val.Equals("true", StringComparison.OrdinalIgnoreCase); } protected static bool IsLinux() { var val = Environment.GetEnvironmentVariable("WINDIR"); return string.IsNullOrEmpty(val); } } }
1
23,028
Now `dbType=' MockDbType.int32 '` is correctly interpreted and the value is rendered as an integer instead of a string; hence no more double quotes.
NLog-NLog
.cs
@@ -63,7 +63,7 @@ class Comment extends AbstractAggregateRoot public function changeContent(string $contend): void { if ($contend !== $this->content) { - $this->apply(new CommentContentChangedEvent($this->id, $this->content, $contend, new \DateTime())); + $this->apply(new CommentContentChangedEvent($this->id, $contend, new \DateTime())); } }
1
<?php /** * Copyright © Bold Brand Commerce Sp. z o.o. All rights reserved. * See LICENSE.txt for license details. */ declare(strict_types=1); namespace Ergonode\Comment\Domain\Entity; use Ergonode\SharedKernel\Domain\Aggregate\UserId; use Ergonode\EventSourcing\Domain\AbstractAggregateRoot; use Ergonode\Comment\Domain\Event\CommentContentChangedEvent; use Ergonode\Comment\Domain\Event\CommentCreatedEvent; use Ergonode\SharedKernel\Domain\Aggregate\CommentId; use JMS\Serializer\Annotation as JMS; use Ramsey\Uuid\Uuid; class Comment extends AbstractAggregateRoot { /** * @JMS\Type("Ergonode\SharedKernel\Domain\Aggregate\CommentId") */ private CommentId $id; /** * @JMS\Type("Ergonode\SharedKernel\Domain\Aggregate\UserId") */ private UserId $authorId; /** * @JMS\Type("uuid") */ private Uuid $objectId; /** * @JMS\Type("DateTime") */ private \DateTime $createdAt; /** * @JMS\Type("DateTime") */ private ?\DateTime $editedAt = null; /** * @JMS\Type("string") */ private string $content; /** * @throws \Exception */ public function __construct(CommentId $id, Uuid $objectId, UserId $authorId, string $content) { $this->apply(new CommentCreatedEvent($id, $authorId, $objectId, $content, new \DateTime())); } /** * @throws \Exception */ public function changeContent(string $contend): void { if ($contend !== $this->content) { $this->apply(new CommentContentChangedEvent($this->id, $this->content, $contend, new \DateTime())); } } public function getId(): CommentId { return $this->id; } public function getAuthorId(): UserId { return $this->authorId; } public function getObjectId(): Uuid { return $this->objectId; } public function getCreatedAt(): \DateTime { return $this->createdAt; } public function getEditedAt(): ?\DateTime { return $this->editedAt; } public function getContent(): string { return $this->content; } protected function applyCommentCreatedEvent(CommentCreatedEvent $event): void { $this->id = $event->getAggregateId(); $this->authorId = $event->getAuthorId(); $this->objectId = $event->getObjectId(); $this->createdAt = $event->getCreatedAt(); $this->content = $event->getContent(); } protected function applyCommentContentChangedEvent(CommentContentChangedEvent $event): void { $this->content = $event->getTo(); $this->editedAt = $event->getEditedAt(); } }
1
9,096
typo in `$contend`
ergonode-backend
php
@@ -28,9 +28,13 @@ import com.netflix.iceberg.TableOperations; import com.netflix.iceberg.Tables; import com.netflix.iceberg.exceptions.AlreadyExistsException; import com.netflix.iceberg.exceptions.NoSuchTableException; +import com.netflix.iceberg.exceptions.RuntimeIOException; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; + +import java.io.IOException; import java.util.Map; import static com.netflix.iceberg.TableMetadata.newTableMetadata;
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package com.netflix.iceberg.hadoop; import com.netflix.iceberg.BaseTable; import com.netflix.iceberg.PartitionSpec; import com.netflix.iceberg.Schema; import com.netflix.iceberg.Table; import com.netflix.iceberg.TableMetadata; import com.netflix.iceberg.TableOperations; import com.netflix.iceberg.Tables; import com.netflix.iceberg.exceptions.AlreadyExistsException; import com.netflix.iceberg.exceptions.NoSuchTableException; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import java.util.Map; import static com.netflix.iceberg.TableMetadata.newTableMetadata; /** * Implementation of Iceberg tables that uses the Hadoop FileSystem * to store metadata and manifests. */ public class HadoopTables implements Tables, Configurable { private Configuration conf; public HadoopTables() { this(new Configuration()); } public HadoopTables(Configuration conf) { this.conf = conf; } /** * Loads the table location from a FileSystem path location. * * @param location a path URI (e.g. hdfs:///warehouse/my_table/) * @return table implementation */ @Override public Table load(String location) { TableOperations ops = newTableOps(location); if (ops.current() == null) { throw new NoSuchTableException("Table does not exist at location: " + location); } return new BaseTable(ops, location); } /** * Create a table using the FileSystem implementation resolve from * location. * * @param schema iceberg schema used to create the table * @param spec partition specification * @param location a path URI (e.g. hdfs:///warehouse/my_table) * @return newly created table implementation */ @Override public Table create(Schema schema, PartitionSpec spec, Map<String, String> properties, String location) { TableOperations ops = newTableOps(location); if (ops.current() != null) { throw new AlreadyExistsException("Table already exists at location: " + location); } TableMetadata metadata = newTableMetadata(ops, schema, spec, location, properties); ops.commit(null, metadata); return new BaseTable(ops, location); } private TableOperations newTableOps(String location) { return new HadoopTableOperations(new Path(location), conf); } @Override public void setConf(Configuration conf) { this.conf = conf; } @Override public Configuration getConf() { return conf; } }
1
12,975
Nit: empty line
apache-iceberg
java
@@ -185,6 +185,10 @@ func (fs *KBFSOpsStandard) DeleteFavorite(ctx context.Context, } func (fs *KBFSOpsStandard) getOpsNoAdd(fb FolderBranch) *folderBranchOps { + if fb == (FolderBranch{}) { + panic("zero FolderBranch in getOps") + } + fs.opsLock.RLock() if ops, ok := fs.ops[fb]; ok { fs.opsLock.RUnlock()
1
// Copyright 2016 Keybase Inc. All rights reserved. // Use of this source code is governed by a BSD // license that can be found in the LICENSE file. package libkbfs import ( "errors" "fmt" "sync" "time" "github.com/keybase/client/go/logger" "golang.org/x/net/context" ) // KBFSOpsStandard implements the KBFSOps interface, and is go-routine // safe by forwarding requests to individual per-folder-branch // handlers that are go-routine-safe. type KBFSOpsStandard struct { config Config log logger.Logger deferLog logger.Logger ops map[FolderBranch]*folderBranchOps opsByFav map[Favorite]*folderBranchOps opsLock sync.RWMutex // reIdentifyControlChan controls reidentification. // Sending a value to this channel forces all fbos // to be marked for revalidation. // Closing this channel will shutdown the reidentification // watcher. reIdentifyControlChan chan struct{} favs *Favorites currentStatus kbfsCurrentStatus } var _ KBFSOps = (*KBFSOpsStandard)(nil) // NewKBFSOpsStandard constructs a new KBFSOpsStandard object. func NewKBFSOpsStandard(config Config) *KBFSOpsStandard { log := config.MakeLogger("") kops := &KBFSOpsStandard{ config: config, log: log, deferLog: log.CloneWithAddedDepth(1), ops: make(map[FolderBranch]*folderBranchOps), opsByFav: make(map[Favorite]*folderBranchOps), reIdentifyControlChan: make(chan struct{}), favs: NewFavorites(config), } kops.currentStatus.Init() go kops.markForReIdentifyIfNeededLoop() return kops } func (fs *KBFSOpsStandard) markForReIdentifyIfNeededLoop() { maxValid := fs.config.TLFValidDuration() // Tests and some users fail to set this properly. if maxValid <= 10*time.Second || maxValid > 24*365*time.Hour { maxValid = tlfValidDurationDefault } // Tick ten times the rate of valid duration allowing only overflows of +-10% ticker := time.NewTicker(maxValid / 10) for { var now time.Time select { // Normal case: feed the current time from config and mark fbos needing validation. case <-ticker.C: now = fs.config.Clock().Now() // Mark everything for reidentification via now being the empty value or quit. case _, ok := <-fs.reIdentifyControlChan: if !ok { ticker.Stop() return } } fs.markForReIdentifyIfNeeded(now, maxValid) } } func (fs *KBFSOpsStandard) markForReIdentifyIfNeeded(now time.Time, maxValid time.Duration) { fs.opsLock.Lock() defer fs.opsLock.Unlock() for _, fbo := range fs.ops { fbo.markForReIdentifyIfNeeded(now, maxValid) } } // Shutdown safely shuts down any background goroutines that may have // been launched by KBFSOpsStandard. func (fs *KBFSOpsStandard) Shutdown() error { close(fs.reIdentifyControlChan) var errors []error if err := fs.favs.Shutdown(); err != nil { errors = append(errors, err) } for _, ops := range fs.ops { if err := ops.Shutdown(); err != nil { errors = append(errors, err) // Continue on and try to shut down the other FBOs. } } if len(errors) == 1 { return errors[0] } else if len(errors) > 1 { // Aggregate errors return fmt.Errorf("Multiple errors on shutdown: %v", errors) } return nil } // PushConnectionStatusChange pushes human readable connection status changes. func (fs *KBFSOpsStandard) PushConnectionStatusChange(service string, newStatus error) { fs.currentStatus.PushConnectionStatusChange(service, newStatus) } // GetFavorites implements the KBFSOps interface for // KBFSOpsStandard. func (fs *KBFSOpsStandard) GetFavorites(ctx context.Context) ( []Favorite, error) { return fs.favs.Get(ctx) } // RefreshCachedFavorites implements the KBFSOps interface for // KBFSOpsStandard. 
func (fs *KBFSOpsStandard) RefreshCachedFavorites(ctx context.Context) { fs.favs.RefreshCache(ctx) } // AddFavorite implements the KBFSOps interface for KBFSOpsStandard. func (fs *KBFSOpsStandard) AddFavorite(ctx context.Context, fav Favorite) error { kbpki := fs.config.KBPKI() _, _, err := kbpki.GetCurrentUserInfo(ctx) isLoggedIn := err == nil if isLoggedIn { err := fs.favs.Add(ctx, favToAdd{Favorite: fav, created: false}) if err != nil { return err } } return nil } // DeleteFavorite implements the KBFSOps interface for // KBFSOpsStandard. func (fs *KBFSOpsStandard) DeleteFavorite(ctx context.Context, fav Favorite) error { kbpki := fs.config.KBPKI() _, _, err := kbpki.GetCurrentUserInfo(ctx) isLoggedIn := err == nil // Let this ops remove itself, if we have one available. ops := func() *folderBranchOps { fs.opsLock.Lock() defer fs.opsLock.Unlock() return fs.opsByFav[fav] }() if ops != nil { err := ops.deleteFromFavorites(ctx, fs.favs) if _, ok := err.(OpsCantHandleFavorite); !ok { return err } // If the ops couldn't handle the delete, fall through to // going directly via Favorites. } if isLoggedIn { err := fs.favs.Delete(ctx, fav) if err != nil { return err } } // TODO: Shut down the running folderBranchOps, if one exists? // What about open file handles? return nil } func (fs *KBFSOpsStandard) getOpsNoAdd(fb FolderBranch) *folderBranchOps { fs.opsLock.RLock() if ops, ok := fs.ops[fb]; ok { fs.opsLock.RUnlock() return ops } fs.opsLock.RUnlock() fs.opsLock.Lock() defer fs.opsLock.Unlock() // look it up again in case someone else got the lock ops, ok := fs.ops[fb] if !ok { // TODO: add some interface for specifying the type of the // branch; for now assume online and read-write. ops = newFolderBranchOps(fs.config, fb, standard) fs.ops[fb] = ops } return ops } func (fs *KBFSOpsStandard) getOps( ctx context.Context, fb FolderBranch) *folderBranchOps { ops := fs.getOpsNoAdd(fb) if err := ops.addToFavorites(ctx, fs.favs, false); err != nil { // Failure to favorite shouldn't cause a failure. Just log // and move on. fs.log.CDebugf(ctx, "Couldn't add favorite: %v", err) } return ops } func (fs *KBFSOpsStandard) getOpsByNode(ctx context.Context, node Node) *folderBranchOps { return fs.getOps(ctx, node.GetFolderBranch()) } func (fs *KBFSOpsStandard) getOpsByHandle(ctx context.Context, handle *TlfHandle, fb FolderBranch) *folderBranchOps { ops := fs.getOps(ctx, fb) fs.opsLock.Lock() defer fs.opsLock.Unlock() // Track under its name, so we can later tell it to remove itself // from the favorites list. TODO: fix this when unresolved // assertions are allowed and become resolved. 
fs.opsByFav[handle.ToFavorite()] = ops return ops } // GetTLFCryptKeys implements the KBFSOps interface for // KBFSOpsStandard func (fs *KBFSOpsStandard) GetTLFCryptKeys(ctx context.Context, tlfHandle *TlfHandle) (keys []TLFCryptKey, id TlfID, err error) { var rmd ImmutableRootMetadata _, rmd, id, err = fs.getOrInitializeNewMDMaster( ctx, fs.config.MDOps(), tlfHandle, true) if err != nil { return keys, id, err } keys, err = fs.config.KeyManager().GetTLFCryptKeyOfAllGenerations(ctx, rmd) return keys, id, err } func (fs *KBFSOpsStandard) getOrInitializeNewMDMaster( ctx context.Context, mdops MDOps, h *TlfHandle, create bool) (initialized bool, md ImmutableRootMetadata, id TlfID, err error) { id, md, err = mdops.GetForHandle(ctx, h, Merged) if err != nil { return false, ImmutableRootMetadata{}, id, err } if md != (ImmutableRootMetadata{}) { return false, md, id, nil } if id == (TlfID{}) { return false, ImmutableRootMetadata{}, id, errors.New("No ID or MD") } if !create { return false, ImmutableRootMetadata{}, id, nil } // Init new MD. fb := FolderBranch{Tlf: id, Branch: MasterBranch} fops := fs.getOpsByHandle(ctx, h, fb) err = fops.SetInitialHeadToNew(ctx, id, h) if err != nil { return false, ImmutableRootMetadata{}, id, err } id, md, err = mdops.GetForHandle(ctx, h, Merged) if err != nil { return true, ImmutableRootMetadata{}, id, err } return true, md, id, err } // getMaybeCreateRootNode is called for GetOrCreateRootNode and GetRootNode. func (fs *KBFSOpsStandard) getMaybeCreateRootNode( ctx context.Context, h *TlfHandle, branch BranchName, create bool) ( node Node, ei EntryInfo, err error) { fs.log.CDebugf(ctx, "getMaybeCreateRootNode(%s, %v, %v)", h.GetCanonicalPath(), branch, create) defer func() { fs.deferLog.CDebugf(ctx, "Done: %#v", err) }() // Do GetForHandle() unlocked -- no cache lookups, should be fine mdops := fs.config.MDOps() // TODO: only do this the first time, cache the folder ID after that _, md, err := mdops.GetForHandle(ctx, h, Unmerged) if err != nil { return nil, EntryInfo{}, err } if md == (ImmutableRootMetadata{}) { var id TlfID var initialized bool initialized, md, id, err = fs.getOrInitializeNewMDMaster(ctx, mdops, h, create) if err != nil { return nil, EntryInfo{}, err } if initialized { fb := FolderBranch{Tlf: id, Branch: MasterBranch} fops := fs.getOpsByHandle(ctx, h, fb) node, ei, _, err = fops.getRootNode(ctx) if err != nil { return nil, EntryInfo{}, err } if err := fops.addToFavoritesByHandle(ctx, fs.favs, h, true); err != nil { // Failure to favorite shouldn't cause a failure. Just log // and move on. fs.log.CDebugf(ctx, "Couldn't add favorite: %v", err) } return node, ei, nil } if !create && md == (ImmutableRootMetadata{}) { kbpki := fs.config.KBPKI() err := identifyHandle(ctx, kbpki, kbpki, h) if err != nil { return nil, EntryInfo{}, err } fb := FolderBranch{Tlf: id, Branch: MasterBranch} fops := fs.getOpsByHandle(ctx, h, fb) if err := fops.addToFavoritesByHandle(ctx, fs.favs, h, false); err != nil { // Failure to favorite shouldn't cause a failure. Just log // and move on. fs.log.CDebugf(ctx, "Couldn't add favorite: %v", err) } return nil, EntryInfo{}, nil } } fb := FolderBranch{Tlf: md.TlfID(), Branch: branch} // we might not be able to read the metadata if we aren't in the // key group yet. if err := isReadableOrError(ctx, fs.config, md.ReadOnly()); err != nil { fs.opsLock.Lock() defer fs.opsLock.Unlock() // If we already have an FBO for this ID, trigger a rekey // prompt in the background, if possible. 
if ops, ok := fs.ops[fb]; ok { fs.log.CDebugf(ctx, "Triggering a paper prompt rekey on folder "+ "access due to unreadable MD for %s", h.GetCanonicalPath()) go ops.rekeyWithPrompt() } return nil, EntryInfo{}, err } ops := fs.getOpsByHandle(ctx, h, fb) err = ops.SetInitialHeadFromServer(ctx, md) if err != nil { return nil, EntryInfo{}, err } node, ei, _, err = ops.getRootNode(ctx) if err != nil { return nil, EntryInfo{}, err } if err := ops.addToFavoritesByHandle(ctx, fs.favs, h, false); err != nil { // Failure to favorite shouldn't cause a failure. Just log // and move on. fs.log.CDebugf(ctx, "Couldn't add favorite: %v", err) } return node, ei, nil } // GetOrCreateRootNode implements the KBFSOps interface for // KBFSOpsStandard func (fs *KBFSOpsStandard) GetOrCreateRootNode( ctx context.Context, h *TlfHandle, branch BranchName) ( node Node, ei EntryInfo, err error) { return fs.getMaybeCreateRootNode(ctx, h, branch, true) } // GetRootNode implements the KBFSOps interface for // KBFSOpsStandard. Returns a nil Node and nil error // if the tlf does not exist but there is no error present. func (fs *KBFSOpsStandard) GetRootNode( ctx context.Context, h *TlfHandle, branch BranchName) ( node Node, ei EntryInfo, err error) { return fs.getMaybeCreateRootNode(ctx, h, branch, false) } // GetDirChildren implements the KBFSOps interface for KBFSOpsStandard func (fs *KBFSOpsStandard) GetDirChildren(ctx context.Context, dir Node) ( map[string]EntryInfo, error) { ops := fs.getOpsByNode(ctx, dir) return ops.GetDirChildren(ctx, dir) } // Lookup implements the KBFSOps interface for KBFSOpsStandard func (fs *KBFSOpsStandard) Lookup(ctx context.Context, dir Node, name string) ( Node, EntryInfo, error) { ops := fs.getOpsByNode(ctx, dir) return ops.Lookup(ctx, dir, name) } // Stat implements the KBFSOps interface for KBFSOpsStandard func (fs *KBFSOpsStandard) Stat(ctx context.Context, node Node) ( EntryInfo, error) { ops := fs.getOpsByNode(ctx, node) return ops.Stat(ctx, node) } // CreateDir implements the KBFSOps interface for KBFSOpsStandard func (fs *KBFSOpsStandard) CreateDir( ctx context.Context, dir Node, name string) (Node, EntryInfo, error) { ops := fs.getOpsByNode(ctx, dir) return ops.CreateDir(ctx, dir, name) } // CreateFile implements the KBFSOps interface for KBFSOpsStandard func (fs *KBFSOpsStandard) CreateFile( ctx context.Context, dir Node, name string, isExec bool, excl Excl) ( Node, EntryInfo, error) { ops := fs.getOpsByNode(ctx, dir) return ops.CreateFile(ctx, dir, name, isExec, excl) } // CreateLink implements the KBFSOps interface for KBFSOpsStandard func (fs *KBFSOpsStandard) CreateLink( ctx context.Context, dir Node, fromName string, toPath string) ( EntryInfo, error) { ops := fs.getOpsByNode(ctx, dir) return ops.CreateLink(ctx, dir, fromName, toPath) } // RemoveDir implements the KBFSOps interface for KBFSOpsStandard func (fs *KBFSOpsStandard) RemoveDir( ctx context.Context, dir Node, name string) error { ops := fs.getOpsByNode(ctx, dir) return ops.RemoveDir(ctx, dir, name) } // RemoveEntry implements the KBFSOps interface for KBFSOpsStandard func (fs *KBFSOpsStandard) RemoveEntry( ctx context.Context, dir Node, name string) error { ops := fs.getOpsByNode(ctx, dir) return ops.RemoveEntry(ctx, dir, name) } // Rename implements the KBFSOps interface for KBFSOpsStandard func (fs *KBFSOpsStandard) Rename( ctx context.Context, oldParent Node, oldName string, newParent Node, newName string) error { oldFB := oldParent.GetFolderBranch() newFB := newParent.GetFolderBranch() // only works for nodes 
within the same topdir if oldFB != newFB { return RenameAcrossDirsError{} } ops := fs.getOpsByNode(ctx, oldParent) return ops.Rename(ctx, oldParent, oldName, newParent, newName) } // Read implements the KBFSOps interface for KBFSOpsStandard func (fs *KBFSOpsStandard) Read( ctx context.Context, file Node, dest []byte, off int64) ( numRead int64, err error) { ops := fs.getOpsByNode(ctx, file) return ops.Read(ctx, file, dest, off) } // Write implements the KBFSOps interface for KBFSOpsStandard func (fs *KBFSOpsStandard) Write( ctx context.Context, file Node, data []byte, off int64) error { ops := fs.getOpsByNode(ctx, file) return ops.Write(ctx, file, data, off) } // Truncate implements the KBFSOps interface for KBFSOpsStandard func (fs *KBFSOpsStandard) Truncate( ctx context.Context, file Node, size uint64) error { ops := fs.getOpsByNode(ctx, file) return ops.Truncate(ctx, file, size) } // SetEx implements the KBFSOps interface for KBFSOpsStandard func (fs *KBFSOpsStandard) SetEx( ctx context.Context, file Node, ex bool) error { ops := fs.getOpsByNode(ctx, file) return ops.SetEx(ctx, file, ex) } // SetMtime implements the KBFSOps interface for KBFSOpsStandard func (fs *KBFSOpsStandard) SetMtime( ctx context.Context, file Node, mtime *time.Time) error { ops := fs.getOpsByNode(ctx, file) return ops.SetMtime(ctx, file, mtime) } // Sync implements the KBFSOps interface for KBFSOpsStandard func (fs *KBFSOpsStandard) Sync(ctx context.Context, file Node) error { ops := fs.getOpsByNode(ctx, file) return ops.Sync(ctx, file) } // FolderStatus implements the KBFSOps interface for KBFSOpsStandard func (fs *KBFSOpsStandard) FolderStatus( ctx context.Context, folderBranch FolderBranch) ( FolderBranchStatus, <-chan StatusUpdate, error) { ops := fs.getOps(ctx, folderBranch) return ops.FolderStatus(ctx, folderBranch) } // Status implements the KBFSOps interface for KBFSOpsStandard func (fs *KBFSOpsStandard) Status(ctx context.Context) ( KBFSStatus, <-chan StatusUpdate, error) { username, _, err := fs.config.KBPKI().GetCurrentUserInfo(ctx) var usageBytes int64 = -1 var limitBytes int64 = -1 // Don't request the quota info until we're sure we've // authenticated with our password. TODO: fix this in the // service/GUI by handling multiple simultaneous passphrase // requests at once. if err == nil && fs.config.MDServer().IsConnected() { quotaInfo, err := fs.config.BlockServer().GetUserQuotaInfo(ctx) if err == nil { limitBytes = quotaInfo.Limit if quotaInfo.Total != nil { usageBytes = quotaInfo.Total.Bytes[UsageWrite] } else { usageBytes = 0 } } } failures, ch := fs.currentStatus.CurrentStatus() var jServerStatus *JournalServerStatus jServer, jErr := GetJournalServer(fs.config) if jErr == nil { status := jServer.Status() jServerStatus = &status } return KBFSStatus{ CurrentUser: username.String(), IsConnected: fs.config.MDServer().IsConnected(), UsageBytes: usageBytes, LimitBytes: limitBytes, FailingServices: failures, JournalServer: jServerStatus, }, ch, err } // UnstageForTesting implements the KBFSOps interface for KBFSOpsStandard // TODO: remove once we have automatic conflict resolution func (fs *KBFSOpsStandard) UnstageForTesting( ctx context.Context, folderBranch FolderBranch) error { ops := fs.getOps(ctx, folderBranch) return ops.UnstageForTesting(ctx, folderBranch) } // Rekey implements the KBFSOps interface for KBFSOpsStandard func (fs *KBFSOpsStandard) Rekey(ctx context.Context, id TlfID) error { // We currently only support rekeys of master branches. 
ops := fs.getOpsNoAdd(FolderBranch{Tlf: id, Branch: MasterBranch}) return ops.Rekey(ctx, id) } // SyncFromServerForTesting implements the KBFSOps interface for KBFSOpsStandard func (fs *KBFSOpsStandard) SyncFromServerForTesting( ctx context.Context, folderBranch FolderBranch) error { ops := fs.getOps(ctx, folderBranch) return ops.SyncFromServerForTesting(ctx, folderBranch) } // GetUpdateHistory implements the KBFSOps interface for KBFSOpsStandard func (fs *KBFSOpsStandard) GetUpdateHistory(ctx context.Context, folderBranch FolderBranch) (history TLFUpdateHistory, err error) { ops := fs.getOps(ctx, folderBranch) return ops.GetUpdateHistory(ctx, folderBranch) } // GetEditHistory implements the KBFSOps interface for KBFSOpsStandard func (fs *KBFSOpsStandard) GetEditHistory(ctx context.Context, folderBranch FolderBranch) (edits TlfWriterEdits, err error) { ops := fs.getOps(ctx, folderBranch) return ops.GetEditHistory(ctx, folderBranch) } // GetNodeMetadata implements the KBFSOps interface for KBFSOpsStandard func (fs *KBFSOpsStandard) GetNodeMetadata(ctx context.Context, node Node) ( NodeMetadata, error) { ops := fs.getOpsByNode(ctx, node) return ops.GetNodeMetadata(ctx, node) } // Notifier: var _ Notifier = (*KBFSOpsStandard)(nil) // RegisterForChanges implements the Notifer interface for KBFSOpsStandard func (fs *KBFSOpsStandard) RegisterForChanges( folderBranches []FolderBranch, obs Observer) error { for _, fb := range folderBranches { // TODO: add branch parameter to notifier interface ops := fs.getOpsNoAdd(fb) return ops.RegisterForChanges(obs) } return nil } // UnregisterFromChanges implements the Notifer interface for KBFSOpsStandard func (fs *KBFSOpsStandard) UnregisterFromChanges( folderBranches []FolderBranch, obs Observer) error { for _, fb := range folderBranches { // TODO: add branch parameter to notifier interface ops := fs.getOpsNoAdd(fb) return ops.UnregisterFromChanges(obs) } return nil }
1
13,125
I figured this was more fool-proof than trying to plumb the error up from everywhere.
keybase-kbfs
go
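
The review comment above refers to the guard added in the patch for this entry: getOpsNoAdd panics when handed a zero-value FolderBranch, rather than plumbing an error back through every caller. Below is a minimal, self-contained sketch of that fail-fast pattern; the TlfID, BranchName and FolderBranch types here are simplified stand-ins assumed for illustration, not the real libkbfs definitions.

package main

import "fmt"

// Simplified stand-ins for the libkbfs types (assumed shapes, not the real ones).
type TlfID string
type BranchName string

type FolderBranch struct {
	Tlf    TlfID
	Branch BranchName
}

// getOpsNoAdd fails fast on a zero-value FolderBranch instead of letting an
// empty key silently create folder-branch state, mirroring the guard in the patch.
func getOpsNoAdd(fb FolderBranch) {
	if fb == (FolderBranch{}) {
		panic("zero FolderBranch in getOps")
	}
	fmt.Printf("looking up ops for %+v\n", fb)
}

func main() {
	getOpsNoAdd(FolderBranch{Tlf: "tlf123", Branch: "master"}) // fine
	// getOpsNoAdd(FolderBranch{})                             // would panic
}

Whether a panic or an error return is preferable is a design choice; the reviewer's point is that a zero FolderBranch indicates a programming error at the call site, so failing loudly in one place is simpler than threading an error through every caller.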
@@ -192,8 +192,8 @@ func (c *client) initClient() { c.cid = atomic.AddUint64(&s.gcid, 1) c.bw = bufio.NewWriterSize(c.nc, startBufSize) c.subs = make(map[string]*subscription) - c.debug = (atomic.LoadInt32(&debug) != 0) - c.trace = (atomic.LoadInt32(&trace) != 0) + c.debug = (atomic.LoadInt32(&c.srv.logging.debug) != 0) + c.trace = (atomic.LoadInt32(&c.srv.logging.trace) != 0) // This is a scratch buffer used for processMsg() // The msg header starts with "MSG ",
1
// Copyright 2012-2016 Apcera Inc. All rights reserved. package server import ( "bufio" "crypto/tls" "encoding/json" "fmt" "math/rand" "net" "sync" "sync/atomic" "time" ) // Type of client connection. const ( // CLIENT is an end user. CLIENT = iota // ROUTER is another router in the cluster. ROUTER ) const ( // Original Client protocol from 2009. // http://nats.io/documentation/internals/nats-protocol/ ClientProtoZero = iota // This signals a client can receive more then the original INFO block. // This can be used to update clients on other cluster members, etc. ClientProtoInfo ) func init() { rand.Seed(time.Now().UnixNano()) } const ( // Scratch buffer size for the processMsg() calls. msgScratchSize = 512 msgHeadProto = "MSG " ) // For controlling dynamic buffer sizes. const ( startBufSize = 512 // For INFO/CONNECT block minBufSize = 128 maxBufSize = 65536 ) // Represent client booleans with a bitmask type clientFlag byte // Some client state represented as flags const ( connectReceived clientFlag = 1 << iota // The CONNECT proto has been received firstPongSent // The first PONG has been sent infoUpdated // The server's Info object has changed before first PONG was sent ) // set the flag (would be equivalent to set the boolean to true) func (cf *clientFlag) set(c clientFlag) { *cf |= c } // isSet returns true if the flag is set, false otherwise func (cf clientFlag) isSet(c clientFlag) bool { return cf&c != 0 } // setIfNotSet will set the flag `c` only if that flag was not already // set and return true to indicate that the flag has been set. Returns // false otherwise. func (cf *clientFlag) setIfNotSet(c clientFlag) bool { if *cf&c == 0 { *cf |= c return true } return false } // clear unset the flag (would be equivalent to set the boolean to false) func (cf *clientFlag) clear(c clientFlag) { *cf &= ^c } type client struct { // Here first because of use of atomics, and memory alignment. stats mu sync.Mutex typ int cid uint64 lang string opts clientOpts start time.Time nc net.Conn mpay int ncs string bw *bufio.Writer srv *Server subs map[string]*subscription perms *permissions cache readCache pcd map[*client]struct{} atmr *time.Timer ptmr *time.Timer pout int wfc int msgb [msgScratchSize]byte last time.Time parseState route *route debug bool trace bool flags clientFlag // Compact booleans into a single field. Size will be increased when needed. } type permissions struct { sub *Sublist pub *Sublist pcache map[string]bool } const ( maxResultCacheSize = 512 maxPermCacheSize = 32 pruneSize = 16 ) // Used in readloop to cache hot subject lookups and group statistics. type readCache struct { genid uint64 results map[string]*SublistResult prand *rand.Rand inMsgs int inBytes int subs int } func (c *client) String() (id string) { return c.ncs } func (c *client) GetOpts() *clientOpts { return &c.opts } // GetTLSConnectionState returns the TLS ConnectionState if TLS is enabled, nil // otherwise. Implements the ClientAuth interface. 
func (c *client) GetTLSConnectionState() *tls.ConnectionState { tc, ok := c.nc.(*tls.Conn) if !ok { return nil } state := tc.ConnectionState() return &state } type subscription struct { client *client subject []byte queue []byte sid []byte nm int64 max int64 } type clientOpts struct { Verbose bool `json:"verbose"` Pedantic bool `json:"pedantic"` SslRequired bool `json:"ssl_required"` Authorization string `json:"auth_token"` Username string `json:"user"` Password string `json:"pass"` Name string `json:"name"` Lang string `json:"lang"` Version string `json:"version"` Protocol int `json:"protocol"` } var defaultOpts = clientOpts{Verbose: true, Pedantic: true} func init() { rand.Seed(time.Now().UnixNano()) } // Lock should be held func (c *client) initClient() { s := c.srv c.cid = atomic.AddUint64(&s.gcid, 1) c.bw = bufio.NewWriterSize(c.nc, startBufSize) c.subs = make(map[string]*subscription) c.debug = (atomic.LoadInt32(&debug) != 0) c.trace = (atomic.LoadInt32(&trace) != 0) // This is a scratch buffer used for processMsg() // The msg header starts with "MSG ", // in bytes that is [77 83 71 32]. c.msgb = [msgScratchSize]byte{77, 83, 71, 32} // This is to track pending clients that have data to be flushed // after we process inbound msgs from our own connection. c.pcd = make(map[*client]struct{}) // snapshot the string version of the connection conn := "-" if ip, ok := c.nc.(*net.TCPConn); ok { addr := ip.RemoteAddr().(*net.TCPAddr) conn = fmt.Sprintf("%s:%d", addr.IP, addr.Port) } switch c.typ { case CLIENT: c.ncs = fmt.Sprintf("%s - cid:%d", conn, c.cid) case ROUTER: c.ncs = fmt.Sprintf("%s - rid:%d", conn, c.cid) } } // RegisterUser allows auth to call back into a new client // with the authenticated user. This is used to map any permissions // into the client. func (c *client) RegisterUser(user *User) { if user.Permissions == nil { return } // Process Permissions and map into client connection structures. c.mu.Lock() defer c.mu.Unlock() // Pre-allocate all to simplify checks later. c.perms = &permissions{} c.perms.sub = NewSublist() c.perms.pub = NewSublist() c.perms.pcache = make(map[string]bool) // Loop over publish permissions for _, pubSubject := range user.Permissions.Publish { sub := &subscription{subject: []byte(pubSubject)} c.perms.pub.Insert(sub) } // Loop over subscribe permissions for _, subSubject := range user.Permissions.Subscribe { sub := &subscription{subject: []byte(subSubject)} c.perms.sub.Insert(sub) } } func (c *client) readLoop() { // Grab the connection off the client, it will be cleared on a close. // We check for that after the loop, but want to avoid a nil dereference c.mu.Lock() nc := c.nc s := c.srv defer s.grWG.Done() c.mu.Unlock() if nc == nil { return } // Start read buffer. b := make([]byte, startBufSize) for { n, err := nc.Read(b) if err != nil { c.closeConnection() return } // Grab for updates for last activity. last := time.Now() // Clear inbound stats cache c.cache.inMsgs = 0 c.cache.inBytes = 0 c.cache.subs = 0 if err := c.parse(b[:n]); err != nil { // handled inline if err != ErrMaxPayload && err != ErrAuthorization { c.Errorf("Error reading from client: %s", err.Error()) c.sendErr("Parser Error") c.closeConnection() } return } // Updates stats for client and server that were collected // from parsing through the buffer. 
atomic.AddInt64(&c.inMsgs, int64(c.cache.inMsgs)) atomic.AddInt64(&c.inBytes, int64(c.cache.inBytes)) atomic.AddInt64(&s.inMsgs, int64(c.cache.inMsgs)) atomic.AddInt64(&s.inBytes, int64(c.cache.inBytes)) // Check pending clients for flush. for cp := range c.pcd { // Flush those in the set cp.mu.Lock() if cp.nc != nil { // Gather the flush calls that happened before now. // This is a signal into us about dynamic buffer allocation tuning. wfc := cp.wfc cp.wfc = 0 cp.nc.SetWriteDeadline(time.Now().Add(s.opts.WriteDeadline)) err := cp.bw.Flush() cp.nc.SetWriteDeadline(time.Time{}) if err != nil { c.Debugf("Error flushing: %v", err) cp.mu.Unlock() cp.closeConnection() cp.mu.Lock() } else { // Update outbound last activity. cp.last = last // Check if we should tune the buffer. sz := cp.bw.Available() // Check for expansion opportunity. if wfc > 2 && sz <= maxBufSize/2 { cp.bw = bufio.NewWriterSize(cp.nc, sz*2) } // Check for shrinking opportunity. if wfc == 0 && sz >= minBufSize*2 { cp.bw = bufio.NewWriterSize(cp.nc, sz/2) } } } cp.mu.Unlock() delete(c.pcd, cp) } // Check to see if we got closed, e.g. slow consumer c.mu.Lock() nc := c.nc // Activity based on interest changes or data/msgs. if c.cache.inMsgs > 0 || c.cache.subs > 0 { c.last = last } c.mu.Unlock() if nc == nil { return } // Update buffer size as/if needed. // Grow if n == len(b) && len(b) < maxBufSize { b = make([]byte, len(b)*2) } // Shrink, for now don't accelerate, ping/pong will eventually sort it out. if n < len(b)/2 && len(b) > minBufSize { b = make([]byte, len(b)/2) } } } func (c *client) traceMsg(msg []byte) { if !c.trace { return } // FIXME(dlc), allow limits to printable payload c.Tracef("->> MSG_PAYLOAD: [%s]", string(msg[:len(msg)-LEN_CR_LF])) } func (c *client) traceInOp(op string, arg []byte) { c.traceOp("->> %s", op, arg) } func (c *client) traceOutOp(op string, arg []byte) { c.traceOp("<<- %s", op, arg) } func (c *client) traceOp(format, op string, arg []byte) { if !c.trace { return } opa := []interface{}{} if op != "" { opa = append(opa, op) } if arg != nil { opa = append(opa, string(arg)) } c.Tracef(format, opa) } // Process the information messages from Clients and other Routes. func (c *client) processInfo(arg []byte) error { info := Info{} if err := json.Unmarshal(arg, &info); err != nil { return err } if c.typ == ROUTER { c.processRouteInfo(&info) } return nil } func (c *client) processErr(errStr string) { switch c.typ { case CLIENT: c.Errorf("Client Error %s", errStr) case ROUTER: c.Errorf("Route Error %s", errStr) } c.closeConnection() } func (c *client) processConnect(arg []byte) error { c.traceInOp("CONNECT", arg) c.mu.Lock() // If we can't stop the timer because the callback is in progress... if !c.clearAuthTimer() { // wait for it to finish and handle sending the failure back to // the client. for c.nc != nil { c.mu.Unlock() time.Sleep(25 * time.Millisecond) c.mu.Lock() } c.mu.Unlock() return nil } c.last = time.Now() typ := c.typ r := c.route srv := c.srv // Moved unmarshalling of clients' Options under the lock. // The client has already been added to the server map, so it is possible // that other routines lookup the client, and access its options under // the client's lock, so unmarshalling the options outside of the lock // would cause data RACEs. if err := json.Unmarshal(arg, &c.opts); err != nil { c.mu.Unlock() return err } // Indicate that the CONNECT protocol has been received, and that the // server now knows which protocol this client supports. 
c.flags.set(connectReceived) // Capture these under lock proto := c.opts.Protocol verbose := c.opts.Verbose lang := c.opts.Lang c.mu.Unlock() if srv != nil { // As soon as c.opts is unmarshalled and if the proto is at // least ClientProtoInfo, we need to increment the following counter. // This is decremented when client is removed from the server's // clients map. if proto >= ClientProtoInfo { srv.mu.Lock() srv.cproto++ srv.mu.Unlock() } // Check for Auth if ok := srv.checkAuthorization(c); !ok { c.authViolation() return ErrAuthorization } } // Check client protocol request if it exists. if typ == CLIENT && (proto < ClientProtoZero || proto > ClientProtoInfo) { c.sendErr(ErrBadClientProtocol.Error()) c.closeConnection() return ErrBadClientProtocol } else if typ == ROUTER && lang != "" { // Way to detect clients that incorrectly connect to the route listen // port. Client provide Lang in the CONNECT protocol while ROUTEs don't. c.sendErr(ErrClientConnectedToRoutePort.Error()) c.closeConnection() return ErrClientConnectedToRoutePort } // Grab connection name of remote route. if typ == ROUTER && r != nil { c.mu.Lock() c.route.remoteID = c.opts.Name c.mu.Unlock() } if verbose { c.sendOK() } return nil } func (c *client) authTimeout() { c.sendErr(ErrAuthTimeout.Error()) c.Debugf("Authorization Timeout") c.closeConnection() } func (c *client) authViolation() { if c.srv != nil && c.srv.opts.Users != nil { c.Errorf("%s - User %q", ErrAuthorization.Error(), c.opts.Username) } else { c.Errorf(ErrAuthorization.Error()) } c.sendErr("Authorization Violation") c.closeConnection() } func (c *client) maxConnExceeded() { c.Errorf(ErrTooManyConnections.Error()) c.sendErr(ErrTooManyConnections.Error()) c.closeConnection() } func (c *client) maxPayloadViolation(sz int) { c.Errorf("%s: %d vs %d", ErrMaxPayload.Error(), sz, c.mpay) c.sendErr("Maximum Payload Violation") c.closeConnection() } // Assume the lock is held upon entry. func (c *client) sendProto(info []byte, doFlush bool) error { var err error if c.bw != nil && c.nc != nil { deadlineSet := false if doFlush || c.bw.Available() < len(info) { c.nc.SetWriteDeadline(time.Now().Add(c.srv.opts.WriteDeadline)) deadlineSet = true } _, err = c.bw.Write(info) if err == nil && doFlush { err = c.bw.Flush() } if deadlineSet { c.nc.SetWriteDeadline(time.Time{}) } } return err } // Assume the lock is held upon entry. func (c *client) sendInfo(info []byte) { c.sendProto(info, true) } func (c *client) sendErr(err string) { c.mu.Lock() c.traceOutOp("-ERR", []byte(err)) c.sendProto([]byte(fmt.Sprintf("-ERR '%s'\r\n", err)), true) c.mu.Unlock() } func (c *client) sendOK() { c.mu.Lock() c.traceOutOp("OK", nil) // Can not autoflush this one, needs to be async. c.sendProto([]byte("+OK\r\n"), false) c.pcd[c] = needFlush c.mu.Unlock() } func (c *client) processPing() { c.mu.Lock() c.traceInOp("PING", nil) if c.nc == nil { c.mu.Unlock() return } c.traceOutOp("PONG", nil) err := c.sendProto([]byte("PONG\r\n"), true) if err != nil { c.clearConnection() c.Debugf("Error on Flush, error %s", err.Error()) } srv := c.srv sendUpdateINFO := false // Check if this is the first PONG, if so... if c.flags.setIfNotSet(firstPongSent) { // Check if server should send an async INFO protocol to the client if c.opts.Protocol >= ClientProtoInfo && srv != nil && c.flags.isSet(infoUpdated) { sendUpdateINFO = true } // We can now clear the flag c.flags.clear(infoUpdated) } c.mu.Unlock() // Some clients send an initial PING as part of the synchronous connect process. 
// They can't be receiving anything until the first PONG is received. // So we delay the possible updated INFO after this point. if sendUpdateINFO { srv.mu.Lock() // Use the cached protocol proto := srv.infoJSON srv.mu.Unlock() c.mu.Lock() c.sendInfo(proto) c.mu.Unlock() } } func (c *client) processPong() { c.traceInOp("PONG", nil) c.mu.Lock() c.pout = 0 c.mu.Unlock() } func (c *client) processMsgArgs(arg []byte) error { if c.trace { c.traceInOp("MSG", arg) } // Unroll splitArgs to avoid runtime/heap issues a := [MAX_MSG_ARGS][]byte{} args := a[:0] start := -1 for i, b := range arg { switch b { case ' ', '\t', '\r', '\n': if start >= 0 { args = append(args, arg[start:i]) start = -1 } default: if start < 0 { start = i } } } if start >= 0 { args = append(args, arg[start:]) } switch len(args) { case 3: c.pa.reply = nil c.pa.szb = args[2] c.pa.size = parseSize(args[2]) case 4: c.pa.reply = args[2] c.pa.szb = args[3] c.pa.size = parseSize(args[3]) default: return fmt.Errorf("processMsgArgs Parse Error: '%s'", arg) } if c.pa.size < 0 { return fmt.Errorf("processMsgArgs Bad or Missing Size: '%s'", arg) } // Common ones processed after check for arg length c.pa.subject = args[0] c.pa.sid = args[1] return nil } func (c *client) processPub(arg []byte) error { if c.trace { c.traceInOp("PUB", arg) } // Unroll splitArgs to avoid runtime/heap issues a := [MAX_PUB_ARGS][]byte{} args := a[:0] start := -1 for i, b := range arg { switch b { case ' ', '\t', '\r', '\n': if start >= 0 { args = append(args, arg[start:i]) start = -1 } default: if start < 0 { start = i } } } if start >= 0 { args = append(args, arg[start:]) } switch len(args) { case 2: c.pa.subject = args[0] c.pa.reply = nil c.pa.size = parseSize(args[1]) c.pa.szb = args[1] case 3: c.pa.subject = args[0] c.pa.reply = args[1] c.pa.size = parseSize(args[2]) c.pa.szb = args[2] default: return fmt.Errorf("processPub Parse Error: '%s'", arg) } if c.pa.size < 0 { return fmt.Errorf("processPub Bad or Missing Size: '%s'", arg) } if c.mpay > 0 && c.pa.size > c.mpay { c.maxPayloadViolation(c.pa.size) return ErrMaxPayload } if c.opts.Pedantic && !IsValidLiteralSubject(string(c.pa.subject)) { c.sendErr("Invalid Subject") } return nil } func splitArg(arg []byte) [][]byte { a := [MAX_MSG_ARGS][]byte{} args := a[:0] start := -1 for i, b := range arg { switch b { case ' ', '\t', '\r', '\n': if start >= 0 { args = append(args, arg[start:i]) start = -1 } default: if start < 0 { start = i } } } if start >= 0 { args = append(args, arg[start:]) } return args } func (c *client) processSub(argo []byte) (err error) { c.traceInOp("SUB", argo) // Indicate activity. c.cache.subs += 1 // Copy so we do not reference a potentially large buffer arg := make([]byte, len(argo)) copy(arg, argo) args := splitArg(arg) sub := &subscription{client: c} switch len(args) { case 2: sub.subject = args[0] sub.queue = nil sub.sid = args[1] case 3: sub.subject = args[0] sub.queue = args[1] sub.sid = args[2] default: return fmt.Errorf("processSub Parse Error: '%s'", arg) } shouldForward := false c.mu.Lock() if c.nc == nil { c.mu.Unlock() return nil } // Check permissions if applicable. if c.perms != nil { r := c.perms.sub.Match(string(sub.subject)) if len(r.psubs) == 0 { c.mu.Unlock() c.sendErr(fmt.Sprintf("Permissions Violation for Subscription to %q", sub.subject)) c.Errorf("Subscription Violation - User %q, Subject %q", c.opts.Username, sub.subject) return nil } } // We can have two SUB protocols coming from a route due to some // race conditions. 
We should make sure that we process only one. sid := string(sub.sid) if c.subs[sid] == nil { c.subs[sid] = sub if c.srv != nil { err = c.srv.sl.Insert(sub) if err != nil { delete(c.subs, sid) } else { shouldForward = c.typ != ROUTER } } } c.mu.Unlock() if err != nil { c.sendErr("Invalid Subject") return nil } else if c.opts.Verbose { c.sendOK() } if shouldForward { c.srv.broadcastSubscribe(sub) } return nil } func (c *client) unsubscribe(sub *subscription) { c.mu.Lock() defer c.mu.Unlock() if sub.max > 0 && sub.nm < sub.max { c.Debugf( "Deferring actual UNSUB(%s): %d max, %d received\n", string(sub.subject), sub.max, sub.nm) return } c.traceOp("<-> %s", "DELSUB", sub.sid) delete(c.subs, string(sub.sid)) if c.srv != nil { c.srv.sl.Remove(sub) } } func (c *client) processUnsub(arg []byte) error { c.traceInOp("UNSUB", arg) args := splitArg(arg) var sid []byte max := -1 switch len(args) { case 1: sid = args[0] case 2: sid = args[0] max = parseSize(args[1]) default: return fmt.Errorf("processUnsub Parse Error: '%s'", arg) } // Indicate activity. c.cache.subs += 1 var sub *subscription unsub := false shouldForward := false ok := false c.mu.Lock() if sub, ok = c.subs[string(sid)]; ok { if max > 0 { sub.max = int64(max) } else { // Clear it here to override sub.max = 0 } unsub = true shouldForward = c.typ != ROUTER && c.srv != nil } c.mu.Unlock() if unsub { c.unsubscribe(sub) } if shouldForward { c.srv.broadcastUnSubscribe(sub) } if c.opts.Verbose { c.sendOK() } return nil } func (c *client) msgHeader(mh []byte, sub *subscription) []byte { mh = append(mh, sub.sid...) mh = append(mh, ' ') if c.pa.reply != nil { mh = append(mh, c.pa.reply...) mh = append(mh, ' ') } mh = append(mh, c.pa.szb...) mh = append(mh, "\r\n"...) return mh } // Used to treat maps as efficient set var needFlush = struct{}{} var routeSeen = struct{}{} func (c *client) deliverMsg(sub *subscription, mh, msg []byte) { if sub.client == nil { return } client := sub.client client.mu.Lock() sub.nm++ // Check if we should auto-unsubscribe. if sub.max > 0 { // For routing.. shouldForward := client.typ != ROUTER && client.srv != nil // If we are at the exact number, unsubscribe but // still process the message in hand, otherwise // unsubscribe and drop message on the floor. if sub.nm == sub.max { c.Debugf("Auto-unsubscribe limit of %d reached for sid '%s'\n", sub.max, string(sub.sid)) // Due to defer, reverse the code order so that execution // is consistent with other cases where we unsubscribe. if shouldForward { defer client.srv.broadcastUnSubscribe(sub) } defer client.unsubscribe(sub) } else if sub.nm > sub.max { c.Debugf("Auto-unsubscribe limit [%d] exceeded\n", sub.max) client.mu.Unlock() client.unsubscribe(sub) if shouldForward { client.srv.broadcastUnSubscribe(sub) } return } } if client.nc == nil { client.mu.Unlock() return } // Update statistics // The msg includes the CR_LF, so pull back out for accounting. msgSize := int64(len(msg) - LEN_CR_LF) // No atomic needed since accessed under client lock. // Monitor is reading those also under client's lock. client.outMsgs++ client.outBytes += msgSize atomic.AddInt64(&c.srv.outMsgs, 1) atomic.AddInt64(&c.srv.outBytes, msgSize) // Check to see if our writes will cause a flush // in the underlying bufio. If so limit time we // will wait for flush to complete. deadlineSet := false if client.bw.Available() < (len(mh) + len(msg)) { client.wfc++ client.nc.SetWriteDeadline(time.Now().Add(client.srv.opts.WriteDeadline)) deadlineSet = true } // Deliver to the client. 
_, err := client.bw.Write(mh) if err != nil { goto writeErr } _, err = client.bw.Write(msg) if err != nil { goto writeErr } if c.trace { client.traceOutOp(string(mh[:len(mh)-LEN_CR_LF]), nil) } // TODO(dlc) - Do we need this or can we just call always? if deadlineSet { client.nc.SetWriteDeadline(time.Time{}) } client.mu.Unlock() c.pcd[client] = needFlush return writeErr: if deadlineSet { client.nc.SetWriteDeadline(time.Time{}) } client.mu.Unlock() if ne, ok := err.(net.Error); ok && ne.Timeout() { atomic.AddInt64(&client.srv.slowConsumers, 1) client.Noticef("Slow Consumer Detected") client.closeConnection() } else { c.Debugf("Error writing msg: %v", err) } } // processMsg is called to process an inbound msg from a client. func (c *client) processMsg(msg []byte) { // Snapshot server. srv := c.srv // Update statistics // The msg includes the CR_LF, so pull back out for accounting. c.cache.inMsgs += 1 c.cache.inBytes += len(msg) - LEN_CR_LF if c.trace { c.traceMsg(msg) } // defintely // Disallow publish to _SYS.>, these are reserved for internals. if c.pa.subject[0] == '_' && len(c.pa.subject) > 4 && c.pa.subject[1] == 'S' && c.pa.subject[2] == 'Y' && c.pa.subject[3] == 'S' && c.pa.subject[4] == '.' { c.pubPermissionViolation(c.pa.subject) return } // Check if published subject is allowed if we have permissions in place. if c.perms != nil { allowed, ok := c.perms.pcache[string(c.pa.subject)] if ok && !allowed { c.pubPermissionViolation(c.pa.subject) return } if !ok { r := c.perms.pub.Match(string(c.pa.subject)) notAllowed := len(r.psubs) == 0 if notAllowed { c.pubPermissionViolation(c.pa.subject) c.perms.pcache[string(c.pa.subject)] = false } else { c.perms.pcache[string(c.pa.subject)] = true } // Prune if needed. if len(c.perms.pcache) > maxPermCacheSize { // Prune the permissions cache. Keeps us from unbounded growth. r := 0 for subject := range c.perms.pcache { delete(c.cache.results, subject) r++ if r > pruneSize { break } } } // Return here to allow the pruning code to run if needed. if notAllowed { return } } } if c.opts.Verbose { c.sendOK() } // Mostly under testing scenarios. if srv == nil { return } var r *SublistResult var ok bool genid := atomic.LoadUint64(&srv.sl.genid) if genid == c.cache.genid && c.cache.results != nil { r, ok = c.cache.results[string(c.pa.subject)] } else { // reset c.cache.results = make(map[string]*SublistResult) c.cache.genid = genid } if !ok { subject := string(c.pa.subject) r = srv.sl.Match(subject) c.cache.results[subject] = r if len(c.cache.results) > maxResultCacheSize { // Prune the results cache. Keeps us from unbounded growth. r := 0 for subject := range c.cache.results { delete(c.cache.results, subject) r++ if r > pruneSize { break } } } } // Check for no interest, short circuit if so. if len(r.psubs) == 0 && len(r.qsubs) == 0 { return } // Check for pedantic and bad subject. if c.opts.Pedantic && !IsValidLiteralSubject(string(c.pa.subject)) { return } // Scratch buffer.. msgh := c.msgb[:len(msgHeadProto)] // msg header msgh = append(msgh, c.pa.subject...) msgh = append(msgh, ' ') si := len(msgh) isRoute := c.typ == ROUTER // If we are a route and we have a queue subscription, deliver direct // since they are sent direct via L2 semantics. If the match is a queue // subscription, we will return from here regardless if we find a sub. 
if isRoute { if sub, ok := srv.routeSidQueueSubscriber(c.pa.sid); ok { if sub != nil { mh := c.msgHeader(msgh[:si], sub) c.deliverMsg(sub, mh, msg) } return } } // Used to only send normal subscriptions once across a given route. var rmap map[string]struct{} // Loop over all normal subscriptions that match. for _, sub := range r.psubs { // Check if this is a send to a ROUTER, make sure we only send it // once. The other side will handle the appropriate re-processing // and fan-out. Also enforce 1-Hop semantics, so no routing to another. if sub.client.typ == ROUTER { // Skip if sourced from a ROUTER and going to another ROUTER. // This is 1-Hop semantics for ROUTERs. if isRoute { continue } // Check to see if we have already sent it here. if rmap == nil { rmap = make(map[string]struct{}, srv.numRoutes()) } sub.client.mu.Lock() if sub.client.nc == nil || sub.client.route == nil || sub.client.route.remoteID == "" { c.Debugf("Bad or Missing ROUTER Identity, not processing msg") sub.client.mu.Unlock() continue } if _, ok := rmap[sub.client.route.remoteID]; ok { c.Debugf("Ignoring route, already processed") sub.client.mu.Unlock() continue } rmap[sub.client.route.remoteID] = routeSeen sub.client.mu.Unlock() } // Normal delivery mh := c.msgHeader(msgh[:si], sub) c.deliverMsg(sub, mh, msg) } // Now process any queue subs we have if not a route if !isRoute { // Check to see if we have our own rand yet. Global rand // has contention with lots of clients, etc. if c.cache.prand == nil { c.cache.prand = rand.New(rand.NewSource(time.Now().UnixNano())) } // Process queue subs for i := 0; i < len(r.qsubs); i++ { qsubs := r.qsubs[i] index := c.cache.prand.Intn(len(qsubs)) sub := qsubs[index] if sub != nil { mh := c.msgHeader(msgh[:si], sub) c.deliverMsg(sub, mh, msg) } } } } func (c *client) pubPermissionViolation(subject []byte) { c.sendErr(fmt.Sprintf("Permissions Violation for Publish to %q", subject)) c.Errorf("Publish Violation - User %q, Subject %q", c.opts.Username, subject) } func (c *client) processPingTimer() { c.mu.Lock() defer c.mu.Unlock() c.ptmr = nil // Check if connection is still opened if c.nc == nil { return } c.Debugf("%s Ping Timer", c.typeString()) // Check for violation c.pout++ if c.pout > c.srv.opts.MaxPingsOut { c.Debugf("Stale Client Connection - Closing") c.sendProto([]byte(fmt.Sprintf("-ERR '%s'\r\n", "Stale Connection")), true) c.clearConnection() return } c.traceOutOp("PING", nil) // Send PING err := c.sendProto([]byte("PING\r\n"), true) if err != nil { c.Debugf("Error on Client Ping Flush, error %s", err) c.clearConnection() } else { // Reset to fire again if all OK. c.setPingTimer() } } func (c *client) setPingTimer() { if c.srv == nil { return } d := c.srv.opts.PingInterval c.ptmr = time.AfterFunc(d, c.processPingTimer) } // Lock should be held func (c *client) clearPingTimer() { if c.ptmr == nil { return } c.ptmr.Stop() c.ptmr = nil } // Lock should be held func (c *client) setAuthTimer(d time.Duration) { c.atmr = time.AfterFunc(d, func() { c.authTimeout() }) } // Lock should be held func (c *client) clearAuthTimer() bool { if c.atmr == nil { return true } stopped := c.atmr.Stop() c.atmr = nil return stopped } func (c *client) isAuthTimerSet() bool { c.mu.Lock() isSet := c.atmr != nil c.mu.Unlock() return isSet } // Lock should be held func (c *client) clearConnection() { if c.nc == nil { return } // With TLS, Close() is sending an alert (that is doing a write). // Need to set a deadline otherwise the server could block there // if the peer is not reading from socket. 
c.nc.SetWriteDeadline(time.Now().Add(c.srv.opts.WriteDeadline)) if c.bw != nil { c.bw.Flush() } c.nc.Close() c.nc.SetWriteDeadline(time.Time{}) } func (c *client) typeString() string { switch c.typ { case CLIENT: return "Client" case ROUTER: return "Router" } return "Unknown Type" } func (c *client) closeConnection() { c.mu.Lock() if c.nc == nil { c.mu.Unlock() return } c.Debugf("%s connection closed", c.typeString()) c.clearAuthTimer() c.clearPingTimer() c.clearConnection() c.nc = nil // Snapshot for use. subs := make([]*subscription, 0, len(c.subs)) for _, sub := range c.subs { subs = append(subs, sub) } srv := c.srv retryImplicit := false if c.route != nil { retryImplicit = c.route.retry } c.mu.Unlock() if srv != nil { // Unregister srv.removeClient(c) // Remove clients subscriptions. for _, sub := range subs { srv.sl.Remove(sub) // Forward on unsubscribes if we are not // a router ourselves. if c.typ != ROUTER { srv.broadcastUnSubscribe(sub) } } } // Check for a solicited route. If it was, start up a reconnect unless // we are already connected to the other end. if c.isSolicitedRoute() || retryImplicit { // Capture these under lock c.mu.Lock() rid := c.route.remoteID rtype := c.route.routeType rurl := c.route.url c.mu.Unlock() srv.mu.Lock() defer srv.mu.Unlock() // It is possible that the server is being shutdown. // If so, don't try to reconnect if !srv.running { return } if rid != "" && srv.remotes[rid] != nil { Debugf("Not attempting reconnect for solicited route, already connected to \"%s\"", rid) return } else if rid == srv.info.ID { Debugf("Detected route to self, ignoring \"%s\"", rurl) return } else if rtype != Implicit || retryImplicit { Debugf("Attempting reconnect for solicited route \"%s\"", rurl) // Keep track of this go-routine so we can wait for it on // server shutdown. srv.startGoRoutine(func() { srv.reConnectToRoute(rurl, rtype) }) } } } // Logging functionality scoped to a client or route. func (c *client) Errorf(format string, v ...interface{}) { format = fmt.Sprintf("%s - %s", c, format) Errorf(format, v...) } func (c *client) Debugf(format string, v ...interface{}) { format = fmt.Sprintf("%s - %s", c, format) Debugf(format, v...) } func (c *client) Noticef(format string, v ...interface{}) { format = fmt.Sprintf("%s - %s", c, format) Noticef(format, v...) } func (c *client) Tracef(format string, v ...interface{}) { format = fmt.Sprintf("%s - %s", c, format) Tracef(format, v...) }
1
7,004
We know debug and trace exist as globals and default to 0. In this instance we need to know that c is non-nil, srv is non-nil, and logging is non-nil before we can trust this statement not to panic.
nats-io-nats-server
go
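The review above argues that the client logging wrappers are only safe when c, srv, and the logging globals are all non-nil. Below is a minimal, hypothetical Go sketch of a nil-guarded wrapper in the spirit of that comment; the client/server types and the guard logic are illustrative stand-ins, not the project's actual fix.

package main

import "fmt"

type server struct{}

type client struct {
	srv *server
}

func (c *client) String() string { return "cid:1" }

// Debugf returns early unless both the receiver and its server are set,
// so a call on a nil or partially initialized client cannot panic.
func (c *client) Debugf(format string, v ...interface{}) {
	if c == nil || c.srv == nil {
		return
	}
	fmt.Printf("[DBG] %s - "+format+"\n", append([]interface{}{c}, v...)...)
}

func main() {
	var c *client
	c.Debugf("ignored: nil receiver")           // no panic, the guard returns early
	(&client{srv: &server{}}).Debugf("n=%d", 3) // prints "[DBG] cid:1 - n=3"
}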
@@ -58,7 +58,6 @@ type Provider interface { func NewApp(AppRoot string, provider string) (*DdevApp, error) { // Set defaults. app := &DdevApp{} - app.ConfigPath = filepath.Join(AppRoot, ".ddev", "config.yaml") app.AppRoot = AppRoot app.ConfigPath = app.GetConfigPath("config.yaml")
1
package ddevapp import ( "bytes" "fmt" "html/template" "io/ioutil" "os" "path/filepath" "strings" "regexp" "github.com/drud/ddev/pkg/appports" "github.com/drud/ddev/pkg/exec" "github.com/drud/ddev/pkg/fileutil" "github.com/drud/ddev/pkg/output" "github.com/drud/ddev/pkg/util" "github.com/drud/ddev/pkg/version" log "github.com/sirupsen/logrus" yaml "gopkg.in/yaml.v2" "runtime" ) // DefaultProviderName contains the name of the default provider which will be used if one is not otherwise specified. const DefaultProviderName = "default" // DdevDefaultPHPVersion is the default PHP version, overridden by $DDEV_PHP_VERSION const DdevDefaultPHPVersion = "7.1" // DdevDefaultRouterHTTPPort is the starting router port, 80 const DdevDefaultRouterHTTPPort = "80" // DdevDefaultRouterHTTPSPort is the starting https router port, 443 const DdevDefaultRouterHTTPSPort = "443" // Regexp pattern to determine if a hostname is valid per RFC 1123. var hostRegex = regexp.MustCompile(`^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$`) // Command defines commands to be run as pre/post hooks type Command struct { Exec string `yaml:"exec,omitempty"` ExecHost string `yaml:"exec-host,omitempty"` } // Provider is the interface which all provider plugins must implement. type Provider interface { Init(app *DdevApp) error ValidateField(string, string) error PromptForConfig() error Write(string) error Read(string) error Validate() error GetBackup(string) (fileLocation string, importPath string, err error) } // NewApp creates a new DdevApp struct with defaults set and overridden by any existing config.yml. func NewApp(AppRoot string, provider string) (*DdevApp, error) { // Set defaults. app := &DdevApp{} app.ConfigPath = filepath.Join(AppRoot, ".ddev", "config.yaml") app.AppRoot = AppRoot app.ConfigPath = app.GetConfigPath("config.yaml") app.APIVersion = version.DdevVersion app.PHPVersion = DdevDefaultPHPVersion app.RouterHTTPPort = DdevDefaultRouterHTTPPort app.RouterHTTPSPort = DdevDefaultRouterHTTPSPort // These should always default to the latest image/tag names from the Version package. app.WebImage = version.WebImg + ":" + version.WebTag app.DBImage = version.DBImg + ":" + version.DBTag app.DBAImage = version.DBAImg + ":" + version.DBATag // Load from file if available. This will return an error if the file doesn't exist, // and it is up to the caller to determine if that's an issue. if _, err := os.Stat(app.ConfigPath); !os.IsNotExist(err) { err = app.ReadConfig() if err != nil { return app, fmt.Errorf("%v exists but cannot be read. It may be invalid due to a syntax error.: %v", app.ConfigPath, err) } } // Allow override with "pantheon" from function provider arg, but nothing else. // Otherwise we accept whatever might have been in config file if there was anything. if provider == "" && app.Provider != "" { // Do nothing. This is the case where the config has a provider and no override is provided. Config wins. } else if provider == "pantheon" || provider == DefaultProviderName { app.Provider = provider // Use the provider passed-in. Function argument wins. } else if provider == "" && app.Provider == "" { app.Provider = DefaultProviderName // Nothing passed in, nothing configured. Set c.Provider to default } else { return app, fmt.Errorf("Provider '%s' is not implemented", provider) } return app, nil } // GetConfigPath returns the path to an application config file specified by filename. 
func (app *DdevApp) GetConfigPath(filename string) string { return filepath.Join(app.AppRoot, ".ddev", filename) } // WriteConfig writes the app configuration into the .ddev folder. func (app *DdevApp) WriteConfig() error { // Work against a copy of the DdevApp, since we don't want to actually change it. appcopy := *app // Update the "APIVersion" to be the ddev version. appcopy.APIVersion = version.DdevVersion // We don't want to even set the images on write, even though we'll respect them on read. appcopy.DBAImage = "" appcopy.DBImage = "" appcopy.WebImage = "" err := PrepDdevDirectory(filepath.Dir(appcopy.ConfigPath)) if err != nil { return err } cfgbytes, err := yaml.Marshal(appcopy) if err != nil { return err } // Append current image information cfgbytes = append(cfgbytes, []byte(fmt.Sprintf("\n\n# This config.yaml was created with ddev version %s \n# webimage: %s:%s\n# dbimage: %s:%s\n# dbaimage: %s:%s\n# However we do not recommend explicitly wiring these images into the\n# config.yaml as they may break future versions of ddev.\n# You can update this config.yaml using 'ddev config'.\n", version.DdevVersion, version.WebImg, version.WebTag, version.DBImg, version.DBTag, version.DBAImg, version.DBATag))...) // Append hook information and sample hook suggestions. cfgbytes = append(cfgbytes, []byte(ConfigInstructions)...) cfgbytes = append(cfgbytes, appcopy.GetHookDefaultComments()...) err = ioutil.WriteFile(appcopy.ConfigPath, cfgbytes, 0644) if err != nil { return err } provider, err := appcopy.GetProvider() if err != nil { return err } err = provider.Write(appcopy.GetConfigPath("import.yaml")) if err != nil { return err } // Allow project-specific post-config action err = appcopy.PostConfigAction() if err != nil { return err } return nil } // ReadConfig reads app configuration from a specified location on disk, falling // back to defaults for config values not defined in the read config file. func (app *DdevApp) ReadConfig() error { source, err := ioutil.ReadFile(app.ConfigPath) if err != nil { return fmt.Errorf("could not find an active ddev configuration at %s have you run 'ddev config'? %v", app.ConfigPath, err) } // validate extend command keys err = validateCommandYaml(source) if err != nil { return fmt.Errorf("invalid configuration in %s: %v", app.ConfigPath, err) } // ReadConfig config values from file. err = yaml.Unmarshal(source, app) if err != nil { return err } if app.APIVersion != version.DdevVersion { util.Warning("Your .ddev/config.yaml version is %s, but ddev is version %s. \nPlease run 'ddev config' to update your config.yaml. \nddev may not operate correctly until you do.", app.APIVersion, version.DdevVersion) } // If any of these values aren't defined in the config file, set them to defaults. 
if app.Name == "" { app.Name = filepath.Base(app.AppRoot) } if app.PHPVersion == "" { app.PHPVersion = DdevDefaultPHPVersion } if app.RouterHTTPPort == "" { app.RouterHTTPPort = DdevDefaultRouterHTTPPort } if app.RouterHTTPSPort == "" { app.RouterHTTPSPort = DdevDefaultRouterHTTPSPort } if app.WebImage == "" { app.WebImage = version.WebImg + ":" + version.WebTag } if app.DBImage == "" { app.DBImage = version.DBImg + ":" + version.DBTag } if app.DBAImage == "" { app.DBAImage = version.DBAImg + ":" + version.DBATag } dirPath := filepath.Join(util.GetGlobalDdevDir(), app.Name) app.DataDir = filepath.Join(dirPath, "mysql") app.ImportDir = filepath.Join(dirPath, "import-db") app.SetApptypeSettingsPaths() return nil } // WarnIfConfigReplace just messages user about whether config is being replaced or created func (app *DdevApp) WarnIfConfigReplace() { if app.ConfigExists() { util.Warning("You are reconfiguring the project at %s. \nThe existing configuration will be updated and replaced.", app.AppRoot) } else { util.Success("Creating a new ddev project config in the current directory (%s)", app.AppRoot) util.Success("Once completed, your configuration will be written to %s\n", app.ConfigPath) } } // PromptForConfig goes through a set of prompts to receive user input and generate an Config struct. func (app *DdevApp) PromptForConfig() error { app.WarnIfConfigReplace() for { err := app.promptForName() if err == nil { break } output.UserOut.Printf("%v", err) } for { err := app.docrootPrompt() if err == nil { break } output.UserOut.Printf("%v", err) } err := app.appTypePrompt() if err != nil { return err } err = app.ConfigFileOverrideAction() if err != nil { return err } err = app.providerInstance.PromptForConfig() return err } // ValidateConfig ensures the configuration meets ddev's requirements. func (app *DdevApp) ValidateConfig() error { // validate docroot fullPath := filepath.Join(app.AppRoot, app.Docroot) if _, err := os.Stat(fullPath); os.IsNotExist(err) { return fmt.Errorf("no directory could be found at %s. Please enter a valid docroot in your configuration", fullPath) } // validate hostname match := hostRegex.MatchString(app.GetHostname()) if !match { return fmt.Errorf("%s is not a valid hostname. Please enter a site name in your configuration that will allow for a valid hostname. See https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_hostnames for valid hostname requirements", app.GetHostname()) } // validate apptype match = IsValidAppType(app.Type) if !match { return fmt.Errorf("'%s' is not a valid apptype", app.Type) } return nil } // DockerComposeYAMLPath returns the absolute path to where the // docker-compose.yaml should exist for this app. func (app *DdevApp) DockerComposeYAMLPath() string { return app.GetConfigPath("docker-compose.yaml") } // GetHostname returns the primary hostname of the app. func (app *DdevApp) GetHostname() string { return app.Name + "." + version.DDevTLD } // GetHostnames returns an array of all the configured hostnames. func (app *DdevApp) GetHostnames() []string { // Use a map to make sure that we have unique hostnames // The value is useless, so just use the int 1 for assignment. nameListMap := make(map[string]int) nameListMap[app.GetHostname()] = 1 for _, name := range app.AdditionalHostnames { nameListMap[name+"."+version.DDevTLD] = 1 } // Now walk the map and extract the keys into an array. 
nameListArray := make([]string, 0, len(nameListMap)) for k := range nameListMap { nameListArray = append(nameListArray, k) } return nameListArray } // WriteDockerComposeConfig writes a docker-compose.yaml to the app configuration directory. func (app *DdevApp) WriteDockerComposeConfig() error { var err error if fileutil.FileExists(app.DockerComposeYAMLPath()) { found, err := fileutil.FgrepStringInFile(app.DockerComposeYAMLPath(), DdevFileSignature) util.CheckErr(err) // If we did *not* find the ddev file signature in docker-compose.yaml, we'll back it up and warn about it. if !found { util.Warning("User-managed docker-compose.yaml will be replaced with ddev-generated docker-compose.yaml. Original file will be placed in docker-compose.yaml.bak") _ = os.Remove(app.DockerComposeYAMLPath() + ".bak") err = os.Rename(app.DockerComposeYAMLPath(), app.DockerComposeYAMLPath()+".bak") util.CheckErr(err) } } // nolint: vetshadow f, err := os.Create(app.DockerComposeYAMLPath()) if err != nil { return err } defer util.CheckClose(f) rendered, err := app.RenderComposeYAML() if err != nil { return err } _, err = f.WriteString(rendered) if err != nil { return err } return err } // CheckCustomConfig warns the user if any custom configuration files are in use. func (app *DdevApp) CheckCustomConfig() { // Get the path to .ddev for the current app. ddevDir := filepath.Dir(app.ConfigPath) customConfig := false if _, err := os.Stat(filepath.Join(ddevDir, "nginx-site.conf")); err == nil { util.Warning("Using custom nginx configuration in nginx-site.conf") customConfig = true } mysqlPath := filepath.Join(ddevDir, "mysql") if _, err := os.Stat(mysqlPath); err == nil { mysqlFiles, err := fileutil.ListFilesInDir(mysqlPath) util.CheckErr(err) if len(mysqlFiles) > 0 { util.Warning("Using custom mysql configuration: %v", mysqlFiles) customConfig = true } } phpPath := filepath.Join(ddevDir, "php") if _, err := os.Stat(phpPath); err == nil { phpFiles, err := fileutil.ListFilesInDir(phpPath) util.CheckErr(err) if len(phpFiles) > 0 { util.Warning("Using custom PHP configuration: %v", phpFiles) customConfig = true } } if customConfig { util.Warning("Custom configuration takes effect when container is created, \nusually on start, use 'ddev restart' if you're not seeing it take effect.") } } // RenderComposeYAML renders the contents of docker-compose.yaml. func (app *DdevApp) RenderComposeYAML() (string, error) { var doc bytes.Buffer var err error var docker0Addr = "127.0.0.1" var docker0Hostname = "unneeded" templ := template.New("compose template") templ, err = templ.Parse(DDevComposeTemplate) if err != nil { return "", err } // Docker 18.03 on linux doesn't define host.docker.internal // so we need to go get the ip address of docker0 // We would hope to be able to remove this when // https://github.com/docker/for-linux/issues/264 gets resolved. if runtime.GOOS == "linux" { out, err := exec.RunCommandPipe("ip", []string{"address", "show", "dev", "docker0"}) // Do not process if ip command fails, we'll just ignore and not act. 
if err == nil { addr := regexp.MustCompile(`inet *[0-9\.]+`).FindString(out) components := strings.Split(addr, " ") if len(components) == 2 { docker0Addr = components[1] docker0Hostname = "host.docker.internal" } } } templateVars := map[string]string{ "name": app.Name, "plugin": "ddev", "appType": app.Type, "mailhogport": appports.GetPort("mailhog"), "dbaport": appports.GetPort("dba"), "dbport": appports.GetPort("db"), "ddevgenerated": DdevFileSignature, "extra_host": docker0Hostname + `:` + docker0Addr, } err = templ.Execute(&doc, templateVars) return doc.String(), err } // Define an application name. func (app *DdevApp) promptForName() error { provider, err := app.GetProvider() if err != nil { return err } namePrompt := "Project name" if app.Name == "" { dir, err := os.Getwd() // if working directory name is invalid for hostnames, we shouldn't suggest it if err == nil && hostRegex.MatchString(filepath.Base(dir)) { app.Name = filepath.Base(dir) } } namePrompt = fmt.Sprintf("%s (%s)", namePrompt, app.Name) fmt.Print(namePrompt + ": ") app.Name = util.GetInput(app.Name) return provider.ValidateField("Name", app.Name) } // AvailableDocrootLocations returns an of default docroot locations to look for. func AvailableDocrootLocations() []string { return []string{ "web/public", "web", "docroot", "htdocs", "_www", "public", } } // DiscoverDefaultDocroot returns the default docroot directory. func DiscoverDefaultDocroot(app *DdevApp) string { // Provide use the app.Docroot as the default docroot option. var defaultDocroot = app.Docroot if defaultDocroot == "" { for _, docroot := range AvailableDocrootLocations() { if _, err := os.Stat(docroot); err != nil { continue } if fileutil.FileExists(filepath.Join(docroot, "index.php")) { defaultDocroot = docroot break } } } return defaultDocroot } // Determine the document root. func (app *DdevApp) docrootPrompt() error { provider, err := app.GetProvider() if err != nil { return err } // Determine the document root. output.UserOut.Printf("\nThe docroot is the directory from which your site is served. This is a relative path from your project root (%s)", app.AppRoot) output.UserOut.Println("You may leave this value blank if your site files are in the project root") var docrootPrompt = "Docroot Location" var defaultDocroot = DiscoverDefaultDocroot(app) // If there is a default docroot, display it in the prompt. if defaultDocroot != "" { docrootPrompt = fmt.Sprintf("%s (%s)", docrootPrompt, defaultDocroot) } else { docrootPrompt = fmt.Sprintf("%s (current directory)", docrootPrompt) } fmt.Print(docrootPrompt + ": ") app.Docroot = util.GetInput(defaultDocroot) // Ensure the docroot exists. If it doesn't, prompt the user to verify they entered it correctly. fullPath := filepath.Join(app.AppRoot, app.Docroot) if _, err := os.Stat(fullPath); os.IsNotExist(err) { output.UserOut.Errorf("No directory could be found at %s. Please enter a valid docroot\n", fullPath) app.Docroot = "" return app.docrootPrompt() } return provider.ValidateField("Docroot", app.Docroot) } // ConfigExists determines if a ddev config file exists for this application. func (app *DdevApp) ConfigExists() bool { if _, err := os.Stat(app.ConfigPath); os.IsNotExist(err) { return false } return true } // appTypePrompt handles the Type workflow. 
func (app *DdevApp) appTypePrompt() error { provider, err := app.GetProvider() if err != nil { return err } validAppTypes := strings.Join(GetValidAppTypes(), ", ") typePrompt := fmt.Sprintf("Project Type [%s]", validAppTypes) // First, see if we can auto detect what kind of site it is so we can set a sane default. detectedAppType := app.DetectAppType() // If the detected detectedAppType is php, we'll ask them to confirm, // otherwise go with it. // If we found an application type just set it and inform the user. util.Success("Found a %s codebase at %s.", detectedAppType, filepath.Join(app.AppRoot, app.Docroot)) typePrompt = fmt.Sprintf("%s (%s)", typePrompt, detectedAppType) fmt.Printf(typePrompt + ": ") appType := strings.ToLower(util.GetInput(detectedAppType)) for !IsValidAppType(appType) { output.UserOut.Errorf("'%s' is not a valid project type. Allowed project types are: %s\n", appType, validAppTypes) fmt.Printf(typePrompt + ": ") appType = strings.ToLower(util.GetInput(appType)) } app.Type = appType return provider.ValidateField("Type", app.Type) } // PrepDdevDirectory creates a .ddev directory in the current working directory func PrepDdevDirectory(dir string) error { if _, err := os.Stat(dir); os.IsNotExist(err) { log.WithFields(log.Fields{ "directory": dir, }).Debug("Config Directory does not exist, attempting to create.") err := os.MkdirAll(dir, 0755) if err != nil { return err } } return nil } // validateCommandYaml validates command hooks and tasks defined in hooks for config.yaml func validateCommandYaml(source []byte) error { validHooks := []string{ "pre-start", "post-start", "pre-import-db", "post-import-db", "pre-import-files", "post-import-files", } validTasks := []string{ "exec", "exec-host", } type Validate struct { Commands map[string][]map[string]interface{} `yaml:"hooks,omitempty"` } val := &Validate{} err := yaml.Unmarshal(source, val) if err != nil { return err } for command, tasks := range val.Commands { var match bool for _, hook := range validHooks { if command == hook { match = true } } if !match { return fmt.Errorf("invalid command hook %s defined in config.yaml", command) } for _, taskSet := range tasks { for taskName := range taskSet { var match bool for _, validTask := range validTasks { if taskName == validTask { match = true } } if !match { return fmt.Errorf("invalid task '%s' defined for %s hook in config.yaml", taskName, command) } } } } return nil }
1
12,787
Good removal here for two reasons: we're redeclaring `app.ConfigPath` immediately below without having used the initial value, and `app.GetConfigPath()` will build the value using the same process as in this line anyway.
drud-ddev
php
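A minimal sketch of the reviewer's equivalence claim above, assuming a stripped-down DdevApp with only the two fields involved: once AppRoot is set, GetConfigPath("config.yaml") builds the same string the deleted filepath.Join line did, so the first assignment was never used. The paths and struct here are illustrative, not the real ddev type.

package main

import (
	"fmt"
	"path/filepath"
)

type DdevApp struct {
	AppRoot    string
	ConfigPath string
}

// GetConfigPath joins AppRoot, ".ddev", and the filename, mirroring the
// helper referenced in the review comment.
func (app *DdevApp) GetConfigPath(filename string) string {
	return filepath.Join(app.AppRoot, ".ddev", filename)
}

func main() {
	appRoot := "/home/user/myproject" // hypothetical project root
	app := &DdevApp{AppRoot: appRoot}

	old := filepath.Join(appRoot, ".ddev", "config.yaml") // the removed line
	app.ConfigPath = app.GetConfigPath("config.yaml")     // the line that remains

	fmt.Println(old == app.ConfigPath) // true: both yield <AppRoot>/.ddev/config.yaml
}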
@@ -193,9 +193,7 @@ namespace Microsoft.VisualStudio.TestPlatform.CommandLine.Processors new RunSpecificTestsArgumentProcessor(), new TestAdapterPathArgumentProcessor(), new TestCaseFilterArgumentProcessor(), - new OutputArgumentProcessor(), new BuildBasePathArgumentProcessor(), - new ConfigurationArgumentProcessor(), new ParentProcessIdArgumentProcessor(), new PortArgumentProcessor(), new RunSettingsArgumentProcessor(),
1
// Copyright (c) Microsoft. All rights reserved. namespace Microsoft.VisualStudio.TestPlatform.CommandLine.Processors { using System; using System.Collections.Generic; using System.Diagnostics.Contracts; using System.Linq; using Microsoft.VisualStudio.TestPlatform.ObjectModel; /// <summary> /// Used to create the appropriate instance of an argument processor. /// </summary> internal class ArgumentProcessorFactory { #region Constants /// <summary> /// The command starter. /// </summary> internal const string CommandStarter = "/"; /// <summary> /// The xplat command starter. /// </summary> internal const string XplatCommandStarter = "-"; #endregion #region Fields /// <summary> /// Available argument processors. /// </summary> private readonly IEnumerable<IArgumentProcessor> argumentProcessors; private Dictionary<string, IArgumentProcessor> commandToProcessorMap; private Dictionary<string, IArgumentProcessor> specialCommandToProcessorMap; #endregion #region Constructor /// <summary> /// Initializes the argument processor factory. /// </summary> /// <param name="argumentProcessors"> /// The argument Processors. /// </param> /// <remarks> /// This is not public because the static Create method should be used to access the instance. /// </remarks> protected ArgumentProcessorFactory(IEnumerable<IArgumentProcessor> argumentProcessors) { Contract.Requires(argumentProcessors != null); this.argumentProcessors = argumentProcessors; } #endregion #region Static Methods /// <summary> /// Creates ArgumentProcessorFactory. /// </summary> /// <returns>ArgumentProcessorFactory.</returns> internal static ArgumentProcessorFactory Create() { // Get the ArgumentProcessorFactory return new ArgumentProcessorFactory(DefaultArgumentProcessors); } #endregion #region Properties /// <summary> /// Returns all of the available argument processors. /// </summary> public IEnumerable<IArgumentProcessor> AllArgumentProcessors { get { return argumentProcessors; } } /// <summary> /// Gets a mapping between command and Argument Executor. /// </summary> internal Dictionary<string, IArgumentProcessor> CommandToProcessorMap { get { // Build the mapping if it does not already exist. if (this.commandToProcessorMap == null) { BuildCommandMaps(); } return this.commandToProcessorMap; } } /// <summary> /// Gets a mapping between special commands and their Argument Processors. /// </summary> internal Dictionary<string, IArgumentProcessor> SpecialCommandToProcessorMap { get { // Build the mapping if it does not already exist. if (this.specialCommandToProcessorMap == null) { BuildCommandMaps(); } return this.specialCommandToProcessorMap; } } #endregion #region Public Methods /// <summary> /// Creates the argument processor associated with the provided command line argument. /// The Lazy that is returned will initialize the underlying argument processor when it is first accessed. /// </summary> /// <param name="argument">Command line argument to create the argument processor for.</param> /// <returns>The argument processor or null if one was not found.</returns> public IArgumentProcessor CreateArgumentProcessor(string argument) { if (String.IsNullOrWhiteSpace(argument)) { throw new ArgumentException("Cannot be null or empty", "argument"); } Contract.EndContractBlock(); // Parse the input into its command and argument parts. var pair = new CommandArgumentPair(argument); // Find the associated argument processor. 
IArgumentProcessor argumentProcessor; CommandToProcessorMap.TryGetValue(pair.Command, out argumentProcessor); // If an argument processor was not found for the command, then consider it as a test source argument. if (argumentProcessor == null) { // Update the command pair since the command is actually the argument in the case of // a test source. pair = new CommandArgumentPair(TestSourceArgumentProcessor.CommandName, argument); argumentProcessor = SpecialCommandToProcessorMap[TestSourceArgumentProcessor.CommandName]; } if (argumentProcessor != null) { argumentProcessor = WrapLazyProcessorToInitializeOnInstantiation(argumentProcessor, pair.Argument); } return argumentProcessor; } /// <summary> /// Creates the default action argument processor. /// The Lazy that is returned will initialize the underlying argument processor when it is first accessed. /// </summary> /// <returns>The default action argument processor.</returns> public IArgumentProcessor CreateDefaultActionArgumentProcessor() { var argumentProcessor = SpecialCommandToProcessorMap[RunTestsArgumentProcessor.CommandName]; return WrapLazyProcessorToInitializeOnInstantiation(argumentProcessor, null); } /// <summary> /// Gets the argument processors that are tagged as special and to be always executed. /// The Lazy's that are returned will initialize the underlying argument processor when first accessed. /// </summary> /// <returns>The argument processors that are tagged as special and to be always executed.</returns> public IEnumerable<IArgumentProcessor> GetArgumentProcessorsToAlwaysExecute() { return SpecialCommandToProcessorMap.Values .Where(lazyProcessor => lazyProcessor.Metadata.Value.IsSpecialCommand && lazyProcessor.Metadata.Value.AlwaysExecute); } #endregion #region Private Methods private static IEnumerable<IArgumentProcessor> DefaultArgumentProcessors => new List<IArgumentProcessor> { new HelpArgumentProcessor(), new TestSourceArgumentProcessor(), new ListTestsArgumentProcessor(), new RunTestsArgumentProcessor(), new RunSpecificTestsArgumentProcessor(), new TestAdapterPathArgumentProcessor(), new TestCaseFilterArgumentProcessor(), new OutputArgumentProcessor(), new BuildBasePathArgumentProcessor(), new ConfigurationArgumentProcessor(), new ParentProcessIdArgumentProcessor(), new PortArgumentProcessor(), new RunSettingsArgumentProcessor(), new PlatformArgumentProcessor(), new FrameworkArgumentProcessor(), new EnableLoggerArgumentProcessor(), new ParallelArgumentProcessor() }; /// <summary> /// Builds the command to processor map and special command to processor map. /// </summary> private void BuildCommandMaps() { this.commandToProcessorMap = new Dictionary<string, IArgumentProcessor>(StringComparer.OrdinalIgnoreCase); this.specialCommandToProcessorMap = new Dictionary<string, IArgumentProcessor>(StringComparer.OrdinalIgnoreCase); foreach (IArgumentProcessor argumentProcessor in this.argumentProcessors) { // Add the command to the appropriate dictionary. var processorsMap = argumentProcessor.Metadata.Value.IsSpecialCommand ? 
this.specialCommandToProcessorMap : this.commandToProcessorMap; string commandName = argumentProcessor.Metadata.Value.CommandName; processorsMap.Add(commandName, argumentProcessor); // Add xplat name for the command name commandName = string.Concat("--", commandName.Remove(0, 1)); processorsMap.Add(commandName, argumentProcessor); if (!string.IsNullOrEmpty(argumentProcessor.Metadata.Value.ShortCommandName)) { string shortCommandName = argumentProcessor.Metadata.Value.ShortCommandName; processorsMap.Add(shortCommandName, argumentProcessor); // Add xplat short name for the command name shortCommandName = shortCommandName.Replace('/', '-'); processorsMap.Add(shortCommandName, argumentProcessor); } } } /// <summary> /// Decorates a lazy argument processor so that the real processor is initialized when the lazy value is obtained. /// </summary> /// <param name="processor">The lazy processor.</param> /// <param name="initArg">The argument with which the real processor should be initialized.</param> /// <returns>The decorated lazy processor.</returns> private static IArgumentProcessor WrapLazyProcessorToInitializeOnInstantiation( IArgumentProcessor processor, string initArg) { var processorExecutor = processor.Executor; var lazyArgumentProcessor = new Lazy<IArgumentExecutor>(() => { IArgumentExecutor instance = null; try { instance = processorExecutor.Value; } catch (Exception e) { if (EqtTrace.IsErrorEnabled) { EqtTrace.Error("ArgumentProcessorFactory.WrapLazyProcessorToInitializeOnInstantiation: Exception creating argument processor: {0}", e); } throw; } if (instance != null) { try { instance.Initialize(initArg); } catch (Exception e) { if (EqtTrace.IsErrorEnabled) { EqtTrace.Error("ArgumentProcessorFactory.WrapLazyProcessorToInitializeOnInstantiation: Exception initializing argument processor: {0}", e); } throw; } } return instance; }, System.Threading.LazyThreadSafetyMode.PublicationOnly); processor.Executor = lazyArgumentProcessor; return processor; } #endregion } }
1
11,237
Should BuildBase be removed?
microsoft-vstest
.cs
@@ -1,13 +1,14 @@ package internal_test import ( + "io/ioutil" "os" + "path" "regexp" "testing" - "github.com/golangci/golangci-lint/pkg/fsutils" - ast "github.com/stretchr/testify/assert" - req "github.com/stretchr/testify/require" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" tf "github.com/filecoin-project/go-filecoin/testhelpers/testflags" . "github.com/filecoin-project/go-filecoin/tools/migration/internal"
1
package internal_test import ( "os" "regexp" "testing" "github.com/golangci/golangci-lint/pkg/fsutils" ast "github.com/stretchr/testify/assert" req "github.com/stretchr/testify/require" tf "github.com/filecoin-project/go-filecoin/testhelpers/testflags" . "github.com/filecoin-project/go-filecoin/tools/migration/internal" ) func TestRepoMigrationHelper_GetOldRepo(t *testing.T) { tf.UnitTest(t) assert := ast.New(t) require := req.New(t) t.Run("Uses the option values when passed to ctor", func(t *testing.T) { dirname := "/tmp/filecoindir" mustMakeTmpDir(require, dirname) defer mustRmDir(require, dirname) rmh := NewRepoFSWrangler(dirname, "", "1", "2") or, err := rmh.GetOldRepo() require.NoError(err) assert.Equal(dirname, or.Name()) }) } func TestRepoMigrationHelper_MakeNewRepo(t *testing.T) { tf.UnitTest(t) assert := ast.New(t) require := req.New(t) t.Run("Creates the dir", func(t *testing.T) { dirname := "/tmp/myfilecoindir" mustMakeTmpDir(require, dirname) defer mustRmDir(require, dirname) rmh := NewRepoFSWrangler(dirname, "", "1", "2") or, err := rmh.MakeNewRepo() require.NoError(err) defer mustRmDir(require, or.Name()) assert.True(fsutils.IsDir(or.Name())) }) } func TestGetNewRepoPath(t *testing.T) { tf.UnitTest(t) assert := ast.New(t) require := req.New(t) dirname := "/tmp/myfilecoindir" t.Run("Uses the new repo opt as a prefix if provided", func(t *testing.T) { rmh := NewRepoFSWrangler(dirname, "/tmp/somethingelse", "1", "2") newpath := rmh.GetNewRepoPath() rgx, err := regexp.Compile("/tmp/somethingelse_1_2_[0-9]{8}-[0-9]{6}$") require.NoError(err) assert.Regexp(rgx, newpath) }) t.Run("Adds a timestamp to the new repo dir", func(t *testing.T) { rmh := NewRepoFSWrangler(dirname, "", "1", "2") newpath := rmh.GetNewRepoPath() rgx, err := regexp.Compile("/tmp/myfilecoindir_1_2_[0-9]{8}-[0-9]{6}$") require.NoError(err) assert.Regexp(rgx, newpath) }) } func mustMakeTmpDir(require *req.Assertions, dirname string) { require.NoError(os.Mkdir(dirname, os.ModeDir|os.ModeTemporary|os.ModePerm)) } // ensure that the error condition is checked when we clean up after creating tmpdirs. func mustRmDir(require *req.Assertions, dirname string) { require.NoError(os.Remove(dirname)) }
1
18,853
this was a bad Goland auto-import that I missed in the previous PR. Thanks to @rkowalick and @acruikshank for catching it
filecoin-project-venus
go
@@ -133,10 +133,12 @@ TEST_CASE("github #2224", "[bug, molops, removeHs, query]") { } { // but if we add a query feature it's not removed RWMol m2(*mol); - m2.replaceAtom(1, new QueryAtom(1)); + auto *qa = new QueryAtom(1); + m2.replaceAtom(1, qa); m2.getAtomWithIdx(1)->setAtomicNum(1); MolOps::removeHs(m2); CHECK(m2.getNumAtoms() == 2); + delete qa; } } }
1
#define CATCH_CONFIG_MAIN // This tells Catch to provide a main() - only do // this in one cpp file #include "catch.hpp" #include <GraphMol/RDKitBase.h> #include <GraphMol/RDKitQueries.h> #include <GraphMol/Chirality.h> #include <GraphMol/FileParsers/FileParsers.h> #include <GraphMol/SmilesParse/SmilesParse.h> #include <GraphMol/SmilesParse/SmilesWrite.h> #include <GraphMol/SmilesParse/SmartsWrite.h> using namespace RDKit; TEST_CASE("SMILES Parsing works", "[molops]") { std::unique_ptr<RWMol> mol(SmilesToMol("C1CC1")); REQUIRE(mol); REQUIRE(mol->getNumAtoms() == 3); } TEST_CASE("Sanitization tests", "[molops]") { std::unique_ptr<RWMol> mol(SmilesToMol("C1=CC=CC=C1Cc2ccccc2", false, false)); REQUIRE(mol); REQUIRE(mol->getNumAtoms() == 13); SECTION("properties") { mol->updatePropertyCache(); CHECK(mol->getAtomWithIdx(0)->getTotalNumHs() == 1); CHECK(!mol->getAtomWithIdx(0)->getIsAromatic()); CHECK(mol->getAtomWithIdx(7)->getIsAromatic()); SECTION("aromaticity") { unsigned int opThatFailed; MolOps::sanitizeMol(*mol, opThatFailed, MolOps::SANITIZE_SETAROMATICITY); // mol->debugMol(std::cerr); CHECK(mol->getAtomWithIdx(7)->getIsAromatic()); // blocked by #1730 // CHECK(mol->getAtomWithIdx(0)->getIsAromatic()); } SECTION("kekulize") { unsigned int opThatFailed; MolOps::sanitizeMol(*mol, opThatFailed, MolOps::SANITIZE_KEKULIZE); CHECK(!mol->getAtomWithIdx(0)->getIsAromatic()); CHECK(!mol->getAtomWithIdx(7)->getIsAromatic()); } } } TEST_CASE("Github #2062", "[bug, molops]") { SmilesParserParams ps; ps.removeHs = false; ps.sanitize = true; std::unique_ptr<RWMol> mol(SmilesToMol("[C:1][C:2]([H:3])([H])[O:4][H]", ps)); REQUIRE(mol); CHECK(mol->getNumAtoms() == 6); mol->getAtomWithIdx(1)->setProp("intProp", 42); MolOps::mergeQueryHs(*mol); CHECK(mol->getNumAtoms() == 3); SECTION("basics") { CHECK(mol->getAtomWithIdx(1)->getAtomMapNum() == 2); } SECTION("other props") { REQUIRE(mol->getAtomWithIdx(1)->hasProp("intProp")); CHECK(mol->getAtomWithIdx(1)->getProp<int>("intProp") == 42); } } TEST_CASE("Github #2086", "[bug, molops]") { SECTION("reported version") { auto mol = "C1CCCC1"_smiles; REQUIRE(mol); MolOps::addHs(*mol); REQUIRE(mol->getNumAtoms() == 15); mol->removeBond(4, 13); MolOps::removeHs(*mol); REQUIRE(mol->getNumAtoms() == 6); } } TEST_CASE("github #299", "[bug, molops, SSSR]") { SECTION("simplified") { auto mol = "C13%13%14.C124%18.C25%13%15.C368%17.C4679.C75%10%17.C8%11%14%16.C9%11%12%18.C%10%12%15%16"_smiles; REQUIRE(mol); REQUIRE(mol->getNumAtoms() == 9); } SECTION("old example from molopstest") { auto mol = "C123C45C11C44C55C22C33C14C523"_smiles; REQUIRE(mol); REQUIRE(mol->getNumAtoms() == 9); } SECTION("carborane") { std::unique_ptr<RWMol> mol( SmilesToMol("[B]1234[B]567[B]118[B]229[B]33%10[B]454[B]656[B]711[B]822[" "C]933[B]%1045[C]6123", 0, false)); REQUIRE(mol); CHECK(mol->getNumAtoms() == 12); mol->updatePropertyCache(false); MolOps::findSSSR(*mol); REQUIRE(mol->getRingInfo()->isInitialized()); } SECTION("original report from ChEbI") { std::string pathName = getenv("RDBASE"); pathName += "/Code/GraphMol/test_data/"; std::unique_ptr<RWMol> mol( MolFileToMol(pathName + "ChEBI_50252.mol", false)); REQUIRE(mol); CHECK(mol->getNumAtoms() == 80); mol->updatePropertyCache(false); MolOps::findSSSR(*mol); REQUIRE(mol->getRingInfo()->isInitialized()); } } TEST_CASE("github #2224", "[bug, molops, removeHs, query]") { SECTION("the original report") { std::string pathName = getenv("RDBASE"); pathName += "/Code/GraphMol/test_data/"; std::unique_ptr<RWMol> mol(MolFileToMol(pathName + 
"github2224_1.mol")); REQUIRE(mol); REQUIRE(mol->getNumAtoms() == 7); } SECTION("basics") { SmilesParserParams ps; ps.removeHs = false; ps.sanitize = true; std::unique_ptr<ROMol> mol(SmilesToMol("C[H]", ps)); REQUIRE(mol); REQUIRE(mol->getNumAtoms() == 2); { // The H without a query is removed std::unique_ptr<ROMol> m2(MolOps::removeHs(*mol)); CHECK(m2->getNumAtoms() == 1); } { // but if we add a query feature it's not removed RWMol m2(*mol); m2.replaceAtom(1, new QueryAtom(1)); m2.getAtomWithIdx(1)->setAtomicNum(1); MolOps::removeHs(m2); CHECK(m2.getNumAtoms() == 2); } } } TEST_CASE( "github #2268: Recognize N in three-membered rings as potentially chiral", "[bug,stereo]") { SECTION("basics: N in a 3 ring") { const auto mol = "C[N@]1CC1C"_smiles; REQUIRE(mol); CHECK(mol->getAtomWithIdx(1)->getChiralTag() != Atom::CHI_UNSPECIFIED); } SECTION("basics: N in a 4 ring") { const auto mol = "C[N@]1CCC1C"_smiles; REQUIRE(mol); CHECK(mol->getAtomWithIdx(1)->getChiralTag() == Atom::CHI_UNSPECIFIED); } SECTION("the original molecule") { std::string mb = R"CTAB( Mrv1810 02131915062D 18 20 0 0 1 0 999 V2000 -0.7207 -1.3415 0.0000 N 0 0 1 0 0 0 0 0 0 0 0 0 -0.0583 -0.8416 0.0000 C 0 0 2 0 0 0 0 0 0 0 0 0 -0.0083 -1.7540 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0 -1.3956 -0.8666 0.0000 C 0 0 2 0 0 0 0 0 0 0 0 0 -0.3250 -0.0667 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -2.1955 -0.6499 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -1.1499 -0.0792 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 0.6541 -0.4292 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -2.7830 -1.2291 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0 -1.6081 -1.6623 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -2.4080 0.1500 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0 1.3665 -0.8374 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 0.6416 0.3958 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -3.1996 0.3708 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -3.4121 1.1624 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 1.3498 0.8207 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 2.0790 -0.4167 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 2.0665 0.4083 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 2 1 1 0 0 0 0 1 3 1 1 0 0 0 4 1 1 0 0 0 0 5 2 1 0 0 0 0 4 6 1 0 0 0 0 7 4 1 0 0 0 0 2 8 1 6 0 0 0 9 6 2 0 0 0 0 4 10 1 1 0 0 0 11 6 1 0 0 0 0 12 8 2 0 0 0 0 13 8 1 0 0 0 0 14 11 1 0 0 0 0 15 14 1 0 0 0 0 16 13 2 0 0 0 0 17 12 1 0 0 0 0 18 16 1 0 0 0 0 2 3 1 0 0 0 0 5 7 1 0 0 0 0 17 18 2 0 0 0 0 M END )CTAB"; std::unique_ptr<ROMol> mol(MolBlockToMol(mb)); REQUIRE(mol); CHECK(mol->getAtomWithIdx(0)->getChiralTag() != Atom::CHI_UNSPECIFIED); } } TEST_CASE("github #2244", "[bug, molops, stereo]") { SECTION("the original report") { auto mol = "CC=CC=CC"_smiles; REQUIRE(mol); MolOps::findPotentialStereoBonds(*mol, true); CHECK(mol->getBondWithIdx(1)->getStereo() == Bond::STEREOANY); CHECK(mol->getBondWithIdx(3)->getStereo() == Bond::STEREOANY); mol->getBondWithIdx(3)->setStereo(Bond::STEREONONE); MolOps::findPotentialStereoBonds(*mol, true); CHECK(mol->getBondWithIdx(1)->getStereo() == Bond::STEREOANY); CHECK(mol->getBondWithIdx(3)->getStereo() == Bond::STEREOANY); } } TEST_CASE( "github #2258: heterocycles with exocyclic bonds not failing valence check", "[bug, molops]") { SECTION("the original report") { std::vector<std::string> smiles = {"C=n1ccnc1", "C#n1ccnc1"}; for (auto smi : smiles) { CHECK_THROWS_AS(SmilesToMol(smi), MolSanitizeException); } } }
1
18,992
Good catch here. Reading the code, it actually seems kind of dumb that `replaceAtom` needs to copy the atom it's passed. Ah well, we're more or less stuck with that.
rdkit-rdkit
cpp
@@ -560,6 +560,17 @@ void wlr_output_schedule_frame(struct wlr_output *output) { wl_event_loop_add_idle(ev, schedule_frame_handle_idle_timer, output); } +void wlr_output_send_present(struct wlr_output *output, struct timespec *when, + unsigned seq, uint32_t flags) { + struct wlr_output_event_present event = { + .output = output, + .when = when, + .seq = seq, + .flags = flags, + }; + wlr_signal_emit_safe(&output->events.present, &event); +} + bool wlr_output_set_gamma(struct wlr_output *output, size_t size, const uint16_t *r, const uint16_t *g, const uint16_t *b) { if (!output->impl->set_gamma) {
1
#define _POSIX_C_SOURCE 200809L #include <assert.h> #include <stdlib.h> #include <string.h> #include <tgmath.h> #include <time.h> #include <wayland-server.h> #include <wlr/interfaces/wlr_output.h> #include <wlr/render/wlr_renderer.h> #include <wlr/types/wlr_box.h> #include <wlr/types/wlr_matrix.h> #include <wlr/types/wlr_output.h> #include <wlr/types/wlr_seat.h> #include <wlr/types/wlr_surface.h> #include <wlr/util/log.h> #include <wlr/util/region.h> #include "util/signal.h" #define OUTPUT_VERSION 3 static void output_send_to_resource(struct wl_resource *resource) { struct wlr_output *output = wlr_output_from_resource(resource); const uint32_t version = wl_resource_get_version(resource); if (version >= WL_OUTPUT_GEOMETRY_SINCE_VERSION) { wl_output_send_geometry(resource, output->lx, output->ly, output->phys_width, output->phys_height, output->subpixel, output->make, output->model, output->transform); } if (version >= WL_OUTPUT_MODE_SINCE_VERSION) { struct wlr_output_mode *mode; wl_list_for_each(mode, &output->modes, link) { uint32_t flags = mode->flags & WL_OUTPUT_MODE_PREFERRED; if (output->current_mode == mode) { flags |= WL_OUTPUT_MODE_CURRENT; } wl_output_send_mode(resource, flags, mode->width, mode->height, mode->refresh); } if (wl_list_length(&output->modes) == 0) { // Output has no mode, send the current width/height wl_output_send_mode(resource, WL_OUTPUT_MODE_CURRENT, output->width, output->height, output->refresh); } } if (version >= WL_OUTPUT_SCALE_SINCE_VERSION) { wl_output_send_scale(resource, (uint32_t)ceil(output->scale)); } if (version >= WL_OUTPUT_DONE_SINCE_VERSION) { wl_output_send_done(resource); } } static void output_send_current_mode_to_resource( struct wl_resource *resource) { struct wlr_output *output = wlr_output_from_resource(resource); const uint32_t version = wl_resource_get_version(resource); if (version < WL_OUTPUT_MODE_SINCE_VERSION) { return; } if (output->current_mode != NULL) { struct wlr_output_mode *mode = output->current_mode; uint32_t flags = mode->flags & WL_OUTPUT_MODE_PREFERRED; wl_output_send_mode(resource, flags | WL_OUTPUT_MODE_CURRENT, mode->width, mode->height, mode->refresh); } else { // Output has no mode wl_output_send_mode(resource, WL_OUTPUT_MODE_CURRENT, output->width, output->height, output->refresh); } if (version >= WL_OUTPUT_DONE_SINCE_VERSION) { wl_output_send_done(resource); } } static void output_handle_resource_destroy(struct wl_resource *resource) { wl_list_remove(wl_resource_get_link(resource)); } static void output_handle_release(struct wl_client *client, struct wl_resource *resource) { wl_resource_destroy(resource); } static const struct wl_output_interface output_impl = { .release = output_handle_release, }; static void output_bind(struct wl_client *wl_client, void *data, uint32_t version, uint32_t id) { struct wlr_output *output = data; struct wl_resource *resource = wl_resource_create(wl_client, &wl_output_interface, version, id); if (resource == NULL) { wl_client_post_no_memory(wl_client); return; } wl_resource_set_implementation(resource, &output_impl, output, output_handle_resource_destroy); wl_list_insert(&output->resources, wl_resource_get_link(resource)); output_send_to_resource(resource); } void wlr_output_create_global(struct wlr_output *output) { if (output->global != NULL) { return; } output->global = wl_global_create(output->display, &wl_output_interface, OUTPUT_VERSION, output, output_bind); if (output->global == NULL) { wlr_log(WLR_ERROR, "Failed to allocate wl_output global"); } } void 
wlr_output_destroy_global(struct wlr_output *output) { if (output->global == NULL) { return; } // Make all output resources inert struct wl_resource *resource, *tmp; wl_resource_for_each_safe(resource, tmp, &output->resources) { wl_resource_set_user_data(resource, NULL); wl_list_remove(wl_resource_get_link(resource)); wl_list_init(wl_resource_get_link(resource)); } wl_global_destroy(output->global); output->global = NULL; } void wlr_output_update_enabled(struct wlr_output *output, bool enabled) { if (output->enabled == enabled) { return; } output->enabled = enabled; wlr_signal_emit_safe(&output->events.enable, output); } static void output_update_matrix(struct wlr_output *output) { wlr_matrix_projection(output->transform_matrix, output->width, output->height, output->transform); } bool wlr_output_enable(struct wlr_output *output, bool enable) { if (output->enabled == enable) { return true; } if (output->impl->enable) { return output->impl->enable(output, enable); } return false; } bool wlr_output_set_mode(struct wlr_output *output, struct wlr_output_mode *mode) { if (!output->impl || !output->impl->set_mode) { return false; } return output->impl->set_mode(output, mode); } bool wlr_output_set_custom_mode(struct wlr_output *output, int32_t width, int32_t height, int32_t refresh) { if (!output->impl || !output->impl->set_custom_mode) { return false; } return output->impl->set_custom_mode(output, width, height, refresh); } void wlr_output_update_mode(struct wlr_output *output, struct wlr_output_mode *mode) { output->current_mode = mode; wlr_output_update_custom_mode(output, mode->width, mode->height, mode->refresh); } void wlr_output_update_custom_mode(struct wlr_output *output, int32_t width, int32_t height, int32_t refresh) { if (output->width == width && output->height == height && output->refresh == refresh) { return; } output->width = width; output->height = height; output_update_matrix(output); output->refresh = refresh; struct wl_resource *resource; wl_resource_for_each(resource, &output->resources) { output_send_current_mode_to_resource(resource); } wlr_signal_emit_safe(&output->events.mode, output); } void wlr_output_set_transform(struct wlr_output *output, enum wl_output_transform transform) { output->impl->transform(output, transform); output_update_matrix(output); // TODO: only send geometry and done struct wl_resource *resource; wl_resource_for_each(resource, &output->resources) { output_send_to_resource(resource); } wlr_signal_emit_safe(&output->events.transform, output); } void wlr_output_set_position(struct wlr_output *output, int32_t lx, int32_t ly) { if (lx == output->lx && ly == output->ly) { return; } output->lx = lx; output->ly = ly; // TODO: only send geometry and done struct wl_resource *resource; wl_resource_for_each(resource, &output->resources) { output_send_to_resource(resource); } } void wlr_output_set_scale(struct wlr_output *output, float scale) { if (output->scale == scale) { return; } output->scale = scale; // TODO: only send mode and done struct wl_resource *resource; wl_resource_for_each(resource, &output->resources) { output_send_to_resource(resource); } wlr_signal_emit_safe(&output->events.scale, output); } static void handle_display_destroy(struct wl_listener *listener, void *data) { struct wlr_output *output = wl_container_of(listener, output, display_destroy); wlr_output_destroy_global(output); } void wlr_output_init(struct wlr_output *output, struct wlr_backend *backend, const struct wlr_output_impl *impl, struct wl_display *display) { 
assert(impl->make_current && impl->swap_buffers && impl->transform); if (impl->set_cursor || impl->move_cursor) { assert(impl->set_cursor && impl->move_cursor); } output->backend = backend; output->impl = impl; output->display = display; wl_list_init(&output->modes); output->transform = WL_OUTPUT_TRANSFORM_NORMAL; output->scale = 1; wl_list_init(&output->cursors); wl_list_init(&output->resources); wl_signal_init(&output->events.frame); wl_signal_init(&output->events.needs_swap); wl_signal_init(&output->events.swap_buffers); wl_signal_init(&output->events.enable); wl_signal_init(&output->events.mode); wl_signal_init(&output->events.scale); wl_signal_init(&output->events.transform); wl_signal_init(&output->events.destroy); pixman_region32_init(&output->damage); const char *no_hardware_cursors = getenv("WLR_NO_HARDWARE_CURSORS"); if (no_hardware_cursors != NULL && strcmp(no_hardware_cursors, "1") == 0) { wlr_log(WLR_DEBUG, "WLR_NO_HARDWARE_CURSORS set, forcing software cursors"); output->software_cursor_locks = 1; } output->display_destroy.notify = handle_display_destroy; wl_display_add_destroy_listener(display, &output->display_destroy); output->frame_pending = true; } void wlr_output_destroy(struct wlr_output *output) { if (!output) { return; } wl_list_remove(&output->display_destroy.link); wlr_output_destroy_global(output); wlr_output_set_fullscreen_surface(output, NULL); wlr_signal_emit_safe(&output->events.destroy, output); struct wlr_output_mode *mode, *tmp_mode; wl_list_for_each_safe(mode, tmp_mode, &output->modes, link) { wl_list_remove(&mode->link); free(mode); } struct wlr_output_cursor *cursor, *tmp_cursor; wl_list_for_each_safe(cursor, tmp_cursor, &output->cursors, link) { wlr_output_cursor_destroy(cursor); } pixman_region32_fini(&output->damage); if (output->impl && output->impl->destroy) { output->impl->destroy(output); } else { free(output); } } void wlr_output_transformed_resolution(struct wlr_output *output, int *width, int *height) { if (output->transform % 2 == 0) { *width = output->width; *height = output->height; } else { *width = output->height; *height = output->width; } } void wlr_output_effective_resolution(struct wlr_output *output, int *width, int *height) { wlr_output_transformed_resolution(output, width, height); *width /= output->scale; *height /= output->scale; } bool wlr_output_make_current(struct wlr_output *output, int *buffer_age) { return output->impl->make_current(output, buffer_age); } static void output_scissor(struct wlr_output *output, pixman_box32_t *rect) { struct wlr_renderer *renderer = wlr_backend_get_renderer(output->backend); assert(renderer); struct wlr_box box = { .x = rect->x1, .y = rect->y1, .width = rect->x2 - rect->x1, .height = rect->y2 - rect->y1, }; int ow, oh; wlr_output_transformed_resolution(output, &ow, &oh); // Scissor is in renderer coordinates, ie. 
upside down enum wl_output_transform transform = wlr_output_transform_invert(output->transform); wlr_box_transform(&box, transform, ow, oh, &box); wlr_renderer_scissor(renderer, &box); } static void output_fullscreen_surface_get_box(struct wlr_output *output, struct wlr_surface *surface, struct wlr_box *box) { int width, height; wlr_output_effective_resolution(output, &width, &height); int x = (width - surface->current.width) / 2; int y = (height - surface->current.height) / 2; box->x = x * output->scale; box->y = y * output->scale; box->width = surface->current.width * output->scale; box->height = surface->current.height * output->scale; } static void output_fullscreen_surface_render(struct wlr_output *output, struct wlr_surface *surface, const struct timespec *when, pixman_region32_t *damage) { struct wlr_renderer *renderer = wlr_backend_get_renderer(output->backend); assert(renderer); struct wlr_texture *texture = wlr_surface_get_texture(surface); if (texture == NULL) { wlr_renderer_clear(renderer, (float[]){0, 0, 0, 1}); return; } struct wlr_box box; output_fullscreen_surface_get_box(output, surface, &box); float matrix[9]; enum wl_output_transform transform = wlr_output_transform_invert(surface->current.transform); wlr_matrix_project_box(matrix, &box, transform, 0, output->transform_matrix); int nrects; pixman_box32_t *rects = pixman_region32_rectangles(damage, &nrects); for (int i = 0; i < nrects; ++i) { output_scissor(output, &rects[i]); wlr_renderer_clear(renderer, (float[]){0, 0, 0, 1}); wlr_render_texture_with_matrix(surface->renderer, texture, matrix, 1.0f); } wlr_renderer_scissor(renderer, NULL); wlr_surface_send_frame_done(surface, when); } /** * Returns the cursor box, scaled for its output. */ static void output_cursor_get_box(struct wlr_output_cursor *cursor, struct wlr_box *box) { box->x = cursor->x - cursor->hotspot_x; box->y = cursor->y - cursor->hotspot_y; box->width = cursor->width; box->height = cursor->height; } static void output_cursor_render(struct wlr_output_cursor *cursor, const struct timespec *when, pixman_region32_t *damage) { struct wlr_renderer *renderer = wlr_backend_get_renderer(cursor->output->backend); assert(renderer); struct wlr_texture *texture = cursor->texture; if (cursor->surface != NULL) { texture = wlr_surface_get_texture(cursor->surface); } if (texture == NULL) { return; } struct wlr_box box; output_cursor_get_box(cursor, &box); pixman_region32_t surface_damage; pixman_region32_init(&surface_damage); pixman_region32_union_rect(&surface_damage, &surface_damage, box.x, box.y, box.width, box.height); pixman_region32_intersect(&surface_damage, &surface_damage, damage); if (!pixman_region32_not_empty(&surface_damage)) { goto surface_damage_finish; } float matrix[9]; wlr_matrix_project_box(matrix, &box, WL_OUTPUT_TRANSFORM_NORMAL, 0, cursor->output->transform_matrix); int nrects; pixman_box32_t *rects = pixman_region32_rectangles(&surface_damage, &nrects); for (int i = 0; i < nrects; ++i) { output_scissor(cursor->output, &rects[i]); wlr_render_texture_with_matrix(renderer, texture, matrix, 1.0f); } wlr_renderer_scissor(renderer, NULL); if (cursor->surface != NULL) { wlr_surface_send_frame_done(cursor->surface, when); } surface_damage_finish: pixman_region32_fini(&surface_damage); } bool wlr_output_swap_buffers(struct wlr_output *output, struct timespec *when, pixman_region32_t *damage) { if (output->frame_pending) { wlr_log(WLR_ERROR, "Tried to swap buffers when a frame is pending"); return false; } if (output->idle_frame != NULL) { 
wl_event_source_remove(output->idle_frame); output->idle_frame = NULL; } int width, height; wlr_output_transformed_resolution(output, &width, &height); pixman_region32_t render_damage; pixman_region32_init(&render_damage); pixman_region32_union_rect(&render_damage, &render_damage, 0, 0, width, height); if (damage != NULL) { // Damage tracking supported pixman_region32_intersect(&render_damage, &render_damage, damage); } struct timespec now; if (when == NULL) { clock_gettime(CLOCK_MONOTONIC, &now); when = &now; } if (pixman_region32_not_empty(&render_damage)) { if (output->fullscreen_surface != NULL) { output_fullscreen_surface_render(output, output->fullscreen_surface, when, &render_damage); } struct wlr_output_cursor *cursor; wl_list_for_each(cursor, &output->cursors, link) { if (!cursor->enabled || !cursor->visible || output->hardware_cursor == cursor) { continue; } output_cursor_render(cursor, when, &render_damage); } } struct wlr_output_event_swap_buffers event = { .output = output, .when = when, .damage = damage, }; wlr_signal_emit_safe(&output->events.swap_buffers, &event); // Transform damage into renderer coordinates, ie. upside down enum wl_output_transform transform = wlr_output_transform_compose( wlr_output_transform_invert(output->transform), WL_OUTPUT_TRANSFORM_FLIPPED_180); wlr_region_transform(&render_damage, &render_damage, transform, width, height); if (!output->impl->swap_buffers(output, damage ? &render_damage : NULL)) { return false; } output->frame_pending = true; output->needs_swap = false; pixman_region32_clear(&output->damage); pixman_region32_fini(&render_damage); return true; } void wlr_output_send_frame(struct wlr_output *output) { output->frame_pending = false; wlr_signal_emit_safe(&output->events.frame, output); } static void schedule_frame_handle_idle_timer(void *data) { struct wlr_output *output = data; output->idle_frame = NULL; if (!output->frame_pending) { wlr_output_send_frame(output); } } void wlr_output_schedule_frame(struct wlr_output *output) { if (output->frame_pending || output->idle_frame != NULL) { return; } // TODO: ask the backend to send a frame event when appropriate instead struct wl_event_loop *ev = wl_display_get_event_loop(output->display); output->idle_frame = wl_event_loop_add_idle(ev, schedule_frame_handle_idle_timer, output); } bool wlr_output_set_gamma(struct wlr_output *output, size_t size, const uint16_t *r, const uint16_t *g, const uint16_t *b) { if (!output->impl->set_gamma) { return false; } return output->impl->set_gamma(output, size, r, g, b); } size_t wlr_output_get_gamma_size(struct wlr_output *output) { if (!output->impl->get_gamma_size) { return 0; } return output->impl->get_gamma_size(output); } bool wlr_output_export_dmabuf(struct wlr_output *output, struct wlr_dmabuf_attributes *attribs) { if (!output->impl->export_dmabuf) { return false; } return output->impl->export_dmabuf(output, attribs); } void wlr_output_update_needs_swap(struct wlr_output *output) { output->needs_swap = true; wlr_signal_emit_safe(&output->events.needs_swap, output); } void wlr_output_damage_whole(struct wlr_output *output) { int width, height; wlr_output_transformed_resolution(output, &width, &height); pixman_region32_union_rect(&output->damage, &output->damage, 0, 0, width, height); wlr_output_update_needs_swap(output); } static void output_fullscreen_surface_reset(struct wlr_output *output) { if (output->fullscreen_surface != NULL) { wl_list_remove(&output->fullscreen_surface_commit.link); 
wl_list_remove(&output->fullscreen_surface_destroy.link); output->fullscreen_surface = NULL; wlr_output_damage_whole(output); } } static void output_fullscreen_surface_handle_commit( struct wl_listener *listener, void *data) { struct wlr_output *output = wl_container_of(listener, output, fullscreen_surface_commit); struct wlr_surface *surface = output->fullscreen_surface; if (output->fullscreen_width != surface->current.width || output->fullscreen_height != surface->current.height) { output->fullscreen_width = surface->current.width; output->fullscreen_height = surface->current.height; wlr_output_damage_whole(output); return; } struct wlr_box box; output_fullscreen_surface_get_box(output, surface, &box); pixman_region32_t damage; pixman_region32_init(&damage); pixman_region32_copy(&damage, &surface->current.surface_damage); wlr_region_scale(&damage, &damage, output->scale); pixman_region32_translate(&damage, box.x, box.y); pixman_region32_union(&output->damage, &output->damage, &damage); pixman_region32_fini(&damage); wlr_output_update_needs_swap(output); } static void output_fullscreen_surface_handle_destroy( struct wl_listener *listener, void *data) { struct wlr_output *output = wl_container_of(listener, output, fullscreen_surface_destroy); output_fullscreen_surface_reset(output); } void wlr_output_set_fullscreen_surface(struct wlr_output *output, struct wlr_surface *surface) { // TODO: hardware fullscreen if (output->fullscreen_surface == surface) { return; } output_fullscreen_surface_reset(output); output->fullscreen_surface = surface; wlr_output_damage_whole(output); if (surface == NULL) { return; } output->fullscreen_surface_commit.notify = output_fullscreen_surface_handle_commit; wl_signal_add(&surface->events.commit, &output->fullscreen_surface_commit); output->fullscreen_surface_destroy.notify = output_fullscreen_surface_handle_destroy; wl_signal_add(&surface->events.destroy, &output->fullscreen_surface_destroy); } struct wlr_output *wlr_output_from_resource(struct wl_resource *resource) { assert(wl_resource_instance_of(resource, &wl_output_interface, &output_impl)); return wl_resource_get_user_data(resource); } static void output_cursor_damage_whole(struct wlr_output_cursor *cursor); void wlr_output_lock_software_cursors(struct wlr_output *output, bool lock) { if (lock) { ++output->software_cursor_locks; } else { assert(output->software_cursor_locks > 0); --output->software_cursor_locks; } wlr_log(WLR_DEBUG, "%s hardware cursors on output '%s' (locks: %d)", lock ? "Disabling" : "Enabling", output->name, output->software_cursor_locks); if (output->software_cursor_locks > 0 && output->hardware_cursor != NULL) { assert(output->impl->set_cursor); output->impl->set_cursor(output, NULL, 1, WL_OUTPUT_TRANSFORM_NORMAL, 0, 0, true); output_cursor_damage_whole(output->hardware_cursor); output->hardware_cursor = NULL; } // If it's possible to use hardware cursors again, don't switch immediately // since a recorder is likely to lock software cursors for the next frame // again. 
} static void output_cursor_damage_whole(struct wlr_output_cursor *cursor) { struct wlr_box box; output_cursor_get_box(cursor, &box); pixman_region32_union_rect(&cursor->output->damage, &cursor->output->damage, box.x, box.y, box.width, box.height); wlr_output_update_needs_swap(cursor->output); } static void output_cursor_reset(struct wlr_output_cursor *cursor) { if (cursor->output->hardware_cursor != cursor) { output_cursor_damage_whole(cursor); } if (cursor->surface != NULL) { wl_list_remove(&cursor->surface_commit.link); wl_list_remove(&cursor->surface_destroy.link); cursor->surface = NULL; } } static void output_cursor_update_visible(struct wlr_output_cursor *cursor) { struct wlr_box output_box; output_box.x = output_box.y = 0; wlr_output_transformed_resolution(cursor->output, &output_box.width, &output_box.height); struct wlr_box cursor_box; output_cursor_get_box(cursor, &cursor_box); struct wlr_box intersection; bool visible = wlr_box_intersection(&output_box, &cursor_box, &intersection); if (cursor->surface != NULL) { if (cursor->visible && !visible) { wlr_surface_send_leave(cursor->surface, cursor->output); } if (!cursor->visible && visible) { wlr_surface_send_enter(cursor->surface, cursor->output); } } cursor->visible = visible; } static bool output_cursor_attempt_hardware(struct wlr_output_cursor *cursor) { int32_t scale = cursor->output->scale; enum wl_output_transform transform = WL_OUTPUT_TRANSFORM_NORMAL; struct wlr_texture *texture = cursor->texture; if (cursor->surface != NULL) { texture = wlr_surface_get_texture(cursor->surface); scale = cursor->surface->current.scale; transform = cursor->surface->current.transform; } if (cursor->output->software_cursor_locks > 0) { return false; } struct wlr_output_cursor *hwcur = cursor->output->hardware_cursor; if (cursor->output->impl->set_cursor && (hwcur == NULL || hwcur == cursor)) { // If the cursor was hidden or was a software cursor, the hardware // cursor position is outdated assert(cursor->output->impl->move_cursor); cursor->output->impl->move_cursor(cursor->output, (int)cursor->x, (int)cursor->y); if (cursor->output->impl->set_cursor(cursor->output, texture, scale, transform, cursor->hotspot_x, cursor->hotspot_y, true)) { cursor->output->hardware_cursor = cursor; return true; } } return false; } bool wlr_output_cursor_set_image(struct wlr_output_cursor *cursor, const uint8_t *pixels, int32_t stride, uint32_t width, uint32_t height, int32_t hotspot_x, int32_t hotspot_y) { struct wlr_renderer *renderer = wlr_backend_get_renderer(cursor->output->backend); assert(renderer); output_cursor_reset(cursor); cursor->width = width; cursor->height = height; cursor->hotspot_x = hotspot_x; cursor->hotspot_y = hotspot_y; output_cursor_update_visible(cursor); wlr_texture_destroy(cursor->texture); cursor->texture = NULL; cursor->enabled = false; if (pixels != NULL) { cursor->texture = wlr_texture_from_pixels(renderer, WL_SHM_FORMAT_ARGB8888, stride, width, height, pixels); if (cursor->texture == NULL) { return false; } cursor->enabled = true; } if (output_cursor_attempt_hardware(cursor)) { return true; } wlr_log(WLR_DEBUG, "Falling back to software cursor on output '%s'", cursor->output->name); output_cursor_damage_whole(cursor); return true; } static void output_cursor_commit(struct wlr_output_cursor *cursor, bool update_hotspot) { if (cursor->output->hardware_cursor != cursor) { output_cursor_damage_whole(cursor); } struct wlr_surface *surface = cursor->surface; assert(surface != NULL); // Some clients commit a cursor surface with a NULL 
buffer to hide it. cursor->enabled = wlr_surface_has_buffer(surface); cursor->width = surface->current.width * cursor->output->scale; cursor->height = surface->current.height * cursor->output->scale; if (update_hotspot) { cursor->hotspot_x -= surface->current.dx * cursor->output->scale; cursor->hotspot_y -= surface->current.dy * cursor->output->scale; } if (output_cursor_attempt_hardware(cursor)) { struct timespec now; clock_gettime(CLOCK_MONOTONIC, &now); wlr_surface_send_frame_done(surface, &now); return; } // Fallback to software cursor output_cursor_damage_whole(cursor); } static void output_cursor_handle_commit(struct wl_listener *listener, void *data) { struct wlr_output_cursor *cursor = wl_container_of(listener, cursor, surface_commit); output_cursor_commit(cursor, true); } static void output_cursor_handle_destroy(struct wl_listener *listener, void *data) { struct wlr_output_cursor *cursor = wl_container_of(listener, cursor, surface_destroy); output_cursor_reset(cursor); } void wlr_output_cursor_set_surface(struct wlr_output_cursor *cursor, struct wlr_surface *surface, int32_t hotspot_x, int32_t hotspot_y) { hotspot_x *= cursor->output->scale; hotspot_y *= cursor->output->scale; if (surface && surface == cursor->surface) { // Only update the hotspot: surface hasn't changed if (cursor->output->hardware_cursor != cursor) { output_cursor_damage_whole(cursor); } cursor->hotspot_x = hotspot_x; cursor->hotspot_y = hotspot_y; if (cursor->output->hardware_cursor != cursor) { output_cursor_damage_whole(cursor); } else { assert(cursor->output->impl->set_cursor); cursor->output->impl->set_cursor(cursor->output, NULL, 1, WL_OUTPUT_TRANSFORM_NORMAL, hotspot_x, hotspot_y, false); } return; } output_cursor_reset(cursor); cursor->surface = surface; cursor->hotspot_x = hotspot_x; cursor->hotspot_y = hotspot_y; if (surface != NULL) { wl_signal_add(&surface->events.commit, &cursor->surface_commit); wl_signal_add(&surface->events.destroy, &cursor->surface_destroy); output_cursor_commit(cursor, false); cursor->visible = false; output_cursor_update_visible(cursor); } else { cursor->enabled = false; cursor->width = 0; cursor->height = 0; if (cursor->output->hardware_cursor == cursor) { assert(cursor->output->impl->set_cursor); cursor->output->impl->set_cursor(cursor->output, NULL, 1, WL_OUTPUT_TRANSFORM_NORMAL, 0, 0, true); } } } bool wlr_output_cursor_move(struct wlr_output_cursor *cursor, double x, double y) { if (cursor->x == x && cursor->y == y) { return true; } if (cursor->output->hardware_cursor != cursor) { output_cursor_damage_whole(cursor); } bool was_visible = cursor->visible; x *= cursor->output->scale; y *= cursor->output->scale; cursor->x = x; cursor->y = y; output_cursor_update_visible(cursor); if (!was_visible && !cursor->visible) { // Cursor is still hidden, do nothing return true; } if (cursor->output->hardware_cursor != cursor) { output_cursor_damage_whole(cursor); return true; } assert(cursor->output->impl->move_cursor); return cursor->output->impl->move_cursor(cursor->output, (int)x, (int)y); } struct wlr_output_cursor *wlr_output_cursor_create(struct wlr_output *output) { struct wlr_output_cursor *cursor = calloc(1, sizeof(struct wlr_output_cursor)); if (cursor == NULL) { return NULL; } cursor->output = output; wl_signal_init(&cursor->events.destroy); wl_list_init(&cursor->surface_commit.link); cursor->surface_commit.notify = output_cursor_handle_commit; wl_list_init(&cursor->surface_destroy.link); cursor->surface_destroy.notify = output_cursor_handle_destroy; 
wl_list_insert(&output->cursors, &cursor->link); cursor->visible = true; // default position is at (0, 0) return cursor; } void wlr_output_cursor_destroy(struct wlr_output_cursor *cursor) { if (cursor == NULL) { return; } output_cursor_reset(cursor); wlr_signal_emit_safe(&cursor->events.destroy, cursor); if (cursor->output->hardware_cursor == cursor) { // If this cursor was the hardware cursor, disable it if (cursor->output->impl->set_cursor) { cursor->output->impl->set_cursor(cursor->output, NULL, 1, WL_OUTPUT_TRANSFORM_NORMAL, 0, 0, true); } cursor->output->hardware_cursor = NULL; } wlr_texture_destroy(cursor->texture); wl_list_remove(&cursor->link); free(cursor); } enum wl_output_transform wlr_output_transform_invert( enum wl_output_transform tr) { if ((tr & WL_OUTPUT_TRANSFORM_90) && !(tr & WL_OUTPUT_TRANSFORM_FLIPPED)) { tr ^= WL_OUTPUT_TRANSFORM_180; } return tr; } enum wl_output_transform wlr_output_transform_compose( enum wl_output_transform tr_a, enum wl_output_transform tr_b) { uint32_t flipped = (tr_a ^ tr_b) & WL_OUTPUT_TRANSFORM_FLIPPED; uint32_t rotated = (tr_a + tr_b) & (WL_OUTPUT_TRANSFORM_90 | WL_OUTPUT_TRANSFORM_180); return flipped | rotated; }
1
12,633
What's the reason for adding the output pointer here? I'd expect it to be derivable from the actual handler that's called.
swaywm-wlroots
c
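The review question for this record asks whether the swap_buffers event really needs to carry the output pointer. A minimal sketch of the alternative the reviewer seems to have in mind, assuming a hypothetical consumer struct (my_output_state and handle_swap_buffers are not wlroots code): a listener embedded in per-output state can recover that state, and the wlr_output stored in it, via wl_container_of, without the event carrying the pointer.

#include <wayland-server.h>

struct wlr_output;

struct my_output_state {
	struct wlr_output *output;       /* stored when the listener was registered */
	struct wl_listener swap_buffers; /* added to output->events.swap_buffers */
};

static void handle_swap_buffers(struct wl_listener *listener, void *data) {
	/* Recover the containing struct from the listener itself... */
	struct my_output_state *state =
		wl_container_of(listener, state, swap_buffers);
	/* ...so state->output is usable here even if the event did not carry it. */
	(void)state;
	(void)data;
}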
@@ -82,6 +82,10 @@ class Plan < ActiveRecord::Base annual_plan.price end + def annualized_savings + annualized_payment - discounted_annual_payment + end + def annual_plan_sku annual_plan.sku end
1
class Plan < ActiveRecord::Base PRIME_249_SKU = "prime-249" PROFESSIONAL_SKU = "professional" THE_WEEKLY_ITERATION_SKU = "the-weekly-iteration" has_many :checkouts has_many :subscriptions, as: :plan belongs_to :annual_plan, class_name: "Plan" validates :description, presence: true validates :price, presence: true validates :name, presence: true validates :short_description, presence: true validates :sku, presence: true include PlanForPublicListing def self.individual where includes_team: false end def self.team where includes_team: true end def self.active where active: true end def self.default individual.active.featured.ordered.first end def self.default_team team.active.featured.ordered.first end def self.basic where(sku: THE_WEEKLY_ITERATION_SKU).first end def self.popular where(sku: PROFESSIONAL_SKU).first end def popular? self == self.class.popular end def subscription_interval stripe_plan.interval end def fulfill(checkout, user) user.create_subscription( plan: self, stripe_id: checkout.stripe_subscription_id ) SubscriptionFulfillment.new(user, self).fulfill if includes_team? TeamFulfillment.new(checkout, user).fulfill end end def included_in_plan?(plan) false end def has_annual_plan? annual_plan.present? end def has_feature?(feature) public_send("includes_#{feature}?") end def annualized_payment 12 * price end def discounted_annual_payment annual_plan.price end def annual_plan_sku annual_plan.sku end private def stripe_plan @stripe_plan ||= Stripe::Plan.retrieve(sku) end end
1
14,102
I don't see tests for this (or the similar method on `Team`).
thoughtbot-upcase
rb
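The review note for this record points out that annualized_savings has no test. A possible spec, offered as a sketch only: it assumes the repository's usual rails_helper/RSpec setup, builds plans directly with Plan.new rather than the project's factories, and the prices are made up.

require "rails_helper"

describe Plan do
  describe "#annualized_savings" do
    it "returns twelve monthly payments minus the discounted annual price" do
      annual_plan = Plan.new(price: 2490)
      plan = Plan.new(price: 249, annual_plan: annual_plan)

      # 12 * 249 - 2490
      expect(plan.annualized_savings).to eq 498
    end
  end
end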
@@ -35,12 +35,9 @@ public class SetNetworkConnection extends WebDriverHandler<Number> implements Js @SuppressWarnings("unchecked") @Override public void setJsonParameters(Map<String, Object> allParameters) throws Exception { - Map<String, Map<String, Object>> parameters = (Map<String, Map<String, Object>>)allParameters.get("parameters"); - Map<String, Object> typeMap = parameters.get("type"); - - type = new ConnectionType(Boolean.parseBoolean(typeMap.get("wifiEnabled").toString()), - Boolean.parseBoolean(typeMap.get("dataEnabled").toString()), - Boolean.parseBoolean(typeMap.get("airplaneMode").toString())); + Map<String, Object> parameters = (Map<String, Object>)allParameters.get("parameters"); + Long bitmask = (Long) parameters.get("type"); + type = new ConnectionType(bitmask.intValue()); } @Override
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.openqa.selenium.remote.server.handler.mobile; import java.util.Map; import org.openqa.selenium.mobile.NetworkConnection.ConnectionType; import org.openqa.selenium.remote.server.JsonParametersAware; import org.openqa.selenium.remote.server.Session; import org.openqa.selenium.remote.server.handler.WebDriverHandler; import org.openqa.selenium.remote.server.handler.html5.Utils; public class SetNetworkConnection extends WebDriverHandler<Number> implements JsonParametersAware { private volatile ConnectionType type; public SetNetworkConnection(Session session) { super(session); } @SuppressWarnings("unchecked") @Override public void setJsonParameters(Map<String, Object> allParameters) throws Exception { Map<String, Map<String, Object>> parameters = (Map<String, Map<String, Object>>)allParameters.get("parameters"); Map<String, Object> typeMap = parameters.get("type"); type = new ConnectionType(Boolean.parseBoolean(typeMap.get("wifiEnabled").toString()), Boolean.parseBoolean(typeMap.get("dataEnabled").toString()), Boolean.parseBoolean(typeMap.get("airplaneMode").toString())); } @Override public Number call() throws Exception { return Integer.parseInt(Utils.getNetworkConnection(getUnwrappedDriver()).setNetworkConnection(type).toString()); } @Override public String toString() { return String.format("[set network connection : %s]", type.toString()); } }
1
13,952
should use Number instead of Long
SeleniumHQ-selenium
java
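The comment for this record suggests casting to Number rather than Long: depending on the JSON deserializer, the bitmask may come back boxed as an Integer or a Long, and a hard cast to Long would then throw. A small self-contained illustration of the difference (not Selenium code; the map contents are invented).

import java.util.HashMap;
import java.util.Map;

public class NumberCastSketch {
  public static void main(String[] args) {
    Map<String, Object> parameters = new HashMap<>();
    parameters.put("type", 6); // boxed as an Integer here; another parser may hand back a Long

    // Casting to the common supertype works either way.
    Number bitmask = (Number) parameters.get("type");
    System.out.println(bitmask.intValue()); // 6

    // Casting to (Long) instead would throw ClassCastException whenever the value is an Integer.
  }
}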
@@ -281,8 +281,12 @@ func (c *client) readLoop() { c.cache.subs = 0 if err := c.parse(b[:n]); err != nil { - // handled inline - if err != ErrMaxPayload && err != ErrAuthorization { + // If client connection has been closed, simply return, + // otherwise report a generic parsing error. + c.mu.Lock() + closed := c.nc == nil + c.mu.Unlock() + if !closed { c.Errorf("Error reading from client: %s", err.Error()) c.sendErr("Parser Error") c.closeConnection()
1
// Copyright 2012-2016 Apcera Inc. All rights reserved. package server import ( "bufio" "crypto/tls" "encoding/json" "fmt" "math/rand" "net" "sync" "sync/atomic" "time" ) // Type of client connection. const ( // CLIENT is an end user. CLIENT = iota // ROUTER is another router in the cluster. ROUTER ) const ( // Original Client protocol from 2009. // http://nats.io/documentation/internals/nats-protocol/ ClientProtoZero = iota // This signals a client can receive more then the original INFO block. // This can be used to update clients on other cluster members, etc. ClientProtoInfo ) func init() { rand.Seed(time.Now().UnixNano()) } const ( // Scratch buffer size for the processMsg() calls. msgScratchSize = 512 msgHeadProto = "MSG " ) // For controlling dynamic buffer sizes. const ( startBufSize = 512 // For INFO/CONNECT block minBufSize = 128 maxBufSize = 65536 ) // Represent client booleans with a bitmask type clientFlag byte // Some client state represented as flags const ( connectReceived clientFlag = 1 << iota // The CONNECT proto has been received firstPongSent // The first PONG has been sent infoUpdated // The server's Info object has changed before first PONG was sent ) // set the flag (would be equivalent to set the boolean to true) func (cf *clientFlag) set(c clientFlag) { *cf |= c } // isSet returns true if the flag is set, false otherwise func (cf clientFlag) isSet(c clientFlag) bool { return cf&c != 0 } // setIfNotSet will set the flag `c` only if that flag was not already // set and return true to indicate that the flag has been set. Returns // false otherwise. func (cf *clientFlag) setIfNotSet(c clientFlag) bool { if *cf&c == 0 { *cf |= c return true } return false } // clear unset the flag (would be equivalent to set the boolean to false) func (cf *clientFlag) clear(c clientFlag) { *cf &= ^c } type client struct { // Here first because of use of atomics, and memory alignment. stats mu sync.Mutex typ int cid uint64 lang string opts clientOpts start time.Time nc net.Conn mpay int ncs string bw *bufio.Writer srv *Server subs map[string]*subscription perms *permissions cache readCache pcd map[*client]struct{} atmr *time.Timer ptmr *time.Timer pout int wfc int msgb [msgScratchSize]byte last time.Time parseState route *route debug bool trace bool flags clientFlag // Compact booleans into a single field. Size will be increased when needed. } type permissions struct { sub *Sublist pub *Sublist pcache map[string]bool } const ( maxResultCacheSize = 512 maxPermCacheSize = 32 pruneSize = 16 ) // Used in readloop to cache hot subject lookups and group statistics. type readCache struct { genid uint64 results map[string]*SublistResult prand *rand.Rand inMsgs int inBytes int subs int } func (c *client) String() (id string) { return c.ncs } func (c *client) GetOpts() *clientOpts { return &c.opts } // GetTLSConnectionState returns the TLS ConnectionState if TLS is enabled, nil // otherwise. Implements the ClientAuth interface. 
func (c *client) GetTLSConnectionState() *tls.ConnectionState { tc, ok := c.nc.(*tls.Conn) if !ok { return nil } state := tc.ConnectionState() return &state } type subscription struct { client *client subject []byte queue []byte sid []byte nm int64 max int64 } type clientOpts struct { Verbose bool `json:"verbose"` Pedantic bool `json:"pedantic"` SslRequired bool `json:"ssl_required"` Authorization string `json:"auth_token"` Username string `json:"user"` Password string `json:"pass"` Name string `json:"name"` Lang string `json:"lang"` Version string `json:"version"` Protocol int `json:"protocol"` } var defaultOpts = clientOpts{Verbose: true, Pedantic: true} func init() { rand.Seed(time.Now().UnixNano()) } // Lock should be held func (c *client) initClient() { s := c.srv c.cid = atomic.AddUint64(&s.gcid, 1) c.bw = bufio.NewWriterSize(c.nc, startBufSize) c.subs = make(map[string]*subscription) c.debug = (atomic.LoadInt32(&debug) != 0) c.trace = (atomic.LoadInt32(&trace) != 0) // This is a scratch buffer used for processMsg() // The msg header starts with "MSG ", // in bytes that is [77 83 71 32]. c.msgb = [msgScratchSize]byte{77, 83, 71, 32} // This is to track pending clients that have data to be flushed // after we process inbound msgs from our own connection. c.pcd = make(map[*client]struct{}) // snapshot the string version of the connection conn := "-" if ip, ok := c.nc.(*net.TCPConn); ok { addr := ip.RemoteAddr().(*net.TCPAddr) conn = fmt.Sprintf("%s:%d", addr.IP, addr.Port) } switch c.typ { case CLIENT: c.ncs = fmt.Sprintf("%s - cid:%d", conn, c.cid) case ROUTER: c.ncs = fmt.Sprintf("%s - rid:%d", conn, c.cid) } } // RegisterUser allows auth to call back into a new client // with the authenticated user. This is used to map any permissions // into the client. func (c *client) RegisterUser(user *User) { if user.Permissions == nil { return } // Process Permissions and map into client connection structures. c.mu.Lock() defer c.mu.Unlock() // Pre-allocate all to simplify checks later. c.perms = &permissions{} c.perms.sub = NewSublist() c.perms.pub = NewSublist() c.perms.pcache = make(map[string]bool) // Loop over publish permissions for _, pubSubject := range user.Permissions.Publish { sub := &subscription{subject: []byte(pubSubject)} c.perms.pub.Insert(sub) } // Loop over subscribe permissions for _, subSubject := range user.Permissions.Subscribe { sub := &subscription{subject: []byte(subSubject)} c.perms.sub.Insert(sub) } } func (c *client) readLoop() { // Grab the connection off the client, it will be cleared on a close. // We check for that after the loop, but want to avoid a nil dereference c.mu.Lock() nc := c.nc s := c.srv defer s.grWG.Done() c.mu.Unlock() if nc == nil { return } // Start read buffer. b := make([]byte, startBufSize) for { n, err := nc.Read(b) if err != nil { c.closeConnection() return } // Grab for updates for last activity. last := time.Now() // Clear inbound stats cache c.cache.inMsgs = 0 c.cache.inBytes = 0 c.cache.subs = 0 if err := c.parse(b[:n]); err != nil { // handled inline if err != ErrMaxPayload && err != ErrAuthorization { c.Errorf("Error reading from client: %s", err.Error()) c.sendErr("Parser Error") c.closeConnection() } return } // Updates stats for client and server that were collected // from parsing through the buffer. 
atomic.AddInt64(&c.inMsgs, int64(c.cache.inMsgs)) atomic.AddInt64(&c.inBytes, int64(c.cache.inBytes)) atomic.AddInt64(&s.inMsgs, int64(c.cache.inMsgs)) atomic.AddInt64(&s.inBytes, int64(c.cache.inBytes)) // Check pending clients for flush. for cp := range c.pcd { // Flush those in the set cp.mu.Lock() if cp.nc != nil { // Gather the flush calls that happened before now. // This is a signal into us about dynamic buffer allocation tuning. wfc := cp.wfc cp.wfc = 0 cp.nc.SetWriteDeadline(time.Now().Add(s.opts.WriteDeadline)) err := cp.bw.Flush() cp.nc.SetWriteDeadline(time.Time{}) if err != nil { c.Debugf("Error flushing: %v", err) cp.mu.Unlock() cp.closeConnection() cp.mu.Lock() } else { // Update outbound last activity. cp.last = last // Check if we should tune the buffer. sz := cp.bw.Available() // Check for expansion opportunity. if wfc > 2 && sz <= maxBufSize/2 { cp.bw = bufio.NewWriterSize(cp.nc, sz*2) } // Check for shrinking opportunity. if wfc == 0 && sz >= minBufSize*2 { cp.bw = bufio.NewWriterSize(cp.nc, sz/2) } } } cp.mu.Unlock() delete(c.pcd, cp) } // Check to see if we got closed, e.g. slow consumer c.mu.Lock() nc := c.nc // Activity based on interest changes or data/msgs. if c.cache.inMsgs > 0 || c.cache.subs > 0 { c.last = last } c.mu.Unlock() if nc == nil { return } // Update buffer size as/if needed. // Grow if n == len(b) && len(b) < maxBufSize { b = make([]byte, len(b)*2) } // Shrink, for now don't accelerate, ping/pong will eventually sort it out. if n < len(b)/2 && len(b) > minBufSize { b = make([]byte, len(b)/2) } } } func (c *client) traceMsg(msg []byte) { if !c.trace { return } // FIXME(dlc), allow limits to printable payload c.Tracef("->> MSG_PAYLOAD: [%s]", string(msg[:len(msg)-LEN_CR_LF])) } func (c *client) traceInOp(op string, arg []byte) { c.traceOp("->> %s", op, arg) } func (c *client) traceOutOp(op string, arg []byte) { c.traceOp("<<- %s", op, arg) } func (c *client) traceOp(format, op string, arg []byte) { if !c.trace { return } opa := []interface{}{} if op != "" { opa = append(opa, op) } if arg != nil { opa = append(opa, string(arg)) } c.Tracef(format, opa) } // Process the information messages from Clients and other Routes. func (c *client) processInfo(arg []byte) error { info := Info{} if err := json.Unmarshal(arg, &info); err != nil { return err } if c.typ == ROUTER { c.processRouteInfo(&info) } return nil } func (c *client) processErr(errStr string) { switch c.typ { case CLIENT: c.Errorf("Client Error %s", errStr) case ROUTER: c.Errorf("Route Error %s", errStr) } c.closeConnection() } func (c *client) processConnect(arg []byte) error { c.traceInOp("CONNECT", arg) c.mu.Lock() // If we can't stop the timer because the callback is in progress... if !c.clearAuthTimer() { // wait for it to finish and handle sending the failure back to // the client. for c.nc != nil { c.mu.Unlock() time.Sleep(25 * time.Millisecond) c.mu.Lock() } c.mu.Unlock() return nil } c.last = time.Now() typ := c.typ r := c.route srv := c.srv // Moved unmarshalling of clients' Options under the lock. // The client has already been added to the server map, so it is possible // that other routines lookup the client, and access its options under // the client's lock, so unmarshalling the options outside of the lock // would cause data RACEs. if err := json.Unmarshal(arg, &c.opts); err != nil { c.mu.Unlock() return err } // Indicate that the CONNECT protocol has been received, and that the // server now knows which protocol this client supports. 
c.flags.set(connectReceived) // Capture these under lock proto := c.opts.Protocol verbose := c.opts.Verbose c.mu.Unlock() if srv != nil { // As soon as c.opts is unmarshalled and if the proto is at // least ClientProtoInfo, we need to increment the following counter. // This is decremented when client is removed from the server's // clients map. if proto >= ClientProtoInfo { srv.mu.Lock() srv.cproto++ srv.mu.Unlock() } // Check for Auth if ok := srv.checkAuth(c); !ok { c.authViolation() return ErrAuthorization } } // Check client protocol request if it exists. if typ == CLIENT && (proto < ClientProtoZero || proto > ClientProtoInfo) { return ErrBadClientProtocol } // Grab connection name of remote route. if typ == ROUTER && r != nil { c.mu.Lock() c.route.remoteID = c.opts.Name c.mu.Unlock() } if verbose { c.sendOK() } return nil } func (c *client) authTimeout() { c.sendErr(ErrAuthTimeout.Error()) c.Debugf("Authorization Timeout") c.closeConnection() } func (c *client) authViolation() { if c.srv != nil && c.srv.opts.Users != nil { c.Errorf("%s - User %q", ErrAuthorization.Error(), c.opts.Username) } else { c.Errorf(ErrAuthorization.Error()) } c.sendErr("Authorization Violation") c.closeConnection() } func (c *client) maxConnExceeded() { c.Errorf(ErrTooManyConnections.Error()) c.sendErr(ErrTooManyConnections.Error()) c.closeConnection() } func (c *client) maxPayloadViolation(sz int) { c.Errorf("%s: %d vs %d", ErrMaxPayload.Error(), sz, c.mpay) c.sendErr("Maximum Payload Violation") c.closeConnection() } // Assume the lock is held upon entry. func (c *client) sendProto(info []byte, doFlush bool) error { var err error if c.bw != nil && c.nc != nil { deadlineSet := false if doFlush || c.bw.Available() < len(info) { c.nc.SetWriteDeadline(time.Now().Add(c.srv.opts.WriteDeadline)) deadlineSet = true } _, err = c.bw.Write(info) if err == nil && doFlush { err = c.bw.Flush() } if deadlineSet { c.nc.SetWriteDeadline(time.Time{}) } } return err } // Assume the lock is held upon entry. func (c *client) sendInfo(info []byte) { c.sendProto(info, true) } func (c *client) sendErr(err string) { c.mu.Lock() c.traceOutOp("-ERR", []byte(err)) c.sendProto([]byte(fmt.Sprintf("-ERR '%s'\r\n", err)), true) c.mu.Unlock() } func (c *client) sendOK() { c.mu.Lock() c.traceOutOp("OK", nil) // Can not autoflush this one, needs to be async. c.sendProto([]byte("+OK\r\n"), false) c.pcd[c] = needFlush c.mu.Unlock() } func (c *client) processPing() { c.mu.Lock() c.traceInOp("PING", nil) if c.nc == nil { c.mu.Unlock() return } c.traceOutOp("PONG", nil) err := c.sendProto([]byte("PONG\r\n"), true) if err != nil { c.clearConnection() c.Debugf("Error on Flush, error %s", err.Error()) } srv := c.srv sendUpdateINFO := false // Check if this is the first PONG, if so... if c.flags.setIfNotSet(firstPongSent) { // Check if server should send an async INFO protocol to the client if c.opts.Protocol >= ClientProtoInfo && srv != nil && c.flags.isSet(infoUpdated) { sendUpdateINFO = true } // We can now clear the flag c.flags.clear(infoUpdated) } c.mu.Unlock() // Some clients send an initial PING as part of the synchronous connect process. // They can't be receiving anything until the first PONG is received. // So we delay the possible updated INFO after this point. 
if sendUpdateINFO { srv.mu.Lock() // Use the cached protocol proto := srv.infoJSON srv.mu.Unlock() c.mu.Lock() c.sendInfo(proto) c.mu.Unlock() } } func (c *client) processPong() { c.traceInOp("PONG", nil) c.mu.Lock() c.pout = 0 c.mu.Unlock() } func (c *client) processMsgArgs(arg []byte) error { if c.trace { c.traceInOp("MSG", arg) } // Unroll splitArgs to avoid runtime/heap issues a := [MAX_MSG_ARGS][]byte{} args := a[:0] start := -1 for i, b := range arg { switch b { case ' ', '\t', '\r', '\n': if start >= 0 { args = append(args, arg[start:i]) start = -1 } default: if start < 0 { start = i } } } if start >= 0 { args = append(args, arg[start:]) } switch len(args) { case 3: c.pa.reply = nil c.pa.szb = args[2] c.pa.size = parseSize(args[2]) case 4: c.pa.reply = args[2] c.pa.szb = args[3] c.pa.size = parseSize(args[3]) default: return fmt.Errorf("processMsgArgs Parse Error: '%s'", arg) } if c.pa.size < 0 { return fmt.Errorf("processMsgArgs Bad or Missing Size: '%s'", arg) } // Common ones processed after check for arg length c.pa.subject = args[0] c.pa.sid = args[1] return nil } func (c *client) processPub(arg []byte) error { if c.trace { c.traceInOp("PUB", arg) } // Unroll splitArgs to avoid runtime/heap issues a := [MAX_PUB_ARGS][]byte{} args := a[:0] start := -1 for i, b := range arg { switch b { case ' ', '\t', '\r', '\n': if start >= 0 { args = append(args, arg[start:i]) start = -1 } default: if start < 0 { start = i } } } if start >= 0 { args = append(args, arg[start:]) } switch len(args) { case 2: c.pa.subject = args[0] c.pa.reply = nil c.pa.size = parseSize(args[1]) c.pa.szb = args[1] case 3: c.pa.subject = args[0] c.pa.reply = args[1] c.pa.size = parseSize(args[2]) c.pa.szb = args[2] default: return fmt.Errorf("processPub Parse Error: '%s'", arg) } if c.pa.size < 0 { return fmt.Errorf("processPub Bad or Missing Size: '%s'", arg) } if c.mpay > 0 && c.pa.size > c.mpay { c.maxPayloadViolation(c.pa.size) return ErrMaxPayload } if c.opts.Pedantic && !IsValidLiteralSubject(string(c.pa.subject)) { c.sendErr("Invalid Subject") } return nil } func splitArg(arg []byte) [][]byte { a := [MAX_MSG_ARGS][]byte{} args := a[:0] start := -1 for i, b := range arg { switch b { case ' ', '\t', '\r', '\n': if start >= 0 { args = append(args, arg[start:i]) start = -1 } default: if start < 0 { start = i } } } if start >= 0 { args = append(args, arg[start:]) } return args } func (c *client) processSub(argo []byte) (err error) { c.traceInOp("SUB", argo) // Indicate activity. c.cache.subs += 1 // Copy so we do not reference a potentially large buffer arg := make([]byte, len(argo)) copy(arg, argo) args := splitArg(arg) sub := &subscription{client: c} switch len(args) { case 2: sub.subject = args[0] sub.queue = nil sub.sid = args[1] case 3: sub.subject = args[0] sub.queue = args[1] sub.sid = args[2] default: return fmt.Errorf("processSub Parse Error: '%s'", arg) } shouldForward := false c.mu.Lock() if c.nc == nil { c.mu.Unlock() return nil } // Check permissions if applicable. if c.perms != nil { r := c.perms.sub.Match(string(sub.subject)) if len(r.psubs) == 0 { c.mu.Unlock() c.sendErr(fmt.Sprintf("Permissions Violation for Subscription to %q", sub.subject)) c.Errorf("Subscription Violation - User %q, Subject %q", c.opts.Username, sub.subject) return nil } } // We can have two SUB protocols coming from a route due to some // race conditions. We should make sure that we process only one. 
sid := string(sub.sid) if c.subs[sid] == nil { c.subs[sid] = sub if c.srv != nil { err = c.srv.sl.Insert(sub) if err != nil { delete(c.subs, sid) } else { shouldForward = c.typ != ROUTER } } } c.mu.Unlock() if err != nil { c.sendErr("Invalid Subject") return nil } else if c.opts.Verbose { c.sendOK() } if shouldForward { c.srv.broadcastSubscribe(sub) } return nil } func (c *client) unsubscribe(sub *subscription) { c.mu.Lock() defer c.mu.Unlock() if sub.max > 0 && sub.nm < sub.max { c.Debugf( "Deferring actual UNSUB(%s): %d max, %d received\n", string(sub.subject), sub.max, sub.nm) return } c.traceOp("<-> %s", "DELSUB", sub.sid) delete(c.subs, string(sub.sid)) if c.srv != nil { c.srv.sl.Remove(sub) } } func (c *client) processUnsub(arg []byte) error { c.traceInOp("UNSUB", arg) args := splitArg(arg) var sid []byte max := -1 switch len(args) { case 1: sid = args[0] case 2: sid = args[0] max = parseSize(args[1]) default: return fmt.Errorf("processUnsub Parse Error: '%s'", arg) } // Indicate activity. c.cache.subs += 1 var sub *subscription unsub := false shouldForward := false ok := false c.mu.Lock() if sub, ok = c.subs[string(sid)]; ok { if max > 0 { sub.max = int64(max) } else { // Clear it here to override sub.max = 0 } unsub = true shouldForward = c.typ != ROUTER && c.srv != nil } c.mu.Unlock() if unsub { c.unsubscribe(sub) } if shouldForward { c.srv.broadcastUnSubscribe(sub) } if c.opts.Verbose { c.sendOK() } return nil } func (c *client) msgHeader(mh []byte, sub *subscription) []byte { mh = append(mh, sub.sid...) mh = append(mh, ' ') if c.pa.reply != nil { mh = append(mh, c.pa.reply...) mh = append(mh, ' ') } mh = append(mh, c.pa.szb...) mh = append(mh, "\r\n"...) return mh } // Used to treat maps as efficient set var needFlush = struct{}{} var routeSeen = struct{}{} func (c *client) deliverMsg(sub *subscription, mh, msg []byte) { if sub.client == nil { return } client := sub.client client.mu.Lock() sub.nm++ // Check if we should auto-unsubscribe. if sub.max > 0 { // For routing.. shouldForward := client.typ != ROUTER && client.srv != nil // If we are at the exact number, unsubscribe but // still process the message in hand, otherwise // unsubscribe and drop message on the floor. if sub.nm == sub.max { c.Debugf("Auto-unsubscribe limit of %d reached for sid '%s'\n", sub.max, string(sub.sid)) // Due to defer, reverse the code order so that execution // is consistent with other cases where we unsubscribe. if shouldForward { defer client.srv.broadcastUnSubscribe(sub) } defer client.unsubscribe(sub) } else if sub.nm > sub.max { c.Debugf("Auto-unsubscribe limit [%d] exceeded\n", sub.max) client.mu.Unlock() client.unsubscribe(sub) if shouldForward { client.srv.broadcastUnSubscribe(sub) } return } } if client.nc == nil { client.mu.Unlock() return } // Update statistics // The msg includes the CR_LF, so pull back out for accounting. msgSize := int64(len(msg) - LEN_CR_LF) // No atomic needed since accessed under client lock. // Monitor is reading those also under client's lock. client.outMsgs++ client.outBytes += msgSize atomic.AddInt64(&c.srv.outMsgs, 1) atomic.AddInt64(&c.srv.outBytes, msgSize) // Check to see if our writes will cause a flush // in the underlying bufio. If so limit time we // will wait for flush to complete. deadlineSet := false if client.bw.Available() < (len(mh) + len(msg)) { client.wfc++ client.nc.SetWriteDeadline(time.Now().Add(client.srv.opts.WriteDeadline)) deadlineSet = true } // Deliver to the client. 
_, err := client.bw.Write(mh) if err != nil { goto writeErr } _, err = client.bw.Write(msg) if err != nil { goto writeErr } if c.trace { client.traceOutOp(string(mh[:len(mh)-LEN_CR_LF]), nil) } // TODO(dlc) - Do we need this or can we just call always? if deadlineSet { client.nc.SetWriteDeadline(time.Time{}) } client.mu.Unlock() c.pcd[client] = needFlush return writeErr: if deadlineSet { client.nc.SetWriteDeadline(time.Time{}) } client.mu.Unlock() if ne, ok := err.(net.Error); ok && ne.Timeout() { atomic.AddInt64(&client.srv.slowConsumers, 1) client.Noticef("Slow Consumer Detected") client.closeConnection() } else { c.Debugf("Error writing msg: %v", err) } } // processMsg is called to process an inbound msg from a client. func (c *client) processMsg(msg []byte) { // Snapshot server. srv := c.srv // Update statistics // The msg includes the CR_LF, so pull back out for accounting. c.cache.inMsgs += 1 c.cache.inBytes += len(msg) - LEN_CR_LF if c.trace { c.traceMsg(msg) } // defintely // Disallow publish to _SYS.>, these are reserved for internals. if c.pa.subject[0] == '_' && len(c.pa.subject) > 4 && c.pa.subject[1] == 'S' && c.pa.subject[2] == 'Y' && c.pa.subject[3] == 'S' && c.pa.subject[4] == '.' { c.pubPermissionViolation(c.pa.subject) return } // Check if published subject is allowed if we have permissions in place. if c.perms != nil { allowed, ok := c.perms.pcache[string(c.pa.subject)] if ok && !allowed { c.pubPermissionViolation(c.pa.subject) return } if !ok { r := c.perms.pub.Match(string(c.pa.subject)) notAllowed := len(r.psubs) == 0 if notAllowed { c.pubPermissionViolation(c.pa.subject) c.perms.pcache[string(c.pa.subject)] = false } else { c.perms.pcache[string(c.pa.subject)] = true } // Prune if needed. if len(c.perms.pcache) > maxPermCacheSize { // Prune the permissions cache. Keeps us from unbounded growth. r := 0 for subject := range c.perms.pcache { delete(c.cache.results, subject) r++ if r > pruneSize { break } } } // Return here to allow the pruning code to run if needed. if notAllowed { return } } } if c.opts.Verbose { c.sendOK() } // Mostly under testing scenarios. if srv == nil { return } var r *SublistResult var ok bool genid := atomic.LoadUint64(&srv.sl.genid) if genid == c.cache.genid && c.cache.results != nil { r, ok = c.cache.results[string(c.pa.subject)] } else { // reset c.cache.results = make(map[string]*SublistResult) c.cache.genid = genid } if !ok { subject := string(c.pa.subject) r = srv.sl.Match(subject) c.cache.results[subject] = r if len(c.cache.results) > maxResultCacheSize { // Prune the results cache. Keeps us from unbounded growth. r := 0 for subject := range c.cache.results { delete(c.cache.results, subject) r++ if r > pruneSize { break } } } } // Check for no interest, short circuit if so. if len(r.psubs) == 0 && len(r.qsubs) == 0 { return } // Check for pedantic and bad subject. if c.opts.Pedantic && !IsValidLiteralSubject(string(c.pa.subject)) { return } // Scratch buffer.. msgh := c.msgb[:len(msgHeadProto)] // msg header msgh = append(msgh, c.pa.subject...) msgh = append(msgh, ' ') si := len(msgh) isRoute := c.typ == ROUTER // If we are a route and we have a queue subscription, deliver direct // since they are sent direct via L2 semantics. If the match is a queue // subscription, we will return from here regardless if we find a sub. 
if isRoute { if sub, ok := srv.routeSidQueueSubscriber(c.pa.sid); ok { if sub != nil { mh := c.msgHeader(msgh[:si], sub) c.deliverMsg(sub, mh, msg) } return } } // Used to only send normal subscriptions once across a given route. var rmap map[string]struct{} // Loop over all normal subscriptions that match. for _, sub := range r.psubs { // Check if this is a send to a ROUTER, make sure we only send it // once. The other side will handle the appropriate re-processing // and fan-out. Also enforce 1-Hop semantics, so no routing to another. if sub.client.typ == ROUTER { // Skip if sourced from a ROUTER and going to another ROUTER. // This is 1-Hop semantics for ROUTERs. if isRoute { continue } // Check to see if we have already sent it here. if rmap == nil { rmap = make(map[string]struct{}, srv.numRoutes()) } sub.client.mu.Lock() if sub.client.nc == nil || sub.client.route == nil || sub.client.route.remoteID == "" { c.Debugf("Bad or Missing ROUTER Identity, not processing msg") sub.client.mu.Unlock() continue } if _, ok := rmap[sub.client.route.remoteID]; ok { c.Debugf("Ignoring route, already processed") sub.client.mu.Unlock() continue } rmap[sub.client.route.remoteID] = routeSeen sub.client.mu.Unlock() } // Normal delivery mh := c.msgHeader(msgh[:si], sub) c.deliverMsg(sub, mh, msg) } // Now process any queue subs we have if not a route if !isRoute { // Check to see if we have our own rand yet. Global rand // has contention with lots of clients, etc. if c.cache.prand == nil { c.cache.prand = rand.New(rand.NewSource(time.Now().UnixNano())) } // Process queue subs for i := 0; i < len(r.qsubs); i++ { qsubs := r.qsubs[i] index := c.cache.prand.Intn(len(qsubs)) sub := qsubs[index] if sub != nil { mh := c.msgHeader(msgh[:si], sub) c.deliverMsg(sub, mh, msg) } } } } func (c *client) pubPermissionViolation(subject []byte) { c.sendErr(fmt.Sprintf("Permissions Violation for Publish to %q", subject)) c.Errorf("Publish Violation - User %q, Subject %q", c.opts.Username, subject) } func (c *client) processPingTimer() { c.mu.Lock() defer c.mu.Unlock() c.ptmr = nil // Check if we are ready yet.. if _, ok := c.nc.(*net.TCPConn); !ok { return } c.Debugf("%s Ping Timer", c.typeString()) // Check for violation c.pout++ if c.pout > c.srv.opts.MaxPingsOut { c.Debugf("Stale Client Connection - Closing") c.sendProto([]byte(fmt.Sprintf("-ERR '%s'\r\n", "Stale Connection")), true) c.clearConnection() return } c.traceOutOp("PING", nil) // Send PING err := c.sendProto([]byte("PING\r\n"), true) if err != nil { c.Debugf("Error on Client Ping Flush, error %s", err) c.clearConnection() } else { // Reset to fire again if all OK. c.setPingTimer() } } func (c *client) setPingTimer() { if c.srv == nil { return } d := c.srv.opts.PingInterval c.ptmr = time.AfterFunc(d, c.processPingTimer) } // Lock should be held func (c *client) clearPingTimer() { if c.ptmr == nil { return } c.ptmr.Stop() c.ptmr = nil } // Lock should be held func (c *client) setAuthTimer(d time.Duration) { c.atmr = time.AfterFunc(d, func() { c.authTimeout() }) } // Lock should be held func (c *client) clearAuthTimer() bool { if c.atmr == nil { return true } stopped := c.atmr.Stop() c.atmr = nil return stopped } func (c *client) isAuthTimerSet() bool { c.mu.Lock() isSet := c.atmr != nil c.mu.Unlock() return isSet } // Lock should be held func (c *client) clearConnection() { if c.nc == nil { return } // With TLS, Close() is sending an alert (that is doing a write). 
// Need to set a deadline otherwise the server could block there // if the peer is not reading from socket. c.nc.SetWriteDeadline(time.Now().Add(c.srv.opts.WriteDeadline)) if c.bw != nil { c.bw.Flush() } c.nc.Close() c.nc.SetWriteDeadline(time.Time{}) } func (c *client) typeString() string { switch c.typ { case CLIENT: return "Client" case ROUTER: return "Router" } return "Unknown Type" } func (c *client) closeConnection() { c.mu.Lock() if c.nc == nil { c.mu.Unlock() return } c.Debugf("%s connection closed", c.typeString()) c.clearAuthTimer() c.clearPingTimer() c.clearConnection() c.nc = nil // Snapshot for use. subs := make([]*subscription, 0, len(c.subs)) for _, sub := range c.subs { subs = append(subs, sub) } srv := c.srv retryImplicit := false if c.route != nil { retryImplicit = c.route.retry } c.mu.Unlock() if srv != nil { // Unregister srv.removeClient(c) // Remove clients subscriptions. for _, sub := range subs { srv.sl.Remove(sub) // Forward on unsubscribes if we are not // a router ourselves. if c.typ != ROUTER { srv.broadcastUnSubscribe(sub) } } } // Check for a solicited route. If it was, start up a reconnect unless // we are already connected to the other end. if c.isSolicitedRoute() || retryImplicit { // Capture these under lock c.mu.Lock() rid := c.route.remoteID rtype := c.route.routeType rurl := c.route.url c.mu.Unlock() srv.mu.Lock() defer srv.mu.Unlock() // It is possible that the server is being shutdown. // If so, don't try to reconnect if !srv.running { return } if rid != "" && srv.remotes[rid] != nil { Debugf("Not attempting reconnect for solicited route, already connected to \"%s\"", rid) return } else if rid == srv.info.ID { Debugf("Detected route to self, ignoring \"%s\"", rurl) return } else if rtype != Implicit || retryImplicit { Debugf("Attempting reconnect for solicited route \"%s\"", rurl) // Keep track of this go-routine so we can wait for it on // server shutdown. srv.startGoRoutine(func() { srv.reConnectToRoute(rurl, rtype) }) } } } // Logging functionality scoped to a client or route. func (c *client) Errorf(format string, v ...interface{}) { format = fmt.Sprintf("%s - %s", c, format) Errorf(format, v...) } func (c *client) Debugf(format string, v ...interface{}) { format = fmt.Sprintf("%s - %s", c, format) Debugf(format, v...) } func (c *client) Noticef(format string, v ...interface{}) { format = fmt.Sprintf("%s - %s", c, format) Noticef(format, v...) } func (c *client) Tracef(format string, v ...interface{}) { format = fmt.Sprintf("%s - %s", c, format) Tracef(format, v...) }
1
6,789
We had the checks for those two errors for a reason; why do we not need them anymore?
nats-io-nats-server
go
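The patch in this record replaces the comparisons against the two sentinel errors with a "has the connection already been closed?" check. A minimal sketch of that pattern, using a toy type rather than the server's actual client struct: errors that were already handled inline end up closing the connection, so the read loop can treat "connection now nil" as "already reported" instead of matching specific error values.

package main

import (
	"errors"
	"fmt"
	"sync"
)

// toy stand-in for the client: nc becomes nil once the connection is closed
type client struct {
	mu sync.Mutex
	nc *struct{}
}

// alreadyHandled reports whether an inline handler has closed the connection,
// in which case the read loop should not report the error a second time.
func (c *client) alreadyHandled() bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.nc == nil
}

func main() {
	c := &client{nc: &struct{}{}}
	if err := errors.New("parse failure"); !c.alreadyHandled() {
		fmt.Println("report and close:", err)
	}
}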
@@ -19,6 +19,7 @@ from rdkit import Chem from rdkit.Chem.PeriodicTable import numTable from rdkit.Chem import Crippen from rdkit.Chem import rdPartialCharges, rdMolDescriptors +from rdkit.six.moves import xrange import numpy import bisect radCol = 5
1
# $Id$ # # Copyright (C) 2001-2008 greg Landrum and Rational Discovery LLC # # @@ All Rights Reserved @@ # This file is part of the RDKit. # The contents are covered by the terms of the BSD license # which is included in the file license.txt, found at the root # of the RDKit source tree. # """ Exposes functionality for MOE-like approximate molecular surface area descriptors. The MOE-like VSA descriptors are also calculated here """ from __future__ import print_function from rdkit import Chem from rdkit.Chem.PeriodicTable import numTable from rdkit.Chem import Crippen from rdkit.Chem import rdPartialCharges, rdMolDescriptors import numpy import bisect radCol = 5 bondScaleFacts = [.1, 0, .2, .3] # aromatic,single,double,triple def _LabuteHelper(mol, includeHs=1, force=0): """ *Internal Use Only* helper function for LabuteASA calculation returns an array of atomic contributions to the ASA **Note:** Changes here affect the version numbers of all ASA descriptors """ if not force: try: res = mol._labuteContribs except AttributeError: pass else: if res: return res tpl = rdMolDescriptors._CalcLabuteASAContribs(mol, includeHs) ats, hs = tpl Vi = [hs] + list(ats) mol._labuteContribs = Vi return Vi def _pyLabuteHelper(mol, includeHs=1, force=0): """ *Internal Use Only* helper function for LabuteASA calculation returns an array of atomic contributions to the ASA **Note:** Changes here affect the version numbers of all ASA descriptors """ import math if not force: try: res = mol._labuteContribs except AttributeError: pass else: if res.all(): return res nAts = mol.GetNumAtoms() Vi = numpy.zeros(nAts + 1, 'd') rads = numpy.zeros(nAts + 1, 'd') # 0 contains the H information rads[0] = numTable[1][radCol] for i in xrange(nAts): rads[i + 1] = numTable[mol.GetAtomWithIdx(i).GetAtomicNum()][radCol] # start with explicit bonds for bond in mol.GetBonds(): idx1 = bond.GetBeginAtomIdx() + 1 idx2 = bond.GetEndAtomIdx() + 1 Ri = rads[idx1] Rj = rads[idx2] if not bond.GetIsAromatic(): bij = Ri + Rj - bondScaleFacts[bond.GetBondType()] else: bij = Ri + Rj - bondScaleFacts[0] dij = min(max(abs(Ri - Rj), bij), Ri + Rj) Vi[idx1] += Rj * Rj - (Ri - dij)**2 / dij Vi[idx2] += Ri * Ri - (Rj - dij)**2 / dij # add in hydrogens if includeHs: j = 0 Rj = rads[j] for i in xrange(1, nAts + 1): Ri = rads[i] bij = Ri + Rj dij = min(max(abs(Ri - Rj), bij), Ri + Rj) Vi[i] += Rj * Rj - (Ri - dij)**2 / dij Vi[j] += Ri * Ri - (Rj - dij)**2 / dij for i in xrange(nAts + 1): Ri = rads[i] Vi[i] = 4 * math.pi * Ri**2 - math.pi * Ri * Vi[i] mol._labuteContribs = Vi return Vi #def SMR_VSA(mol,bins=[0.11,0.26,0.35,0.39,0.44,0.485,0.56]): # original default bins from assuming Labute values are logs # mrBins=[1.29, 1.82, 2.24, 2.45, 2.75, 3.05, 3.63] mrBins = [1.29, 1.82, 2.24, 2.45, 2.75, 3.05, 3.63, 3.8, 4.0] def pySMR_VSA_(mol, bins=None, force=1): """ *Internal Use Only* """ if not force: try: res = mol._smrVSA except AttributeError: pass else: if res.all(): return res if bins is None: bins = mrBins Crippen._Init() propContribs = Crippen._GetAtomContribs(mol, force=force) volContribs = _LabuteHelper(mol) ans = numpy.zeros(len(bins) + 1, 'd') for i in range(len(propContribs)): prop = propContribs[i] vol = volContribs[i + 1] if prop is not None: bin = bisect.bisect_right(bins, prop[1]) ans[bin] += vol mol._smrVSA = ans return ans SMR_VSA_ = rdMolDescriptors.SMR_VSA_ # # Original bins (from Labute paper) are: # [-0.4,-0.2,0,0.1,0.15,0.2,0.25,0.3,0.4] # logpBins = [-0.4, -0.2, 0, 0.1, 0.15, 0.2, 0.25, 0.3, 0.4, 0.5, 0.6] def pySlogP_VSA_(mol, 
bins=None, force=1): """ *Internal Use Only* """ if not force: try: res = mol._slogpVSA except AttributeError: pass else: if res.all(): return res if bins is None: bins = logpBins Crippen._Init() propContribs = Crippen._GetAtomContribs(mol, force=force) volContribs = _LabuteHelper(mol) ans = numpy.zeros(len(bins) + 1, 'd') for i in range(len(propContribs)): prop = propContribs[i] vol = volContribs[i + 1] if prop is not None: bin = bisect.bisect_right(bins, prop[0]) ans[bin] += vol mol._slogpVSA = ans return ans SlogP_VSA_ = rdMolDescriptors.SlogP_VSA_ chgBins = [-.3, -.25, -.20, -.15, -.10, -.05, 0, .05, .10, .15, .20, .25, .30] def pyPEOE_VSA_(mol, bins=None, force=1): """ *Internal Use Only* """ if not force: try: res = mol._peoeVSA except AttributeError: pass else: if res.all(): return res if bins is None: bins = chgBins Crippen._Init() #print('\ts:',repr(mol.GetMol())) #print('\t\t:',len(mol.GetAtoms())) rdPartialCharges.ComputeGasteigerCharges(mol) #propContribs = [float(x.GetProp('_GasteigerCharge')) for x in mol.GetAtoms()] propContribs = [] for at in mol.GetAtoms(): p = at.GetProp('_GasteigerCharge') try: v = float(p) except ValueError: v = 0.0 propContribs.append(v) #print '\tp',propContribs volContribs = _LabuteHelper(mol) #print '\tv',volContribs ans = numpy.zeros(len(bins) + 1, 'd') for i in range(len(propContribs)): prop = propContribs[i] vol = volContribs[i + 1] if prop is not None: bin = bisect.bisect_right(bins, prop) ans[bin] += vol mol._peoeVSA = ans return ans PEOE_VSA_ = rdMolDescriptors.PEOE_VSA_ #------------------------------------------------- # install the various VSA descriptors in the namespace def _InstallDescriptors(): for i in range(len(mrBins)): fn = lambda x, y=i: SMR_VSA_(x, force=0)[y] if i > 0: fn.__doc__ = "MOE MR VSA Descriptor %d (% 4.2f <= x < % 4.2f)" % (i + 1, mrBins[i - 1], mrBins[i]) else: fn.__doc__ = "MOE MR VSA Descriptor %d (-inf < x < % 4.2f)" % (i + 1, mrBins[i]) name = "SMR_VSA%d" % (i + 1) fn.version = "1.0.1" globals()[name] = fn i += 1 fn = lambda x, y=i: SMR_VSA_(x, force=0)[y] fn.__doc__ = "MOE MR VSA Descriptor %d (% 4.2f <= x < inf)" % (i + 1, mrBins[i - 1]) fn.version = "1.0.1" name = "SMR_VSA%d" % (i + 1) globals()[name] = fn for i in range(len(logpBins)): fn = lambda x, y=i: SlogP_VSA_(x, force=0)[y] if i > 0: fn.__doc__ = "MOE logP VSA Descriptor %d (% 4.2f <= x < % 4.2f)" % (i + 1, logpBins[i - 1], logpBins[i]) else: fn.__doc__ = "MOE logP VSA Descriptor %d (-inf < x < % 4.2f)" % (i + 1, logpBins[i]) name = "SlogP_VSA%d" % (i + 1) fn.version = "1.0.1" globals()[name] = fn i += 1 fn = lambda x, y=i: SlogP_VSA_(x, force=0)[y] fn.__doc__ = "MOE logP VSA Descriptor %d (% 4.2f <= x < inf)" % (i + 1, logpBins[i - 1]) fn.version = "1.0.1" name = "SlogP_VSA%d" % (i + 1) globals()[name] = fn for i in range(len(chgBins)): fn = lambda x, y=i: PEOE_VSA_(x, force=0)[y] if i > 0: fn.__doc__ = "MOE Charge VSA Descriptor %d (% 4.2f <= x < % 4.2f)" % (i + 1, chgBins[i - 1], chgBins[i]) else: fn.__doc__ = "MOE Charge VSA Descriptor %d (-inf < x < % 4.2f)" % (i + 1, chgBins[i]) name = "PEOE_VSA%d" % (i + 1) fn.version = "1.0.1" globals()[name] = fn i += 1 fn = lambda x, y=i: PEOE_VSA_(x, force=0)[y] fn.version = "1.0.1" fn.__doc__ = "MOE Charge VSA Descriptor %d (% 4.2f <= x < inf)" % (i + 1, chgBins[i - 1]) name = "PEOE_VSA%d" % (i + 1) globals()[name] = fn fn = None # Change log for the MOE-type descriptors: # version 1.0.1: optimizations, values unaffected _InstallDescriptors() def pyLabuteASA(mol, includeHs=1): """ calculates Labute's 
Approximate Surface Area (ASA from MOE) Definition from P. Labute's article in the Journal of the Chemical Computing Group and J. Mol. Graph. Mod. _18_ 464-477 (2000) """ Vi = _LabuteHelper(mol, includeHs=includeHs) return sum(Vi) pyLabuteASA.version = "1.0.1" # Change log for LabuteASA: # version 1.0.1: optimizations, values unaffected LabuteASA = lambda *x, **y: rdMolDescriptors.CalcLabuteASA(*x, **y) LabuteASA.version = rdMolDescriptors._CalcLabuteASA_version def _pyTPSAContribs(mol, verbose=False): """ DEPRECATED: this has been reimplmented in C++ calculates atomic contributions to a molecules TPSA Algorithm described in: P. Ertl, B. Rohde, P. Selzer Fast Calculation of Molecular Polar Surface Area as a Sum of Fragment-based Contributions and Its Application to the Prediction of Drug Transport Properties, J.Med.Chem. 43, 3714-3717, 2000 Implementation based on the Daylight contrib program tpsa.c NOTE: The JMC paper describing the TPSA algorithm includes contributions from sulfur and phosphorus, however according to Peter Ertl (personal communication, 2010) the correlation of TPSA with various ADME properties is better if only contributions from oxygen and nitrogen are used. This matches the daylight contrib implementation. """ res = [0] * mol.GetNumAtoms() for i in range(mol.GetNumAtoms()): atom = mol.GetAtomWithIdx(i) atNum = atom.GetAtomicNum() if atNum in [7, 8]: #nHs = atom.GetImplicitValence()-atom.GetHvyValence() nHs = atom.GetTotalNumHs() chg = atom.GetFormalCharge() isArom = atom.GetIsAromatic() in3Ring = atom.IsInRingSize(3) bonds = atom.GetBonds() numNeighbors = atom.GetDegree() nSing = 0 nDoub = 0 nTrip = 0 nArom = 0 for bond in bonds: otherAt = bond.GetOtherAtom(atom) if otherAt.GetAtomicNum() != 1: if bond.GetIsAromatic(): nArom += 1 else: ord = bond.GetBondType() if ord == Chem.BondType.SINGLE: nSing += 1 elif ord == Chem.BondType.DOUBLE: nDoub += 1 elif ord == Chem.BondType.TRIPLE: nTrip += 1 else: numNeighbors -= 1 nHs += 1 tmp = -1 if atNum == 7: if numNeighbors == 1: if nHs == 0 and nTrip == 1 and chg == 0: tmp = 23.79 elif nHs == 1 and nDoub == 1 and chg == 0: tmp = 23.85 elif nHs == 2 and nSing == 1 and chg == 0: tmp = 26.02 elif nHs == 2 and nDoub == 1 and chg == 1: tmp = 25.59 elif nHs == 3 and nSing == 1 and chg == 1: tmp = 27.64 elif numNeighbors == 2: if nHs == 0 and nSing == 1 and nDoub == 1 and chg == 0: tmp = 12.36 elif nHs == 0 and nTrip == 1 and nDoub == 1 and chg == 0: tmp = 13.60 elif nHs == 1 and nSing == 2 and chg == 0: if not in3Ring: tmp = 12.03 else: tmp = 21.94 elif nHs == 0 and nTrip == 1 and nSing == 1 and chg == 1: tmp = 4.36 elif nHs == 1 and nDoub == 1 and nSing == 1 and chg == 1: tmp = 13.97 elif nHs == 2 and nSing == 2 and chg == 1: tmp = 16.61 elif nHs == 0 and nArom == 2 and chg == 0: tmp = 12.89 elif nHs == 1 and nArom == 2 and chg == 0: tmp = 15.79 elif nHs == 1 and nArom == 2 and chg == 1: tmp = 14.14 elif numNeighbors == 3: if nHs == 0 and nSing == 3 and chg == 0: if not in3Ring: tmp = 3.24 else: tmp = 3.01 elif nHs == 0 and nSing == 1 and nDoub == 2 and chg == 0: tmp = 11.68 elif nHs == 0 and nSing == 2 and nDoub == 1 and chg == 1: tmp = 3.01 elif nHs == 1 and nSing == 3 and chg == 1: tmp = 4.44 elif nHs == 0 and nArom == 3 and chg == 0: tmp = 4.41 elif nHs == 0 and nSing == 1 and nArom == 2 and chg == 0: tmp = 4.93 elif nHs == 0 and nDoub == 1 and nArom == 2 and chg == 0: tmp = 8.39 elif nHs == 0 and nArom == 3 and chg == 1: tmp = 4.10 elif nHs == 0 and nSing == 1 and nArom == 2 and chg == 1: tmp = 3.88 elif numNeighbors == 4: if nHs 
== 0 and nSing == 4 and chg == 1: tmp = 0.00 if tmp < 0.0: tmp = 30.5 - numNeighbors * 8.2 + nHs * 1.5 if tmp < 0.0: tmp = 0.0 elif atNum == 8: #print(nHs,nSing,chg) if numNeighbors == 1: if nHs == 0 and nDoub == 1 and chg == 0: tmp = 17.07 elif nHs == 1 and nSing == 1 and chg == 0: tmp = 20.23 elif nHs == 0 and nSing == 1 and chg == -1: tmp = 23.06 elif numNeighbors == 2: if nHs == 0 and nSing == 2 and chg == 0: if not in3Ring: tmp = 9.23 else: tmp = 12.53 elif nHs == 0 and nArom == 2 and chg == 0: tmp = 13.14 if tmp < 0.0: tmp = 28.5 - numNeighbors * 8.6 + nHs * 1.5 if tmp < 0.0: tmp = 0.0 if verbose: print('\t', atom.GetIdx(), atom.GetSymbol(), atNum, nHs, nSing, nDoub, nTrip, nArom, chg, tmp) res[atom.GetIdx()] = tmp return res def _pyTPSA(mol, verbose=False): """ DEPRECATED: this has been reimplmented in C++ calculates the polar surface area of a molecule based upon fragments Algorithm in: P. Ertl, B. Rohde, P. Selzer Fast Calculation of Molecular Polar Surface Area as a Sum of Fragment-based Contributions and Its Application to the Prediction of Drug Transport Properties, J.Med.Chem. 43, 3714-3717, 2000 Implementation based on the Daylight contrib program tpsa.c """ contribs = _pyTPSAContribs(mol, verbose=verbose) res = 0.0 for contrib in contribs: res += contrib return res _pyTPSA.version = "1.0.1" TPSA = lambda *x, **y: rdMolDescriptors.CalcTPSA(*x, **y) TPSA.version = rdMolDescriptors._CalcTPSA_version if __name__ == '__main__': smis = ['C', 'CC', 'CCC', 'CCCC', 'CO', 'CCO', 'COC'] smis = ['C(=O)O', 'c1ccccc1'] for smi in smis: m = Chem.MolFromSmiles(smi) #print(smi, LabuteASA(m)) print('-----------\n', smi) #print('M:',['% 4.2f'%x for x in SMR_VSA_(m)]) #print('L:',['% 4.2f'%x for x in SlogP_VSA_(m)]) print('P:', ['% 4.2f' % x for x in PEOE_VSA_(m)]) print('P:', ['% 4.2f' % x for x in PEOE_VSA_(m)]) print()
1
15,994
Why didn't you just switch to `range` here too?
rdkit-rdkit
cpp
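The review question for this record is why the patch imports a compatibility xrange instead of switching the loops to the built-in. A tiny, purely illustrative snippet (plain Python, nothing RDKit-specific) of the suggestion: on Python 3 the built-in range is already lazy, and on Python 2 it merely builds a small list, so loops over atom counts need no compatibility import.

n_atoms = 5

# The reviewer's suggestion: use the built-in directly.
for i in range(n_atoms):
    print(i)

# On Python 3, range is lazy, so there is no performance reason to keep xrange:
print(type(range(n_atoms)))  # <class 'range'>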
@@ -32,15 +32,15 @@ import ( "google.golang.org/api/option" ) -// New returns a GCS Bucket. It handles creation of a client used to communicate +// NewBucket returns a GCS Bucket. It handles creation of a client used to communicate // to GCS service. -func New(ctx context.Context, bucketName string, opts *BucketOptions) (*blob.Bucket, error) { +func NewBucket(ctx context.Context, bucketName string, client *gcp.HTTPClient) (*blob.Bucket, error) { if err := validateBucketChar(bucketName); err != nil { return nil, err } var o []option.ClientOption - if opts != nil { - o = append(o, option.WithTokenSource(opts.TokenSource)) + if client != nil { + o = append(o, option.WithHTTPClient(&client.Client)) } c, err := storage.NewClient(ctx, o...) if err != nil {
1
// Copyright 2018 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package gcsblob provides an implementation of using blob API on GCS. // It is a wrapper around GCS client library. package gcsblob import ( "context" "errors" "fmt" "regexp" "unicode/utf8" "github.com/google/go-x-cloud/blob" "github.com/google/go-x-cloud/blob/driver" "github.com/google/go-x-cloud/gcp" "cloud.google.com/go/storage" "google.golang.org/api/googleapi" "google.golang.org/api/option" ) // New returns a GCS Bucket. It handles creation of a client used to communicate // to GCS service. func New(ctx context.Context, bucketName string, opts *BucketOptions) (*blob.Bucket, error) { if err := validateBucketChar(bucketName); err != nil { return nil, err } var o []option.ClientOption if opts != nil { o = append(o, option.WithTokenSource(opts.TokenSource)) } c, err := storage.NewClient(ctx, o...) if err != nil { return nil, err } return blob.NewBucket(&bucket{name: bucketName, client: c}), nil } // bucket represents a GCS bucket, which handles read, write and delete operations // on objects within it. type bucket struct { name string client *storage.Client } // BucketOptions provides information settings during bucket initialization. type BucketOptions struct { TokenSource gcp.TokenSource } type reader struct { *storage.Reader } func (r *reader) Attrs() *driver.ObjectAttrs { return &driver.ObjectAttrs{ Size: r.Size(), ContentType: r.ContentType(), } } // NewRangeReader returns a Reader that reads part of an object, reading at most // length bytes starting at the given offset. If length is 0, it will read only // the metadata. If length is negative, it will read till the end of the object. func (b *bucket) NewRangeReader(ctx context.Context, key string, offset, length int64) (driver.Reader, error) { bkt := b.client.Bucket(b.name) obj := bkt.Object(key) r, err := obj.NewRangeReader(ctx, offset, length) if isErrNotExist(err) { return nil, gcsError{bucket: b.name, key: key, msg: err.Error(), kind: driver.NotFound} } return &reader{Reader: r}, err } // NewWriter returns Writer that writes to an object associated with key. // // A new object will be created unless an object with this key already exists. // Otherwise any previous object with the same name will be replaced. // The object will not be available (and any previous object will remain) // until Close has been called. // // A WriterOptions can be given to change the default behavior of the Writer. // // The caller must call Close on the returned Writer when done writing. func (b *bucket) NewWriter(ctx context.Context, key string, contentType string, opts *driver.WriterOptions) (driver.Writer, error) { if err := validateObjectChar(key); err != nil { return nil, err } bkt := b.client.Bucket(b.name) obj := bkt.Object(key) w := obj.NewWriter(ctx) w.ContentType = contentType if opts != nil { w.ChunkSize = bufferSize(opts.BufferSize) } return w, nil } // Delete deletes the object associated with key. It is a no-op if that object // does not exist. 
func (b *bucket) Delete(ctx context.Context, key string) error { bkt := b.client.Bucket(b.name) obj := bkt.Object(key) err := obj.Delete(ctx) if isErrNotExist(err) { return gcsError{bucket: b.name, key: key, msg: err.Error(), kind: driver.NotFound} } return err } const namingRuleURL = "https://cloud.google.com/storage/docs/naming" // validateBucketChar checks whether character set and length meet the general requirement // of bucket naming rule. See https://cloud.google.com/storage/docs/naming for // the full requirements and best practice. func validateBucketChar(name string) error { v := regexp.MustCompile(`^[a-z0-9][a-z0-9-_.]{1,220}[a-z0-9]$`) if !v.MatchString(name) { return fmt.Errorf("invalid bucket name, see %s for detailed requirements", namingRuleURL) } return nil } // validateObjectChar checks whether name is a valid UTF-8 encoded string, and its // length is between 1-1024 bytes. See https://cloud.google.com/storage/docs/naming // for the full requirements and best practice. func validateObjectChar(name string) error { if name == "" { return errors.New("object name is empty") } if !utf8.ValidString(name) { return fmt.Errorf("object name is not valid UTF-8, see %s for detailed requirements", namingRuleURL) } if len(name) > 1024 { return fmt.Errorf("object name is longer than 1024 bytes, see %s for detailed requirements", namingRuleURL) } return nil } func bufferSize(size int) int { if size == 0 { return googleapi.DefaultUploadChunkSize } else if size > 0 { return size } return 0 // disable buffering } type gcsError struct { bucket, key, msg string kind driver.ErrorKind } func (e gcsError) Error() string { return fmt.Sprintf("gcs://%s/%s: %s", e.bucket, e.key, e.msg) } func (e gcsError) BlobError() driver.ErrorKind { return e.kind } func isErrNotExist(err error) bool { return err == storage.ErrObjectNotExist }
1
10,158
I don't even think `nil` should be allowed, since it uses global state to get the client.
google-go-cloud
go
@@ -352,6 +352,9 @@ def qt_message_handler(msg_type, context, msg): # may not be available on the system "QSslSocket: cannot resolve SSLv3_client_method", "QSslSocket: cannot resolve SSLv3_server_method", + # When enabling debugging with QtWebEngine + "Remote debugging server started successfully. Try pointing a " + "Chromium-based browser to ", ] if sys.platform == 'darwin': suppressed_msgs += [
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2014-2016 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """Loggers and utilities related to logging.""" import os import sys import html as pyhtml import logging import contextlib import collections import faulthandler import traceback import warnings import json import inspect from PyQt5 import QtCore # Optional imports try: import colorama except ImportError: colorama = None _log_inited = False COLORS = ['black', 'red', 'green', 'yellow', 'blue', 'purple', 'cyan', 'white'] COLOR_ESCAPES = {color: '\033[{}m'.format(i) for i, color in enumerate(COLORS, start=30)} RESET_ESCAPE = '\033[0m' # Log formats to use. SIMPLE_FMT = ('{green}{asctime:8}{reset} {log_color}{levelname}{reset}: ' '{message}') EXTENDED_FMT = ('{green}{asctime:8}{reset} ' '{log_color}{levelname:8}{reset} ' '{cyan}{name:10} {module}:{funcName}:{lineno}{reset} ' '{log_color}{message}{reset}') EXTENDED_FMT_HTML = ( '<tr>' '<td><pre>%(green)s%(asctime)-8s%(reset)s</pre></td>' '<td><pre>%(log_color)s%(levelname)-8s%(reset)s</pre></td>' '<td></pre>%(cyan)s%(name)-10s</pre></td>' '<td><pre>%(cyan)s%(module)s:%(funcName)s:%(lineno)s%(reset)s</pre></td>' '<td><pre>%(log_color)s%(message)s%(reset)s</pre></td>' '</tr>' ) DATEFMT = '%H:%M:%S' LOG_COLORS = { 'VDEBUG': 'white', 'DEBUG': 'white', 'INFO': 'green', 'WARNING': 'yellow', 'ERROR': 'red', 'CRITICAL': 'red', } # We first monkey-patch logging to support our VDEBUG level before getting the # loggers. Based on http://stackoverflow.com/a/13638084 VDEBUG_LEVEL = 9 logging.addLevelName(VDEBUG_LEVEL, 'VDEBUG') logging.VDEBUG = VDEBUG_LEVEL LOG_LEVELS = { 'VDEBUG': logging.VDEBUG, 'DEBUG': logging.DEBUG, 'INFO': logging.INFO, 'WARNING': logging.WARNING, 'ERROR': logging.ERROR, 'CRITICAL': logging.CRITICAL, } def vdebug(self, msg, *args, **kwargs): """Log with a VDEBUG level. VDEBUG is used when a debug message is rather verbose, and probably of little use to the end user or for post-mortem debugging, i.e. the content probably won't change unless the code changes. """ if self.isEnabledFor(VDEBUG_LEVEL): # pylint: disable=protected-access self._log(VDEBUG_LEVEL, msg, args, **kwargs) logging.Logger.vdebug = vdebug # The different loggers used. 
statusbar = logging.getLogger('statusbar') completion = logging.getLogger('completion') destroy = logging.getLogger('destroy') modes = logging.getLogger('modes') webview = logging.getLogger('webview') mouse = logging.getLogger('mouse') misc = logging.getLogger('misc') url = logging.getLogger('url') procs = logging.getLogger('procs') commands = logging.getLogger('commands') init = logging.getLogger('init') signals = logging.getLogger('signals') hints = logging.getLogger('hints') keyboard = logging.getLogger('keyboard') downloads = logging.getLogger('downloads') js = logging.getLogger('js') # Javascript console messages qt = logging.getLogger('qt') # Warnings produced by Qt rfc6266 = logging.getLogger('rfc6266') ipc = logging.getLogger('ipc') shlexer = logging.getLogger('shlexer') save = logging.getLogger('save') message = logging.getLogger('message') config = logging.getLogger('config') sessions = logging.getLogger('sessions') ram_handler = None def stub(suffix=''): """Show a STUB: message for the calling function.""" function = inspect.stack()[1][3] text = "STUB: {}".format(function) if suffix: text = '{} ({})'.format(text, suffix) misc.warning(text) class CriticalQtWarning(Exception): """Exception raised when there's a critical Qt warning.""" def init_log(args): """Init loggers based on the argparse namespace passed.""" level = args.loglevel.upper() try: numeric_level = getattr(logging, level) except AttributeError: raise ValueError("Invalid log level: {}".format(args.loglevel)) if numeric_level > logging.DEBUG and args.debug: numeric_level = logging.DEBUG console, ram = _init_handlers(numeric_level, args.color, args.force_color, args.json_logging, args.loglines) root = logging.getLogger() if console is not None: if args.logfilter is not None: console.addFilter(LogFilter(args.logfilter.split(','))) root.addHandler(console) if ram is not None: root.addHandler(ram) root.setLevel(logging.NOTSET) logging.captureWarnings(True) _init_py_warnings() QtCore.qInstallMessageHandler(qt_message_handler) global _log_inited _log_inited = True def _init_py_warnings(): """Initialize Python warning handling.""" warnings.simplefilter('default') warnings.filterwarnings('ignore', module='pdb', category=ResourceWarning) @contextlib.contextmanager def disable_qt_msghandler(): """Contextmanager which temporarily disables the Qt message handler.""" old_handler = QtCore.qInstallMessageHandler(None) try: yield finally: QtCore.qInstallMessageHandler(old_handler) @contextlib.contextmanager def ignore_py_warnings(**kwargs): """Contextmanager to temporarily disable certain Python warnings.""" warnings.filterwarnings('ignore', **kwargs) yield if _log_inited: _init_py_warnings() def _init_handlers(level, color, force_color, json_logging, ram_capacity): """Init log handlers. Args: level: The numeric logging level. color: Whether to use color if available. force_color: Force colored output. json_logging: Output log lines in JSON (this disables all colors). 
""" global ram_handler console_fmt, ram_fmt, html_fmt, use_colorama = _init_formatters( level, color, force_color, json_logging) if sys.stderr is None: console_handler = None else: strip = False if force_color else None if use_colorama: stream = colorama.AnsiToWin32(sys.stderr, strip=strip) else: stream = sys.stderr console_handler = logging.StreamHandler(stream) console_handler.setLevel(level) console_handler.setFormatter(console_fmt) if ram_capacity == 0: ram_handler = None else: ram_handler = RAMHandler(capacity=ram_capacity) ram_handler.setLevel(logging.NOTSET) ram_handler.setFormatter(ram_fmt) ram_handler.html_formatter = html_fmt return console_handler, ram_handler def _init_formatters(level, color, force_color, json_logging): """Init log formatters. Args: level: The numeric logging level. color: Whether to use color if available. force_color: Force colored output. json_logging: Format lines as JSON (disables all color). Return: A (console_formatter, ram_formatter, use_colorama) tuple. console_formatter/ram_formatter: logging.Formatter instances. use_colorama: Whether to use colorama. """ console_fmt = EXTENDED_FMT if level <= logging.DEBUG else SIMPLE_FMT ram_formatter = ColoredFormatter(EXTENDED_FMT, DATEFMT, '{', use_colors=False) html_formatter = HTMLFormatter(EXTENDED_FMT_HTML, DATEFMT, log_colors=LOG_COLORS) if sys.stderr is None: return None, ram_formatter, html_formatter, False if json_logging: console_formatter = JSONFormatter() return console_formatter, ram_formatter, html_formatter, False use_colorama = False color_supported = os.name == 'posix' or colorama if color_supported and (sys.stderr.isatty() or force_color) and color: use_colors = True if colorama and os.name != 'posix': use_colorama = True else: use_colors = False console_formatter = ColoredFormatter(console_fmt, DATEFMT, '{', use_colors=use_colors) return console_formatter, ram_formatter, html_formatter, use_colorama def qt_message_handler(msg_type, context, msg): """Qt message handler to redirect qWarning etc. to the logging system. Args: QtMsgType msg_type: The level of the message. QMessageLogContext context: The source code location of the message. msg: The message text. """ # Mapping from Qt logging levels to the matching logging module levels. # Note we map critical to ERROR as it's actually "just" an error, and fatal # to critical. qt_to_logging = { QtCore.QtDebugMsg: logging.DEBUG, QtCore.QtWarningMsg: logging.WARNING, QtCore.QtCriticalMsg: logging.ERROR, QtCore.QtFatalMsg: logging.CRITICAL, } try: # pylint: disable=no-member,useless-suppression qt_to_logging[QtCore.QtInfoMsg] = logging.INFO except AttributeError: # Qt < 5.5 pass # Change levels of some well-known messages to debug so they don't get # shown to the user. # # If a message starts with any text in suppressed_msgs, it's not logged as # error. suppressed_msgs = [ # PNGs in Qt with broken color profile # https://bugreports.qt.io/browse/QTBUG-39788 'libpng warning: iCCP: Not recognizing known sRGB profile that has ' 'been edited', # flake8: disable=E131 'libpng warning: iCCP: known incorrect sRGB profile', # Hopefully harmless warning 'OpenType support missing for script ', # Error if a QNetworkReply gets two different errors set. Harmless Qt # bug on some pages. 
# https://bugreports.qt.io/browse/QTBUG-30298 'QNetworkReplyImplPrivate::error: Internal problem, this method must ' 'only be called once.', # Sometimes indicates missing text, but most of the time harmless 'load glyph failed ', # Harmless, see https://bugreports.qt.io/browse/QTBUG-42479 'content-type missing in HTTP POST, defaulting to ' 'application/x-www-form-urlencoded. ' 'Use QNetworkRequest::setHeader() to fix this problem.', # https://bugreports.qt.io/browse/QTBUG-43118 'Using blocking call!', # Hopefully harmless '"Method "GetAll" with signature "s" on interface ' '"org.freedesktop.DBus.Properties" doesn\'t exist', '"Method \\"GetAll\\" with signature \\"s\\" on interface ' '\\"org.freedesktop.DBus.Properties\\" doesn\'t exist\\n"', 'WOFF support requires QtWebKit to be built with zlib support.', # Weird Enlightment/GTK X extensions 'QXcbWindow: Unhandled client message: "_E_', 'QXcbWindow: Unhandled client message: "_ECORE_', 'QXcbWindow: Unhandled client message: "_GTK_', # Happens on AppVeyor CI 'SetProcessDpiAwareness failed:', # https://bugreports.qt.io/browse/QTBUG-49174 'QObject::connect: Cannot connect (null)::stateChanged(' 'QNetworkSession::State) to ' 'QNetworkReplyHttpImpl::_q_networkSessionStateChanged(' 'QNetworkSession::State)', # https://bugreports.qt.io/browse/QTBUG-53989 "Image of format '' blocked because it is not considered safe. If you " "are sure it is safe to do so, you can white-list the format by " "setting the environment variable QTWEBKIT_IMAGEFORMAT_WHITELIST=", # Installing Qt from the installer may cause it looking for SSL3 which # may not be available on the system "QSslSocket: cannot resolve SSLv3_client_method", "QSslSocket: cannot resolve SSLv3_server_method", ] if sys.platform == 'darwin': suppressed_msgs += [ 'libpng warning: iCCP: known incorrect sRGB profile', # https://bugreports.qt.io/browse/QTBUG-47154 'virtual void QSslSocketBackendPrivate::transmit() SSLRead failed ' 'with: -9805', # flake8: disable=E131 ] # Messages which will trigger an exception immediately critical_msgs = [ 'Could not parse stylesheet of object', ] if any(msg.strip().startswith(pattern) for pattern in critical_msgs): # For some reason, the stack gets lost when raising here... logger = logging.getLogger('misc') logger.error("Got critical Qt warning!", stack_info=True) raise CriticalQtWarning(msg) elif any(msg.strip().startswith(pattern) for pattern in suppressed_msgs): level = logging.DEBUG else: level = qt_to_logging[msg_type] if context.function is None: func = 'none' elif ':' in context.function: func = '"{}"'.format(context.function) else: func = context.function if context.category is None or context.category == 'default': name = 'qt' else: name = 'qt-' + context.category if msg.splitlines()[0] == ('This application failed to start because it ' 'could not find or load the Qt platform plugin ' '"xcb".'): # Handle this message specially. msg += ("\n\nOn Archlinux, this should fix the problem:\n" " pacman -S libxkbcommon-x11") faulthandler.disable() stack = ''.join(traceback.format_stack()) record = qt.makeRecord(name, level, context.file, context.line, msg, None, None, func, sinfo=stack) qt.handle(record) @contextlib.contextmanager def hide_qt_warning(pattern, logger='qt'): """Hide Qt warnings matching the given regex.""" log_filter = QtWarningFilter(pattern) logger_obj = logging.getLogger(logger) logger_obj.addFilter(log_filter) try: yield finally: logger_obj.removeFilter(log_filter) class QtWarningFilter(logging.Filter): """Filter to filter Qt warnings. 
Attributes: _pattern: The start of the message. """ def __init__(self, pattern): super().__init__() self._pattern = pattern def filter(self, record): """Determine if the specified record is to be logged.""" do_log = not record.msg.strip().startswith(self._pattern) return do_log class LogFilter(logging.Filter): """Filter to filter log records based on the commandline argument. The default Filter only supports one name to show - we support a comma-separated list instead. Attributes: _names: A list of names that should be logged. """ def __init__(self, names): super().__init__() self._names = names def filter(self, record): """Determine if the specified record is to be logged.""" if self._names is None: return True if record.levelno > logging.DEBUG: # More important than DEBUG, so we won't filter at all return True for name in self._names: if record.name == name: return True elif not record.name.startswith(name): continue elif record.name[len(name)] == '.': return True return False class RAMHandler(logging.Handler): """Logging handler which keeps the messages in a deque in RAM. Loosely based on logging.BufferingHandler which is unsuitable because it uses a simple list rather than a deque. Attributes: _data: A deque containing the logging records. """ def __init__(self, capacity): super().__init__() self.html_formatter = None if capacity != -1: self._data = collections.deque(maxlen=capacity) else: self._data = collections.deque() def emit(self, record): if record.levelno >= logging.DEBUG: # We don't log VDEBUG to RAM. self._data.append(record) def dump_log(self, html=False, level='vdebug'): """Dump the complete formatted log data as string. FIXME: We should do all the HTML formatter via jinja2. (probably obsolete when moving to a widget for logging, https://github.com/The-Compiler/qutebrowser/issues/34 """ minlevel = LOG_LEVELS.get(level.upper(), VDEBUG_LEVEL) lines = [] fmt = self.html_formatter.format if html else self.format self.acquire() try: records = list(self._data) finally: self.release() for record in records: if record.levelno >= minlevel: lines.append(fmt(record)) return '\n'.join(lines) class ColoredFormatter(logging.Formatter): """Logging formatter to output colored logs. Attributes: use_colors: Whether to do colored logging or not. """ def __init__(self, fmt, datefmt, style, *, use_colors): super().__init__(fmt, datefmt, style) self._use_colors = use_colors def format(self, record): if self._use_colors: color_dict = dict(COLOR_ESCAPES) color_dict['reset'] = RESET_ESCAPE log_color = LOG_COLORS[record.levelname] color_dict['log_color'] = COLOR_ESCAPES[log_color] else: color_dict = {color: '' for color in COLOR_ESCAPES} color_dict['reset'] = '' color_dict['log_color'] = '' record.__dict__.update(color_dict) return super().format(record) class HTMLFormatter(logging.Formatter): """Formatter for HTML-colored log messages. Attributes: _log_colors: The colors to use for logging levels. _colordict: The colordict passed to the logger. """ def __init__(self, fmt, datefmt, log_colors): """Constructor. Args: fmt: The format string to use. datefmt: The date format to use. log_colors: The colors to use for logging levels. """ super().__init__(fmt, datefmt) self._log_colors = log_colors self._colordict = {} # We could solve this nicer by using CSS, but for this simple case this # works. 
for color in COLORS: self._colordict[color] = '<font color="{}">'.format(color) self._colordict['reset'] = '</font>' def format(self, record): record.__dict__.update(self._colordict) if record.levelname in self._log_colors: color = self._log_colors[record.levelname] record.log_color = self._colordict[color] else: record.log_color = '' for field in ['msg', 'filename', 'funcName', 'levelname', 'module', 'name', 'pathname', 'processName', 'threadName']: data = str(getattr(record, field)) setattr(record, field, pyhtml.escape(data)) msg = super().format(record) if not msg.endswith(self._colordict['reset']): msg += self._colordict['reset'] return msg def formatTime(self, record, datefmt=None): out = super().formatTime(record, datefmt) return pyhtml.escape(out) class JSONFormatter(logging.Formatter): """Formatter for JSON-encoded log messages.""" def format(self, record): obj = {} for field in ['created', 'levelname', 'name', 'module', 'funcName', 'lineno', 'levelno']: obj[field] = getattr(record, field) obj['message'] = record.getMessage() if record.exc_info is not None: obj['traceback'] = super().formatException(record.exc_info) return json.dumps(obj)
1
15,581
You somehow got a change from `master` into your branch again, in e6d2167085688264e5ee6a81cfd2a7a8f10ded13. While it's not something terribly bad (as it will be fine after merging), it's kind of confusing, and I have no idea how it happens :laughing: Are you trying to update changes from `master` while working on your branch somehow? How do you do that?
qutebrowser-qutebrowser
py
@@ -3083,10 +3083,14 @@ initiate_request (OtPullData *pull_data, g_clear_pointer (&delta_from_revision, g_free); } - /* This is similar to the below, except we *might* use the previous - * commit, or we might do a scratch delta first. + /* If the current ref is different from the target, then this is similar + * to the below, except we *might* use the previous commit, or we might do + * a scratch delta first. */ - initiate_delta_request (pull_data, delta_from_revision ?: NULL, to_revision, ref); + if (!(delta_from_revision && g_str_equal (delta_from_revision, to_revision))) + initiate_delta_request (pull_data, delta_from_revision ?: NULL, to_revision, ref); + else + queue_scan_one_metadata_object (pull_data, to_revision, OSTREE_OBJECT_TYPE_COMMIT, NULL, 0, ref); } else {
1
/* * Copyright (C) 2011,2012,2013 Colin Walters <[email protected]> * Copyright © 2017 Endless Mobile, Inc. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. * * Authors: * - Colin Walters <[email protected]> * - Philip Withnall <[email protected]> */ #include "config.h" #include "libglnx.h" #include "ostree.h" #include "otutil.h" #ifdef HAVE_LIBCURL_OR_LIBSOUP #include "ostree-core-private.h" #include "ostree-repo-private.h" #include "ostree-repo-static-delta-private.h" #include "ostree-metalink.h" #include "ostree-fetcher-util.h" #include "ostree-remote-private.h" #include "ot-fs-utils.h" #ifdef OSTREE_ENABLE_EXPERIMENTAL_API #include "ostree-repo-finder.h" #include "ostree-repo-finder-config.h" #include "ostree-repo-finder-mount.h" #ifdef HAVE_AVAHI #include "ostree-repo-finder-avahi.h" #endif /* HAVE_AVAHI */ #endif /* OSTREE_ENABLE_EXPERIMENTAL_API */ #include <gio/gunixinputstream.h> #include <sys/statvfs.h> #ifdef HAVE_LIBSYSTEMD #include <systemd/sd-journal.h> #endif #define OSTREE_MESSAGE_FETCH_COMPLETE_ID SD_ID128_MAKE(75,ba,3d,eb,0a,f0,41,a9,a4,62,72,ff,85,d9,e7,3e) #define OSTREE_REPO_PULL_CONTENT_PRIORITY (OSTREE_FETCHER_DEFAULT_PRIORITY) #define OSTREE_REPO_PULL_METADATA_PRIORITY (OSTREE_REPO_PULL_CONTENT_PRIORITY - 100) typedef enum { OSTREE_FETCHER_SECURITY_STATE_CA_PINNED, OSTREE_FETCHER_SECURITY_STATE_TLS, OSTREE_FETCHER_SECURITY_STATE_INSECURE, } OstreeFetcherSecurityState; typedef struct { OstreeRepo *repo; int tmpdir_dfd; OstreeRepoPullFlags flags; char *remote_name; OstreeRepoMode remote_mode; OstreeFetcher *fetcher; OstreeFetcherSecurityState fetcher_security_state; GPtrArray *meta_mirrorlist; /* List of base URIs for fetching metadata */ GPtrArray *content_mirrorlist; /* List of base URIs for fetching content */ OstreeRepo *remote_repo_local; GPtrArray *localcache_repos; /* Array<OstreeRepo> */ GMainContext *main_context; GCancellable *cancellable; OstreeAsyncProgress *progress; GVariant *extra_headers; gboolean dry_run; gboolean dry_run_emitted_progress; gboolean legacy_transaction_resuming; enum { OSTREE_PULL_PHASE_FETCHING_REFS, OSTREE_PULL_PHASE_FETCHING_OBJECTS } phase; gint n_scanned_metadata; gboolean gpg_verify; gboolean require_static_deltas; gboolean disable_static_deltas; gboolean gpg_verify_summary; gboolean has_tombstone_commits; GBytes *summary_data; GBytes *summary_data_sig; GVariant *summary; GHashTable *summary_deltas_checksums; GHashTable *ref_original_commits; /* Maps checksum to commit, used by timestamp checks */ GPtrArray *static_delta_superblocks; GHashTable *expected_commit_sizes; /* Maps commit checksum to known size */ GHashTable *commit_to_depth; /* Maps commit checksum maximum depth */ GHashTable *scanned_metadata; /* Maps object name to itself */ GHashTable *fetched_detached_metadata; /* Set<checksum> */ GHashTable *requested_metadata; /* Maps object name to itself */ 
GHashTable *requested_content; /* Maps checksum to itself */ GHashTable *requested_fallback_content; /* Maps checksum to itself */ GHashTable *pending_fetch_metadata; /* Map<ObjectName,FetchObjectData> */ GHashTable *pending_fetch_content; /* Map<checksum,FetchObjectData> */ GHashTable *pending_fetch_deltaparts; /* Set<FetchStaticDeltaData> */ guint n_outstanding_metadata_fetches; guint n_outstanding_metadata_write_requests; guint n_outstanding_content_fetches; guint n_outstanding_content_write_requests; guint n_outstanding_deltapart_fetches; guint n_outstanding_deltapart_write_requests; guint n_total_deltaparts; guint n_total_delta_fallbacks; guint64 fetched_deltapart_size; /* How much of the delta we have now */ guint64 total_deltapart_size; guint64 total_deltapart_usize; gint n_requested_metadata; gint n_requested_content; guint n_fetched_deltaparts; guint n_fetched_deltapart_fallbacks; guint n_fetched_metadata; guint n_fetched_content; /* Objects from pull --localcache-repo */ guint n_fetched_localcache_metadata; guint n_fetched_localcache_content; gboolean timestamp_check; /* Verify commit timestamps */ int maxdepth; guint64 start_time; gboolean is_mirror; gboolean is_commit_only; OstreeRepoImportFlags importflags; GPtrArray *dirs; gboolean have_previous_bytes; guint64 previous_bytes_sec; guint64 previous_total_downloaded; GError *cached_async_error; GError **async_error; gboolean caught_error; GQueue scan_object_queue; GSource *idle_src; } OtPullData; typedef struct { OtPullData *pull_data; GVariant *object; char *path; gboolean is_detached_meta; /* Only relevant when is_detached_meta is TRUE. Controls * whether to fetch the primary object after fetching its * detached metadata (no need if it's already stored). */ gboolean object_is_stored; OstreeCollectionRef *requested_ref; /* (nullable) */ } FetchObjectData; typedef struct { OtPullData *pull_data; GVariant *objects; char *expected_checksum; char *from_revision; char *to_revision; guint i; guint64 size; } FetchStaticDeltaData; typedef struct { guchar csum[OSTREE_SHA256_DIGEST_LEN]; char *path; OstreeObjectType objtype; guint recursion_depth; /* NB: not used anymore, though might be nice to print */ OstreeCollectionRef *requested_ref; /* (nullable) */ } ScanObjectQueueData; static void start_fetch (OtPullData *pull_data, FetchObjectData *fetch); static void start_fetch_deltapart (OtPullData *pull_data, FetchStaticDeltaData *fetch); static gboolean fetcher_queue_is_full (OtPullData *pull_data); static void queue_scan_one_metadata_object (OtPullData *pull_data, const char *csum, OstreeObjectType objtype, const char *path, guint recursion_depth, const OstreeCollectionRef *ref); static void queue_scan_one_metadata_object_c (OtPullData *pull_data, const guchar *csum, OstreeObjectType objtype, const char *path, guint recursion_depth, const OstreeCollectionRef *ref); static gboolean scan_one_metadata_object_c (OtPullData *pull_data, const guchar *csum, OstreeObjectType objtype, const char *path, guint recursion_depth, const OstreeCollectionRef *ref, GCancellable *cancellable, GError **error); static void scan_object_queue_data_free (ScanObjectQueueData *scan_data); static gboolean update_progress (gpointer user_data) { OtPullData *pull_data; guint outstanding_writes; guint outstanding_fetches; guint64 bytes_transferred; guint fetched; guint requested; guint n_scanned_metadata; guint64 start_time; pull_data = user_data; if (! 
pull_data->progress) return FALSE; /* In dry run, we only emit progress once metadata is done */ if (pull_data->dry_run && pull_data->n_outstanding_metadata_fetches > 0) return TRUE; outstanding_writes = pull_data->n_outstanding_content_write_requests + pull_data->n_outstanding_metadata_write_requests + pull_data->n_outstanding_deltapart_write_requests; outstanding_fetches = pull_data->n_outstanding_content_fetches + pull_data->n_outstanding_metadata_fetches + pull_data->n_outstanding_deltapart_fetches; bytes_transferred = _ostree_fetcher_bytes_transferred (pull_data->fetcher); fetched = pull_data->n_fetched_metadata + pull_data->n_fetched_content; requested = pull_data->n_requested_metadata + pull_data->n_requested_content; n_scanned_metadata = pull_data->n_scanned_metadata; start_time = pull_data->start_time; ostree_async_progress_set (pull_data->progress, "outstanding-fetches", "u", outstanding_fetches, "outstanding-writes", "u", outstanding_writes, "fetched", "u", fetched, "requested", "u", requested, "scanning", "u", g_queue_is_empty (&pull_data->scan_object_queue) ? 0 : 1, "caught-error", "b", pull_data->caught_error, "scanned-metadata", "u", n_scanned_metadata, "bytes-transferred", "t", bytes_transferred, "start-time", "t", start_time, "metadata-fetched-localcache", "u", pull_data->n_fetched_localcache_metadata, "content-fetched-localcache", "u", pull_data->n_fetched_localcache_content, /* Deltas */ "fetched-delta-parts", "u", pull_data->n_fetched_deltaparts, "total-delta-parts", "u", pull_data->n_total_deltaparts, "fetched-delta-fallbacks", "u", pull_data->n_fetched_deltapart_fallbacks, "total-delta-fallbacks", "u", pull_data->n_total_delta_fallbacks, "fetched-delta-part-size", "t", pull_data->fetched_deltapart_size, "total-delta-part-size", "t", pull_data->total_deltapart_size, "total-delta-part-usize", "t", pull_data->total_deltapart_usize, "total-delta-superblocks", "u", pull_data->static_delta_superblocks->len, /* We fetch metadata before content. These allow us to report metadata fetch progress specifically. */ "outstanding-metadata-fetches", "u", pull_data->n_outstanding_metadata_fetches, "metadata-fetched", "u", pull_data->n_fetched_metadata, /* Overall status. */ "status", "s", "", NULL); if (pull_data->dry_run) pull_data->dry_run_emitted_progress = TRUE; return TRUE; } /* The core logic function for whether we should continue the main loop */ static gboolean pull_termination_condition (OtPullData *pull_data) { gboolean current_fetch_idle = (pull_data->n_outstanding_metadata_fetches == 0 && pull_data->n_outstanding_content_fetches == 0 && pull_data->n_outstanding_deltapart_fetches == 0); gboolean current_write_idle = (pull_data->n_outstanding_metadata_write_requests == 0 && pull_data->n_outstanding_content_write_requests == 0 && pull_data->n_outstanding_deltapart_write_requests == 0 ); gboolean current_scan_idle = g_queue_is_empty (&pull_data->scan_object_queue); gboolean current_idle = current_fetch_idle && current_write_idle && current_scan_idle; /* we only enter the main loop when we're fetching objects */ g_assert (pull_data->phase == OSTREE_PULL_PHASE_FETCHING_OBJECTS); if (pull_data->dry_run) return pull_data->dry_run_emitted_progress; if (current_idle) g_debug ("pull: idle, exiting mainloop"); return current_idle; } /* Most async operations finish by calling this function; it will consume * @errorp if set, update statistics, and initiate processing of any further * requests as appropriate. 
*/ static void check_outstanding_requests_handle_error (OtPullData *pull_data, GError **errorp) { g_assert (errorp); GError *error = *errorp; if (error) { if (!pull_data->caught_error) { pull_data->caught_error = TRUE; g_propagate_error (pull_data->async_error, g_steal_pointer (errorp)); } else { g_clear_error (errorp); } } /* If we're in error state, we wait for any pending operations to complete, * but ensure that all no further operations are queued. */ if (pull_data->caught_error) { g_queue_foreach (&pull_data->scan_object_queue, (GFunc) scan_object_queue_data_free, NULL); g_queue_clear (&pull_data->scan_object_queue); g_hash_table_remove_all (pull_data->pending_fetch_metadata); g_hash_table_remove_all (pull_data->pending_fetch_deltaparts); g_hash_table_remove_all (pull_data->pending_fetch_content); } else { GHashTableIter hiter; gpointer key, value; /* We may have just completed an async fetch operation. Now we look at * possibly enqueuing more requests. The goal of queuing is to both avoid * overloading the fetcher backend with HTTP requests, but also to * prioritize metadata fetches over content, so we have accurate * reporting. Hence here, we process metadata fetches first. */ /* Try filling the queue with metadata we need to fetch */ g_hash_table_iter_init (&hiter, pull_data->pending_fetch_metadata); while (!fetcher_queue_is_full (pull_data) && g_hash_table_iter_next (&hiter, &key, &value)) { GVariant *objname = key; FetchObjectData *fetch = value; /* Steal both key and value */ g_hash_table_iter_steal (&hiter); /* This takes ownership of the value */ start_fetch (pull_data, fetch); /* And unref the key */ g_variant_unref (objname); } /* Now, process deltapart requests */ g_hash_table_iter_init (&hiter, pull_data->pending_fetch_deltaparts); while (!fetcher_queue_is_full (pull_data) && g_hash_table_iter_next (&hiter, &key, &value)) { FetchStaticDeltaData *fetch = key; g_hash_table_iter_steal (&hiter); /* Takes ownership */ start_fetch_deltapart (pull_data, fetch); } /* Next, fill the queue with content */ g_hash_table_iter_init (&hiter, pull_data->pending_fetch_content); while (!fetcher_queue_is_full (pull_data) && g_hash_table_iter_next (&hiter, &key, &value)) { char *checksum = key; FetchObjectData *fetch = value; /* Steal both key and value */ g_hash_table_iter_steal (&hiter); /* This takes ownership of the value */ start_fetch (pull_data, fetch); /* And unref the key */ g_free (checksum); } } } /* We have a total-request limit, as well has a hardcoded max of 2 for delta * parts. The logic for the delta one is that processing them is expensive, and * doing multiple simultaneously could risk space/memory on smaller devices. We * also throttle on outstanding writes in case fetches are faster. 
*/ static gboolean fetcher_queue_is_full (OtPullData *pull_data) { const gboolean fetch_full = ((pull_data->n_outstanding_metadata_fetches + pull_data->n_outstanding_content_fetches + pull_data->n_outstanding_deltapart_fetches) == _OSTREE_MAX_OUTSTANDING_FETCHER_REQUESTS); const gboolean deltas_full = (pull_data->n_outstanding_deltapart_fetches == _OSTREE_MAX_OUTSTANDING_DELTAPART_REQUESTS); const gboolean writes_full = ((pull_data->n_outstanding_metadata_write_requests + pull_data->n_outstanding_content_write_requests + pull_data->n_outstanding_deltapart_write_requests) >= _OSTREE_MAX_OUTSTANDING_WRITE_REQUESTS); return fetch_full || deltas_full || writes_full; } static void scan_object_queue_data_free (ScanObjectQueueData *scan_data) { g_free (scan_data->path); if (scan_data->requested_ref != NULL) ostree_collection_ref_free (scan_data->requested_ref); g_free (scan_data); } static gboolean idle_worker (gpointer user_data) { OtPullData *pull_data = user_data; ScanObjectQueueData *scan_data; g_autoptr(GError) error = NULL; scan_data = g_queue_pop_head (&pull_data->scan_object_queue); if (!scan_data) { g_clear_pointer (&pull_data->idle_src, (GDestroyNotify) g_source_destroy); return G_SOURCE_REMOVE; } scan_one_metadata_object_c (pull_data, scan_data->csum, scan_data->objtype, scan_data->path, scan_data->recursion_depth, scan_data->requested_ref, pull_data->cancellable, &error); check_outstanding_requests_handle_error (pull_data, &error); scan_object_queue_data_free (scan_data); return G_SOURCE_CONTINUE; } static void ensure_idle_queued (OtPullData *pull_data) { GSource *idle_src; if (pull_data->idle_src) return; idle_src = g_idle_source_new (); g_source_set_callback (idle_src, idle_worker, pull_data, NULL); g_source_attach (idle_src, pull_data->main_context); g_source_unref (idle_src); pull_data->idle_src = idle_src; } typedef struct { OtPullData *pull_data; GInputStream *result_stream; } OstreeFetchUriSyncData; static gboolean fetch_mirrored_uri_contents_utf8_sync (OstreeFetcher *fetcher, GPtrArray *mirrorlist, const char *filename, char **out_contents, GCancellable *cancellable, GError **error) { g_autoptr(GBytes) bytes = NULL; if (!_ostree_fetcher_mirrored_request_to_membuf (fetcher, mirrorlist, filename, OSTREE_FETCHER_REQUEST_NUL_TERMINATION, &bytes, OSTREE_MAX_METADATA_SIZE, cancellable, error)) return FALSE; gsize len; g_autofree char *ret_contents = g_bytes_unref_to_data (g_steal_pointer (&bytes), &len); if (!g_utf8_validate (ret_contents, -1, NULL)) return glnx_throw (error, "Invalid UTF-8"); ot_transfer_out_value (out_contents, &ret_contents); return TRUE; } static gboolean fetch_uri_contents_utf8_sync (OstreeFetcher *fetcher, OstreeFetcherURI *uri, char **out_contents, GCancellable *cancellable, GError **error) { g_autoptr(GPtrArray) mirrorlist = g_ptr_array_new (); g_ptr_array_add (mirrorlist, uri); /* no transfer */ return fetch_mirrored_uri_contents_utf8_sync (fetcher, mirrorlist, NULL, out_contents, cancellable, error); } static gboolean write_commitpartial_for (OtPullData *pull_data, const char *checksum, GError **error) { g_autofree char *commitpartial_path = _ostree_get_commitpartial_path (checksum); glnx_fd_close int fd = openat (pull_data->repo->repo_dir_fd, commitpartial_path, O_EXCL | O_CREAT | O_WRONLY | O_CLOEXEC | O_NOCTTY, 0644); if (fd == -1) { if (errno != EEXIST) return glnx_throw_errno_prefix (error, "open(%s)", commitpartial_path); } return TRUE; } static void enqueue_one_object_request (OtPullData *pull_data, const char *checksum, OstreeObjectType objtype, 
const char *path, gboolean is_detached_meta, gboolean object_is_stored, const OstreeCollectionRef *ref); static gboolean matches_pull_dir (const char *current_file, const char *pull_dir, gboolean current_file_is_dir) { const char *rest; if (g_str_has_prefix (pull_dir, current_file)) { rest = pull_dir + strlen (current_file); if (*rest == 0) { /* The current file is exactly the same as the specified pull dir. This matches always, even if the file is not a directory. */ return TRUE; } if (*rest == '/') { /* The current file is a directory-prefix of the pull_dir. Match only if this is supposed to be a directory */ return current_file_is_dir; } /* Matched a non-directory prefix such as /foo being a prefix of /fooo, no match */ return FALSE; } if (g_str_has_prefix (current_file, pull_dir)) { rest = current_file + strlen (pull_dir); /* Only match if the prefix match matched the entire directory component */ return *rest == '/'; } return FALSE; } static gboolean pull_matches_subdir (OtPullData *pull_data, const char *path, const char *basename, gboolean basename_is_dir) { if (pull_data->dirs == NULL) return TRUE; g_autofree char *file = g_strconcat (path, basename, NULL); for (guint i = 0; i < pull_data->dirs->len; i++) { const char *pull_dir = g_ptr_array_index (pull_data->dirs, i); if (matches_pull_dir (file, pull_dir, basename_is_dir)) return TRUE; } return FALSE; } typedef struct { OtPullData *pull_data; OstreeRepo *src_repo; char checksum[OSTREE_SHA256_STRING_LEN+1]; } ImportLocalAsyncData; /* Asynchronously import a single content object. @src_repo is either * pull_data->remote_repo_local or one of pull_data->localcache_repos. */ static void async_import_in_thread (GTask *task, gpointer source, gpointer task_data, GCancellable *cancellable) { ImportLocalAsyncData *iataskdata = task_data; OtPullData *pull_data = iataskdata->pull_data; g_autoptr(GError) local_error = NULL; /* pull_data->importflags was set up in the pull option processing */ if (!_ostree_repo_import_object (pull_data->repo, iataskdata->src_repo, OSTREE_OBJECT_TYPE_FILE, iataskdata->checksum, pull_data->importflags, cancellable, &local_error)) g_task_return_error (task, g_steal_pointer (&local_error)); else g_task_return_boolean (task, TRUE); } /* Start an async import of a single object; currently used for content objects. * @src_repo is from pull_data->remote_repo_local or * pull_data->localcache_repos. * * One important special case here is handling the * OSTREE_REPO_PULL_FLAGS_BAREUSERONLY_FILES flag. 
*/ static void async_import_one_local_content_object (OtPullData *pull_data, OstreeRepo *src_repo, const char *checksum, GCancellable *cancellable, GAsyncReadyCallback callback, gpointer user_data) { ImportLocalAsyncData *iataskdata = g_new0 (ImportLocalAsyncData, 1); iataskdata->pull_data = pull_data; iataskdata->src_repo = src_repo; memcpy (iataskdata->checksum, checksum, OSTREE_SHA256_STRING_LEN); g_autoptr(GTask) task = g_task_new (pull_data->repo, cancellable, callback, user_data); g_task_set_source_tag (task, async_import_one_local_content_object); g_task_set_task_data (task, iataskdata, g_free); pull_data->n_outstanding_content_write_requests++; g_task_run_in_thread (task, async_import_in_thread); } static gboolean async_import_one_local_content_object_finish (OtPullData *pull_data, GAsyncResult *result, GError **error) { g_return_val_if_fail (g_task_is_valid (result, pull_data->repo), FALSE); return g_task_propagate_boolean ((GTask*)result, error); } static void on_local_object_imported (GObject *object, GAsyncResult *result, gpointer user_data) { OtPullData *pull_data = user_data; g_autoptr(GError) local_error = NULL; GError **error = &local_error; if (!async_import_one_local_content_object_finish (pull_data, result, error)) goto out; out: g_assert_cmpint (pull_data->n_outstanding_content_write_requests, >, 0); pull_data->n_outstanding_content_write_requests--; check_outstanding_requests_handle_error (pull_data, &local_error); } static gboolean scan_dirtree_object (OtPullData *pull_data, const char *checksum, const char *path, int recursion_depth, GCancellable *cancellable, GError **error) { g_autoptr(GVariant) tree = NULL; if (!ostree_repo_load_variant (pull_data->repo, OSTREE_OBJECT_TYPE_DIR_TREE, checksum, &tree, error)) return FALSE; /* PARSE OSTREE_SERIALIZED_TREE_VARIANT */ g_autoptr(GVariant) files_variant = g_variant_get_child_value (tree, 0); const guint n = g_variant_n_children (files_variant); for (guint i = 0; i < n; i++) { const char *filename; gboolean file_is_stored; g_autoptr(GVariant) csum = NULL; g_autofree char *file_checksum = NULL; g_variant_get_child (files_variant, i, "(&s@ay)", &filename, &csum); if (!ot_util_filename_validate (filename, error)) return FALSE; /* Skip files if we're traversing a request only directory, unless it exactly * matches the path */ if (!pull_matches_subdir (pull_data, path, filename, FALSE)) continue; file_checksum = ostree_checksum_from_bytes_v (csum); if (!ostree_repo_has_object (pull_data->repo, OSTREE_OBJECT_TYPE_FILE, file_checksum, &file_is_stored, cancellable, error)) return FALSE; /* If we already have this object, move on to the next */ if (file_is_stored) continue; /* Already have a request pending? If so, move on to the next */ if (g_hash_table_lookup (pull_data->requested_content, file_checksum)) continue; /* Is this a local repo? 
*/ if (pull_data->remote_repo_local) { async_import_one_local_content_object (pull_data, pull_data->remote_repo_local, file_checksum, cancellable, on_local_object_imported, pull_data); g_hash_table_add (pull_data->requested_content, g_steal_pointer (&file_checksum)); /* Note early loop continue */ continue; } /* We're doing HTTP, but see if we have the object in a local cache first */ gboolean did_import_from_cache_repo = FALSE; if (pull_data->localcache_repos) { for (guint j = 0; j < pull_data->localcache_repos->len; j++) { OstreeRepo *localcache_repo = pull_data->localcache_repos->pdata[j]; gboolean localcache_repo_has_obj; if (!ostree_repo_has_object (localcache_repo, OSTREE_OBJECT_TYPE_FILE, file_checksum, &localcache_repo_has_obj, cancellable, error)) return FALSE; if (!localcache_repo_has_obj) continue; async_import_one_local_content_object (pull_data, localcache_repo, file_checksum, cancellable, on_local_object_imported, pull_data); g_hash_table_add (pull_data->requested_content, g_steal_pointer (&file_checksum)); did_import_from_cache_repo = TRUE; pull_data->n_fetched_localcache_content++; break; } } if (did_import_from_cache_repo) continue; /* Note early continue */ /* Not available locally, queue a HTTP request */ g_hash_table_add (pull_data->requested_content, file_checksum); enqueue_one_object_request (pull_data, file_checksum, OSTREE_OBJECT_TYPE_FILE, path, FALSE, FALSE, NULL); file_checksum = NULL; /* Transfer ownership */ } g_autoptr(GVariant) dirs_variant = g_variant_get_child_value (tree, 1); const guint m = g_variant_n_children (dirs_variant); for (guint i = 0; i < m; i++) { const char *dirname = NULL; g_autoptr(GVariant) tree_csum = NULL; g_autoptr(GVariant) meta_csum = NULL; g_variant_get_child (dirs_variant, i, "(&s@ay@ay)", &dirname, &tree_csum, &meta_csum); if (!ot_util_filename_validate (dirname, error)) return FALSE; if (!pull_matches_subdir (pull_data, path, dirname, TRUE)) continue; const guchar *tree_csum_bytes = ostree_checksum_bytes_peek_validate (tree_csum, error); if (tree_csum_bytes == NULL) return FALSE; const guchar *meta_csum_bytes = ostree_checksum_bytes_peek_validate (meta_csum, error); if (meta_csum_bytes == NULL) return FALSE; g_autofree char *subpath = g_strconcat (path, dirname, "/", NULL); queue_scan_one_metadata_object_c (pull_data, tree_csum_bytes, OSTREE_OBJECT_TYPE_DIR_TREE, subpath, recursion_depth + 1, NULL); queue_scan_one_metadata_object_c (pull_data, meta_csum_bytes, OSTREE_OBJECT_TYPE_DIR_META, subpath, recursion_depth + 1, NULL); } return TRUE; } /* Given a @ref, fetch its contents (should be a SHA256 ASCII string) */ static gboolean fetch_ref_contents (OtPullData *pull_data, const char *main_collection_id, const OstreeCollectionRef *ref, char **out_contents, GCancellable *cancellable, GError **error) { g_autofree char *filename = NULL; if (ref->collection_id == NULL || g_strcmp0 (ref->collection_id, main_collection_id) == 0) filename = g_build_filename ("refs", "heads", ref->ref_name, NULL); else filename = g_build_filename ("refs", "mirrors", ref->collection_id, ref->ref_name, NULL); g_autofree char *ret_contents = NULL; if (!fetch_mirrored_uri_contents_utf8_sync (pull_data->fetcher, pull_data->meta_mirrorlist, filename, &ret_contents, cancellable, error)) return FALSE; g_strchomp (ret_contents); if (!ostree_validate_checksum_string (ret_contents, error)) return glnx_prefix_error (error, "Fetching %s", filename); ot_transfer_out_value (out_contents, &ret_contents); return TRUE; } static gboolean 
lookup_commit_checksum_and_collection_from_summary (OtPullData *pull_data, const OstreeCollectionRef *ref, char **out_checksum, gsize *out_size, char **out_collection_id, GError **error) { g_autoptr(GVariant) additional_metadata = g_variant_get_child_value (pull_data->summary, 1); const gchar *main_collection_id; if (!g_variant_lookup (additional_metadata, OSTREE_SUMMARY_COLLECTION_ID, "&s", &main_collection_id)) main_collection_id = NULL; g_autoptr(GVariant) refs = NULL; const gchar *resolved_collection_id = NULL; if (ref->collection_id == NULL || g_strcmp0 (ref->collection_id, main_collection_id) == 0) { refs = g_variant_get_child_value (pull_data->summary, 0); resolved_collection_id = main_collection_id; } else if (ref->collection_id != NULL) { g_autoptr(GVariant) collection_map = NULL; collection_map = g_variant_lookup_value (additional_metadata, OSTREE_SUMMARY_COLLECTION_MAP, G_VARIANT_TYPE ("a{sa(s(taya{sv}))}")); if (collection_map != NULL) refs = g_variant_lookup_value (collection_map, ref->collection_id, G_VARIANT_TYPE ("a(s(taya{sv}))")); resolved_collection_id = ref->collection_id; } int i; if (refs == NULL || !ot_variant_bsearch_str (refs, ref->ref_name, &i)) { if (ref->collection_id != NULL) return glnx_throw (error, "No such branch (%s, %s) in repository summary", ref->collection_id, ref->ref_name); else return glnx_throw (error, "No such branch '%s' in repository summary", ref->ref_name); } g_autoptr(GVariant) refdata = g_variant_get_child_value (refs, i); g_autoptr(GVariant) reftargetdata = g_variant_get_child_value (refdata, 1); guint64 commit_size; g_autoptr(GVariant) commit_csum_v = NULL; g_variant_get (reftargetdata, "(t@ay@a{sv})", &commit_size, &commit_csum_v, NULL); if (resolved_collection_id != NULL && !ostree_validate_collection_id (resolved_collection_id, error)) return FALSE; if (!ostree_validate_structureof_csum_v (commit_csum_v, error)) return FALSE; *out_checksum = ostree_checksum_from_bytes_v (commit_csum_v); *out_size = commit_size; *out_collection_id = g_strdup (resolved_collection_id); return TRUE; } static void fetch_object_data_free (FetchObjectData *fetch_data) { g_variant_unref (fetch_data->object); g_free (fetch_data->path); if (fetch_data->requested_ref) ostree_collection_ref_free (fetch_data->requested_ref); g_free (fetch_data); } static void content_fetch_on_write_complete (GObject *object, GAsyncResult *result, gpointer user_data) { FetchObjectData *fetch_data = user_data; OtPullData *pull_data = fetch_data->pull_data; g_autoptr(GError) local_error = NULL; GError **error = &local_error; OstreeObjectType objtype; const char *expected_checksum; g_autofree guchar *csum = NULL; g_autofree char *checksum = NULL; g_autofree char *checksum_obj = NULL; if (!ostree_repo_write_content_finish ((OstreeRepo*)object, result, &csum, error)) goto out; checksum = ostree_checksum_from_bytes (csum); ostree_object_name_deserialize (fetch_data->object, &expected_checksum, &objtype); g_assert (objtype == OSTREE_OBJECT_TYPE_FILE); checksum_obj = ostree_object_to_string (checksum, objtype); g_debug ("write of %s complete", checksum_obj); if (strcmp (checksum, expected_checksum) != 0) { g_set_error (error, G_IO_ERROR, G_IO_ERROR_FAILED, "Corrupted content object; checksum expected='%s' actual='%s'", expected_checksum, checksum); goto out; } pull_data->n_fetched_content++; /* Was this a delta fallback? 
*/ if (g_hash_table_remove (pull_data->requested_fallback_content, expected_checksum)) pull_data->n_fetched_deltapart_fallbacks++; out: pull_data->n_outstanding_content_write_requests--; check_outstanding_requests_handle_error (pull_data, &local_error); fetch_object_data_free (fetch_data); } static void content_fetch_on_complete (GObject *object, GAsyncResult *result, gpointer user_data) { OstreeFetcher *fetcher = (OstreeFetcher *)object; FetchObjectData *fetch_data = user_data; OtPullData *pull_data = fetch_data->pull_data; g_autoptr(GError) local_error = NULL; GError **error = &local_error; GCancellable *cancellable = NULL; guint64 length; g_autoptr(GFileInfo) file_info = NULL; g_autoptr(GVariant) xattrs = NULL; g_autoptr(GInputStream) file_in = NULL; g_autoptr(GInputStream) object_input = NULL; g_auto(OtCleanupUnlinkat) tmp_unlinker = { _ostree_fetcher_get_dfd (fetcher), NULL }; const char *checksum; g_autofree char *checksum_obj = NULL; OstreeObjectType objtype; gboolean free_fetch_data = TRUE; if (!_ostree_fetcher_request_to_tmpfile_finish (fetcher, result, &tmp_unlinker.path, error)) goto out; ostree_object_name_deserialize (fetch_data->object, &checksum, &objtype); g_assert (objtype == OSTREE_OBJECT_TYPE_FILE); checksum_obj = ostree_object_to_string (checksum, objtype); g_debug ("fetch of %s complete", checksum_obj); const gboolean verifying_bareuseronly = (pull_data->importflags & _OSTREE_REPO_IMPORT_FLAGS_VERIFY_BAREUSERONLY) > 0; /* If we're mirroring and writing into an archive repo, and both checksum and * bareuseronly are turned off, we can directly copy the content rather than * paying the cost of exploding it, checksumming, and re-gzip. */ const gboolean mirroring_into_archive = pull_data->is_mirror && pull_data->repo->mode == OSTREE_REPO_MODE_ARCHIVE; const gboolean import_trusted = !verifying_bareuseronly && (pull_data->importflags & _OSTREE_REPO_IMPORT_FLAGS_TRUSTED) > 0; if (mirroring_into_archive && import_trusted) { gboolean have_object; if (!ostree_repo_has_object (pull_data->repo, OSTREE_OBJECT_TYPE_FILE, checksum, &have_object, cancellable, error)) goto out; if (!have_object) { if (!_ostree_repo_commit_path_final (pull_data->repo, checksum, objtype, &tmp_unlinker, cancellable, error)) goto out; } pull_data->n_fetched_content++; } else { /* Non-mirroring path */ /* If it appears corrupted, we'll delete it below */ if (!ostree_content_file_parse_at (TRUE, _ostree_fetcher_get_dfd (fetcher), tmp_unlinker.path, FALSE, &file_in, &file_info, &xattrs, cancellable, error)) goto out; /* Also, delete it now that we've opened it, we'll hold * a reference to the fd. If we fail to validate or write, then * the temp space will be cleaned up. 
*/ ot_cleanup_unlinkat (&tmp_unlinker); if (verifying_bareuseronly) { if (!_ostree_validate_bareuseronly_mode_finfo (file_info, checksum, error)) goto out; } if (!ostree_raw_file_to_content_stream (file_in, file_info, xattrs, &object_input, &length, cancellable, error)) goto out; pull_data->n_outstanding_content_write_requests++; ostree_repo_write_content_async (pull_data->repo, checksum, object_input, length, cancellable, content_fetch_on_write_complete, fetch_data); free_fetch_data = FALSE; } out: pull_data->n_outstanding_content_fetches--; check_outstanding_requests_handle_error (pull_data, &local_error); if (free_fetch_data) fetch_object_data_free (fetch_data); } static void on_metadata_written (GObject *object, GAsyncResult *result, gpointer user_data) { FetchObjectData *fetch_data = user_data; OtPullData *pull_data = fetch_data->pull_data; g_autoptr(GError) local_error = NULL; GError **error = &local_error; const char *expected_checksum; OstreeObjectType objtype; g_autofree char *checksum = NULL; g_autofree guchar *csum = NULL; g_autofree char *stringified_object = NULL; if (!ostree_repo_write_metadata_finish ((OstreeRepo*)object, result, &csum, error)) goto out; checksum = ostree_checksum_from_bytes (csum); ostree_object_name_deserialize (fetch_data->object, &expected_checksum, &objtype); g_assert (OSTREE_OBJECT_TYPE_IS_META (objtype)); stringified_object = ostree_object_to_string (checksum, objtype); g_debug ("write of %s complete", stringified_object); if (strcmp (checksum, expected_checksum) != 0) { g_set_error (error, G_IO_ERROR, G_IO_ERROR_FAILED, "Corrupted metadata object; checksum expected='%s' actual='%s'", expected_checksum, checksum); goto out; } queue_scan_one_metadata_object_c (pull_data, csum, objtype, fetch_data->path, 0, fetch_data->requested_ref); out: pull_data->n_outstanding_metadata_write_requests--; fetch_object_data_free (fetch_data); check_outstanding_requests_handle_error (pull_data, &local_error); } static void meta_fetch_on_complete (GObject *object, GAsyncResult *result, gpointer user_data) { OstreeFetcher *fetcher = (OstreeFetcher *)object; FetchObjectData *fetch_data = user_data; OtPullData *pull_data = fetch_data->pull_data; g_autoptr(GVariant) metadata = NULL; g_auto(OtCleanupUnlinkat) tmp_unlinker = { _ostree_fetcher_get_dfd (fetcher), NULL }; const char *checksum; g_autofree char *checksum_obj = NULL; OstreeObjectType objtype; g_autoptr(GError) local_error = NULL; GError **error = &local_error; glnx_fd_close int fd = -1; gboolean free_fetch_data = TRUE; ostree_object_name_deserialize (fetch_data->object, &checksum, &objtype); checksum_obj = ostree_object_to_string (checksum, objtype); g_debug ("fetch of %s%s complete", checksum_obj, fetch_data->is_detached_meta ? 
" (detached)" : ""); if (!_ostree_fetcher_request_to_tmpfile_finish (fetcher, result, &tmp_unlinker.path, error)) { if (g_error_matches (local_error, G_IO_ERROR, G_IO_ERROR_NOT_FOUND)) { if (fetch_data->is_detached_meta) { /* There isn't any detached metadata, just fetch the commit */ g_clear_error (&local_error); /* Now that we've at least tried to fetch it, we can proceed to * scan/fetch the commit object */ g_hash_table_add (pull_data->fetched_detached_metadata, g_strdup (checksum)); if (!fetch_data->object_is_stored) enqueue_one_object_request (pull_data, checksum, objtype, fetch_data->path, FALSE, FALSE, fetch_data->requested_ref); else queue_scan_one_metadata_object (pull_data, checksum, objtype, fetch_data->path, 0, fetch_data->requested_ref); } /* When traversing parents, do not fail on a missing commit. * We may be pulling from a partial repository that ends in * a dangling parent reference. */ else if (objtype == OSTREE_OBJECT_TYPE_COMMIT && pull_data->maxdepth != 0) { g_clear_error (&local_error); /* If the remote repo supports tombstone commits, check if the commit was intentionally deleted. */ if (pull_data->has_tombstone_commits) { enqueue_one_object_request (pull_data, checksum, OSTREE_OBJECT_TYPE_TOMBSTONE_COMMIT, fetch_data->path, FALSE, FALSE, NULL); } } } goto out; } /* Tombstone commits are always empty, so skip all processing here */ if (objtype == OSTREE_OBJECT_TYPE_TOMBSTONE_COMMIT) goto out; if (!glnx_openat_rdonly (_ostree_fetcher_get_dfd (fetcher), tmp_unlinker.path, TRUE, &fd, error)) goto out; /* Now delete it, keeping the fd open as the last reference; see comment in * corresponding content fetch path. */ ot_cleanup_unlinkat (&tmp_unlinker); if (fetch_data->is_detached_meta) { if (!ot_util_variant_map_fd (fd, 0, G_VARIANT_TYPE ("a{sv}"), FALSE, &metadata, error)) goto out; if (!ostree_repo_write_commit_detached_metadata (pull_data->repo, checksum, metadata, pull_data->cancellable, error)) goto out; g_hash_table_add (pull_data->fetched_detached_metadata, g_strdup (checksum)); if (!fetch_data->object_is_stored) enqueue_one_object_request (pull_data, checksum, objtype, fetch_data->path, FALSE, FALSE, fetch_data->requested_ref); else queue_scan_one_metadata_object (pull_data, checksum, objtype, fetch_data->path, 0, fetch_data->requested_ref); } else { if (!ot_util_variant_map_fd (fd, 0, ostree_metadata_variant_type (objtype), FALSE, &metadata, error)) goto out; /* Write the commitpartial file now while we're still fetching data */ if (objtype == OSTREE_OBJECT_TYPE_COMMIT) { if (!write_commitpartial_for (pull_data, checksum, error)) goto out; } ostree_repo_write_metadata_async (pull_data->repo, objtype, checksum, metadata, pull_data->cancellable, on_metadata_written, fetch_data); pull_data->n_outstanding_metadata_write_requests++; free_fetch_data = FALSE; } out: g_assert (pull_data->n_outstanding_metadata_fetches > 0); pull_data->n_outstanding_metadata_fetches--; pull_data->n_fetched_metadata++; check_outstanding_requests_handle_error (pull_data, &local_error); if (free_fetch_data) fetch_object_data_free (fetch_data); } static void fetch_static_delta_data_free (gpointer data) { FetchStaticDeltaData *fetch_data = data; g_free (fetch_data->expected_checksum); g_variant_unref (fetch_data->objects); g_free (fetch_data->from_revision); g_free (fetch_data->to_revision); g_free (fetch_data); } static void on_static_delta_written (GObject *object, GAsyncResult *result, gpointer user_data) { FetchStaticDeltaData *fetch_data = user_data; OtPullData *pull_data = 
fetch_data->pull_data; g_autoptr(GError) local_error = NULL; GError **error = &local_error; g_debug ("execute static delta part %s complete", fetch_data->expected_checksum); if (!_ostree_static_delta_part_execute_finish (pull_data->repo, result, error)) goto out; out: g_assert (pull_data->n_outstanding_deltapart_write_requests > 0); pull_data->n_outstanding_deltapart_write_requests--; check_outstanding_requests_handle_error (pull_data, &local_error); /* Always free state */ fetch_static_delta_data_free (fetch_data); } static void static_deltapart_fetch_on_complete (GObject *object, GAsyncResult *result, gpointer user_data) { OstreeFetcher *fetcher = (OstreeFetcher *)object; FetchStaticDeltaData *fetch_data = user_data; OtPullData *pull_data = fetch_data->pull_data; g_autofree char *temp_path = NULL; g_autoptr(GInputStream) in = NULL; g_autoptr(GVariant) part = NULL; g_autoptr(GError) local_error = NULL; GError **error = &local_error; glnx_fd_close int fd = -1; gboolean free_fetch_data = TRUE; g_debug ("fetch static delta part %s complete", fetch_data->expected_checksum); if (!_ostree_fetcher_request_to_tmpfile_finish (fetcher, result, &temp_path, error)) goto out; if (!glnx_openat_rdonly (_ostree_fetcher_get_dfd (fetcher), temp_path, TRUE, &fd, error)) goto out; /* From here on, if we fail to apply the delta, we'll re-fetch it */ if (!glnx_unlinkat (_ostree_fetcher_get_dfd (fetcher), temp_path, 0, error)) goto out; in = g_unix_input_stream_new (fd, FALSE); /* TODO - make async */ if (!_ostree_static_delta_part_open (in, NULL, 0, fetch_data->expected_checksum, &part, pull_data->cancellable, error)) goto out; _ostree_static_delta_part_execute_async (pull_data->repo, fetch_data->objects, part, pull_data->cancellable, on_static_delta_written, fetch_data); pull_data->n_outstanding_deltapart_write_requests++; free_fetch_data = FALSE; out: g_assert (pull_data->n_outstanding_deltapart_fetches > 0); pull_data->n_outstanding_deltapart_fetches--; pull_data->n_fetched_deltaparts++; check_outstanding_requests_handle_error (pull_data, &local_error); if (free_fetch_data) fetch_static_delta_data_free (fetch_data); } static gboolean process_verify_result (OtPullData *pull_data, const char *checksum, OstreeGpgVerifyResult *result, GError **error) { const char *error_prefix = glnx_strjoina ("Commit ", checksum); GLNX_AUTO_PREFIX_ERROR(error_prefix, error); if (result == NULL) return FALSE; /* Allow callers to output the results immediately. 
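 * We emit the repo's "gpg-verify-result" signal even when we then go on to
 * reject the commit below for lacking a valid signature.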
*/ g_signal_emit_by_name (pull_data->repo, "gpg-verify-result", checksum, result); if (!ostree_gpg_verify_result_require_valid_signature (result, error)) return FALSE; return TRUE; } static gboolean gpg_verify_unwritten_commit (OtPullData *pull_data, const char *checksum, GVariant *commit, GVariant *detached_metadata, GCancellable *cancellable, GError **error) { if (pull_data->gpg_verify) { g_autoptr(OstreeGpgVerifyResult) result = NULL; g_autoptr(GBytes) signed_data = g_variant_get_data_as_bytes (commit); if (!detached_metadata) { g_set_error (error, OSTREE_GPG_ERROR, OSTREE_GPG_ERROR_NO_SIGNATURE, "Commit %s: no detached metadata found for GPG verification", checksum); return FALSE; } result = _ostree_repo_gpg_verify_with_metadata (pull_data->repo, signed_data, detached_metadata, pull_data->remote_name, NULL, NULL, cancellable, error); if (!process_verify_result (pull_data, checksum, result, error)) return FALSE; } return TRUE; } static gboolean commitstate_is_partial (OtPullData *pull_data, OstreeRepoCommitState commitstate) { return pull_data->legacy_transaction_resuming || (commitstate & OSTREE_REPO_COMMIT_STATE_PARTIAL) > 0; } #ifdef OSTREE_ENABLE_EXPERIMENTAL_API /* Reads the collection-id of a given remote from the repo * configuration. */ static char * get_real_remote_repo_collection_id (OstreeRepo *repo, const gchar *remote_name) { g_autofree gchar *remote_collection_id = NULL; if (!ostree_repo_get_remote_option (repo, remote_name, "collection-id", NULL, &remote_collection_id, NULL) || (remote_collection_id == NULL) || (remote_collection_id[0] == '\0')) return NULL; return g_steal_pointer (&remote_collection_id); } /* Reads the collection-id of the remote repo. Where it will be read * from depends on whether we pull from the "local" remote repo (the * "file://" URL) or "remote" remote repo (likely the "http(s)://" * URL). */ static char * get_remote_repo_collection_id (OtPullData *pull_data) { if (pull_data->remote_repo_local != NULL) { const char *remote_collection_id = ostree_repo_get_collection_id (pull_data->remote_repo_local); if ((remote_collection_id == NULL) || (remote_collection_id[0] == '\0')) return NULL; return g_strdup (remote_collection_id); } return get_real_remote_repo_collection_id (pull_data->repo, pull_data->remote_name); } #endif /* OSTREE_ENABLE_EXPERIMENTAL_API */ /* Verify the ref and collection bindings. * * The ref binding is verified only if it exists. But if we have the * collection ID specified in the remote configuration then the ref * binding must exist, otherwise the verification will fail. Parts of * the verification can be skipped by passing NULL to the requested_ref * parameter (in case we requested a checksum directly, without looking it up * from a ref). * * The collection binding is verified only when we have collection ID * specified in the remote configuration. If it is specified, then the * binding must exist and must be equal to the remote repository * collection ID. 
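 *
 * If the commit carries no ref bindings and the remote configuration has no
 * collection ID, both checks are skipped and verification trivially succeeds.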
*/ static gboolean verify_bindings (OtPullData *pull_data, GVariant *commit, const OstreeCollectionRef *requested_ref, GError **error) { g_autofree char *remote_collection_id = NULL; #ifdef OSTREE_ENABLE_EXPERIMENTAL_API remote_collection_id = get_remote_repo_collection_id (pull_data); #endif /* OSTREE_ENABLE_EXPERIMENTAL_API */ g_autoptr(GVariant) metadata = g_variant_get_child_value (commit, 0); g_autofree const char **refs = NULL; if (!g_variant_lookup (metadata, OSTREE_COMMIT_META_KEY_REF_BINDING, "^a&s", &refs)) { /* Early return here - if the remote collection ID is NULL, then * we certainly will not verify the collection binding in the * commit. */ if (remote_collection_id == NULL) return TRUE; return glnx_throw (error, "expected commit metadata to have ref " "binding information, found none"); } if (requested_ref != NULL) { if (!g_strv_contains ((const char *const *) refs, requested_ref->ref_name)) { g_autoptr(GString) refs_dump = g_string_new (NULL); const char *refs_str; if (refs != NULL && (*refs) != NULL) { for (const char **iter = refs; *iter != NULL; ++iter) { const char *ref = *iter; if (refs_dump->len > 0) g_string_append (refs_dump, ", "); g_string_append_printf (refs_dump, "‘%s’", ref); } refs_str = refs_dump->str; } else { refs_str = "no refs"; } return glnx_throw (error, "commit has no requested ref ‘%s’ " "in ref binding metadata (%s)", requested_ref->ref_name, refs_str); } } if (remote_collection_id != NULL) { #ifdef OSTREE_ENABLE_EXPERIMENTAL_API const char *collection_id; if (!g_variant_lookup (metadata, OSTREE_COMMIT_META_KEY_COLLECTION_BINDING, "&s", &collection_id)) return glnx_throw (error, "expected commit metadata to have collection ID " "binding information, found none"); if (!g_str_equal (collection_id, remote_collection_id)) return glnx_throw (error, "commit has collection ID ‘%s’ in collection binding " "metadata, while the remote it came from has " "collection ID ‘%s’", collection_id, remote_collection_id); #endif } return TRUE; } /* Look at a commit object, and determine whether there are * more things to fetch. */ static gboolean scan_commit_object (OtPullData *pull_data, const char *checksum, guint recursion_depth, const OstreeCollectionRef *ref, GCancellable *cancellable, GError **error) { gpointer depthp; gint depth; if (g_hash_table_lookup_extended (pull_data->commit_to_depth, checksum, NULL, &depthp)) { depth = GPOINTER_TO_INT (depthp); } else { depth = pull_data->maxdepth; g_hash_table_insert (pull_data->commit_to_depth, g_strdup (checksum), GINT_TO_POINTER (depth)); } if (pull_data->gpg_verify) { g_autoptr(OstreeGpgVerifyResult) result = NULL; result = ostree_repo_verify_commit_for_remote (pull_data->repo, checksum, pull_data->remote_name, cancellable, error); if (!process_verify_result (pull_data, checksum, result, error)) return FALSE; } /* If we found a legacy transaction flag, assume we have to scan. * We always do a scan of dirtree objects; see * https://github.com/ostreedev/ostree/issues/543 */ OstreeRepoCommitState commitstate; g_autoptr(GVariant) commit = NULL; if (!ostree_repo_load_commit (pull_data->repo, checksum, &commit, &commitstate, error)) return FALSE; /* If ref is non-NULL then the commit we fetched was requested through the * branch, otherwise we requested a commit checksum without specifying a branch. 
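 * In the latter case ref is NULL here and verify_bindings() skips the
 * ref binding membership check.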
*/ if (!verify_bindings (pull_data, commit, ref, error)) return glnx_prefix_error (error, "Commit %s", checksum); if (pull_data->timestamp_check) { /* We don't support timestamp checking while recursing right now */ g_assert (ref); g_assert_cmpint (recursion_depth, ==, 0); const char *orig_rev = NULL; if (!g_hash_table_lookup_extended (pull_data->ref_original_commits, ref, NULL, (void**)&orig_rev)) g_assert_not_reached (); g_autoptr(GVariant) orig_commit = NULL; if (orig_rev) { if (!ostree_repo_load_commit (pull_data->repo, orig_rev, &orig_commit, NULL, error)) return glnx_prefix_error (error, "Reading %s for timestamp-check", ref->ref_name); guint64 orig_ts = ostree_commit_get_timestamp (orig_commit); guint64 new_ts = ostree_commit_get_timestamp (commit); if (!_ostree_compare_timestamps (orig_rev, orig_ts, checksum, new_ts, error)) return FALSE; } } /* If we found a legacy transaction flag, assume all commits are partial */ gboolean is_partial = commitstate_is_partial (pull_data, commitstate); /* PARSE OSTREE_SERIALIZED_COMMIT_VARIANT */ g_autoptr(GVariant) parent_csum = NULL; const guchar *parent_csum_bytes = NULL; g_variant_get_child (commit, 1, "@ay", &parent_csum); if (g_variant_n_children (parent_csum) > 0) { parent_csum_bytes = ostree_checksum_bytes_peek_validate (parent_csum, error); if (parent_csum_bytes == NULL) return FALSE; } if (parent_csum_bytes != NULL && pull_data->maxdepth == -1) { queue_scan_one_metadata_object_c (pull_data, parent_csum_bytes, OSTREE_OBJECT_TYPE_COMMIT, NULL, recursion_depth + 1, NULL); } else if (parent_csum_bytes != NULL && depth > 0) { char parent_checksum[OSTREE_SHA256_STRING_LEN+1]; gpointer parent_depthp; int parent_depth; ostree_checksum_inplace_from_bytes (parent_csum_bytes, parent_checksum); if (g_hash_table_lookup_extended (pull_data->commit_to_depth, parent_checksum, NULL, &parent_depthp)) { parent_depth = GPOINTER_TO_INT (parent_depthp); } else { parent_depth = depth - 1; } if (parent_depth >= 0) { g_hash_table_insert (pull_data->commit_to_depth, g_strdup (parent_checksum), GINT_TO_POINTER (parent_depth)); queue_scan_one_metadata_object_c (pull_data, parent_csum_bytes, OSTREE_OBJECT_TYPE_COMMIT, NULL, recursion_depth + 1, NULL); } } /* We only recurse to looking whether we need dirtree/dirmeta * objects if the commit is partial, and we're not doing a * commit-only fetch. 
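 * (A commit that is not marked partial is assumed to already have all of its
 * dirtree/dirmeta objects locally, so rescanning it would be redundant.)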
*/ if (is_partial && !pull_data->is_commit_only) { g_autoptr(GVariant) tree_contents_csum = NULL; g_autoptr(GVariant) tree_meta_csum = NULL; const guchar *tree_contents_csum_bytes; const guchar *tree_meta_csum_bytes; g_variant_get_child (commit, 6, "@ay", &tree_contents_csum); g_variant_get_child (commit, 7, "@ay", &tree_meta_csum); tree_contents_csum_bytes = ostree_checksum_bytes_peek_validate (tree_contents_csum, error); if (tree_contents_csum_bytes == NULL) return FALSE; tree_meta_csum_bytes = ostree_checksum_bytes_peek_validate (tree_meta_csum, error); if (tree_meta_csum_bytes == NULL) return FALSE; queue_scan_one_metadata_object_c (pull_data, tree_contents_csum_bytes, OSTREE_OBJECT_TYPE_DIR_TREE, "/", recursion_depth + 1, NULL); queue_scan_one_metadata_object_c (pull_data, tree_meta_csum_bytes, OSTREE_OBJECT_TYPE_DIR_META, NULL, recursion_depth + 1, NULL); } return TRUE; } static void queue_scan_one_metadata_object (OtPullData *pull_data, const char *csum, OstreeObjectType objtype, const char *path, guint recursion_depth, const OstreeCollectionRef *ref) { guchar buf[OSTREE_SHA256_DIGEST_LEN]; ostree_checksum_inplace_to_bytes (csum, buf); queue_scan_one_metadata_object_c (pull_data, buf, objtype, path, recursion_depth, ref); } static void queue_scan_one_metadata_object_c (OtPullData *pull_data, const guchar *csum, OstreeObjectType objtype, const char *path, guint recursion_depth, const OstreeCollectionRef *ref) { ScanObjectQueueData *scan_data = g_new0 (ScanObjectQueueData, 1); memcpy (scan_data->csum, csum, sizeof (scan_data->csum)); scan_data->objtype = objtype; scan_data->path = g_strdup (path); scan_data->recursion_depth = recursion_depth; scan_data->requested_ref = (ref != NULL) ? ostree_collection_ref_dup (ref) : NULL; g_queue_push_tail (&pull_data->scan_object_queue, scan_data); ensure_idle_queued (pull_data); } static gboolean scan_one_metadata_object_c (OtPullData *pull_data, const guchar *csum, OstreeObjectType objtype, const char *path, guint recursion_depth, const OstreeCollectionRef *ref, GCancellable *cancellable, GError **error) { g_autofree char *tmp_checksum = ostree_checksum_from_bytes (csum); g_autoptr(GVariant) object = ostree_object_name_serialize (tmp_checksum, objtype); /* It may happen that we've already looked at this object (think shared * dirtree subtrees), if that's the case, we're done */ if (g_hash_table_lookup (pull_data->scanned_metadata, object)) return TRUE; gboolean is_requested = g_hash_table_lookup (pull_data->requested_metadata, object) != NULL; /* Determine if we already have the object */ gboolean is_stored; if (!ostree_repo_has_object (pull_data->repo, objtype, tmp_checksum, &is_stored, cancellable, error)) return FALSE; /* Are we pulling an object we don't have from a local repo? */ if (!is_stored && pull_data->remote_repo_local) { if (objtype == OSTREE_OBJECT_TYPE_COMMIT) { /* mark as partial to ensure we scan the commit below */ if (!write_commitpartial_for (pull_data, tmp_checksum, error)) return FALSE; } if (!_ostree_repo_import_object (pull_data->repo, pull_data->remote_repo_local, objtype, tmp_checksum, pull_data->importflags, cancellable, error)) return FALSE; /* The import API will fetch both the commit and detached metadata, so * add it to the hash to avoid re-fetching it below. */ if (objtype == OSTREE_OBJECT_TYPE_COMMIT) g_hash_table_add (pull_data->fetched_detached_metadata, g_strdup (tmp_checksum)); is_stored = TRUE; is_requested = TRUE; } /* Do we have any localcache repos? 
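 * If so, and one of them already has this object, import it from there
 * rather than fetching it over the network.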
*/ else if (!is_stored && pull_data->localcache_repos) { for (guint i = 0; i < pull_data->localcache_repos->len; i++) { OstreeRepo *refd_repo = pull_data->localcache_repos->pdata[i]; gboolean localcache_repo_has_obj; if (!ostree_repo_has_object (refd_repo, objtype, tmp_checksum, &localcache_repo_has_obj, cancellable, error)) return FALSE; if (!localcache_repo_has_obj) continue; if (objtype == OSTREE_OBJECT_TYPE_COMMIT) { /* mark as partial to ensure we scan the commit below */ if (!write_commitpartial_for (pull_data, tmp_checksum, error)) return FALSE; } if (!_ostree_repo_import_object (pull_data->repo, refd_repo, objtype, tmp_checksum, pull_data->importflags, cancellable, error)) return FALSE; /* See comment above */ if (objtype == OSTREE_OBJECT_TYPE_COMMIT) g_hash_table_add (pull_data->fetched_detached_metadata, g_strdup (tmp_checksum)); is_stored = TRUE; is_requested = TRUE; pull_data->n_fetched_localcache_metadata++; break; } } if (!is_stored && !is_requested) { gboolean do_fetch_detached; g_hash_table_add (pull_data->requested_metadata, g_variant_ref (object)); do_fetch_detached = (objtype == OSTREE_OBJECT_TYPE_COMMIT); enqueue_one_object_request (pull_data, tmp_checksum, objtype, path, do_fetch_detached, FALSE, ref); } else if (is_stored && objtype == OSTREE_OBJECT_TYPE_COMMIT) { /* Even though we already have the commit, we always try to (re)fetch the * detached metadata before scanning it, in case new signatures appear. * https://github.com/projectatomic/rpm-ostree/issues/630 */ if (!g_hash_table_contains (pull_data->fetched_detached_metadata, tmp_checksum)) enqueue_one_object_request (pull_data, tmp_checksum, objtype, path, TRUE, TRUE, ref); else { if (!scan_commit_object (pull_data, tmp_checksum, recursion_depth, ref, pull_data->cancellable, error)) return FALSE; g_hash_table_add (pull_data->scanned_metadata, g_variant_ref (object)); pull_data->n_scanned_metadata++; } } else if (is_stored && objtype == OSTREE_OBJECT_TYPE_DIR_TREE) { if (!scan_dirtree_object (pull_data, tmp_checksum, path, recursion_depth, pull_data->cancellable, error)) return FALSE; g_hash_table_add (pull_data->scanned_metadata, g_variant_ref (object)); pull_data->n_scanned_metadata++; } return TRUE; } static void enqueue_one_object_request (OtPullData *pull_data, const char *checksum, OstreeObjectType objtype, const char *path, gboolean is_detached_meta, gboolean object_is_stored, const OstreeCollectionRef *ref) { gboolean is_meta; FetchObjectData *fetch_data; is_meta = OSTREE_OBJECT_TYPE_IS_META (objtype); fetch_data = g_new0 (FetchObjectData, 1); fetch_data->pull_data = pull_data; fetch_data->object = ostree_object_name_serialize (checksum, objtype); fetch_data->path = g_strdup (path); fetch_data->is_detached_meta = is_detached_meta; fetch_data->object_is_stored = object_is_stored; fetch_data->requested_ref = (ref != NULL) ? ostree_collection_ref_dup (ref) : NULL; if (is_meta) pull_data->n_requested_metadata++; else pull_data->n_requested_content++; /* Are too many requests in flight? */ if (fetcher_queue_is_full (pull_data)) { g_debug ("queuing fetch of %s.%s%s", checksum, ostree_object_type_to_string (objtype), is_detached_meta ?
" (detached)" : ""); if (is_meta) { GVariant *objname = ostree_object_name_serialize (checksum, objtype); g_hash_table_insert (pull_data->pending_fetch_metadata, objname, fetch_data); } else { g_hash_table_insert (pull_data->pending_fetch_content, g_strdup (checksum), fetch_data); } } else { start_fetch (pull_data, fetch_data); } } static void start_fetch (OtPullData *pull_data, FetchObjectData *fetch) { gboolean is_meta; g_autofree char *obj_subpath = NULL; guint64 *expected_max_size_p; guint64 expected_max_size; const char *expected_checksum; OstreeObjectType objtype; GPtrArray *mirrorlist = NULL; ostree_object_name_deserialize (fetch->object, &expected_checksum, &objtype); is_meta = OSTREE_OBJECT_TYPE_IS_META (objtype); g_debug ("starting fetch of %s.%s%s", expected_checksum, ostree_object_type_to_string (objtype), fetch->is_detached_meta ? " (detached)" : ""); if (is_meta) pull_data->n_outstanding_metadata_fetches++; else pull_data->n_outstanding_content_fetches++; OstreeFetcherRequestFlags flags = 0; /* Override the path if we're trying to fetch the .commitmeta file first */ if (fetch->is_detached_meta) { char buf[_OSTREE_LOOSE_PATH_MAX]; _ostree_loose_path (buf, expected_checksum, OSTREE_OBJECT_TYPE_COMMIT_META, pull_data->remote_mode); obj_subpath = g_build_filename ("objects", buf, NULL); mirrorlist = pull_data->meta_mirrorlist; flags |= OSTREE_FETCHER_REQUEST_OPTIONAL_CONTENT; } else { obj_subpath = _ostree_get_relative_object_path (expected_checksum, objtype, TRUE); mirrorlist = pull_data->content_mirrorlist; } /* We may have determined maximum sizes from the summary file content; if so, * honor it. Otherwise, metadata has a baseline max size. */ expected_max_size_p = fetch->is_detached_meta ? NULL : g_hash_table_lookup (pull_data->expected_commit_sizes, expected_checksum); if (expected_max_size_p) expected_max_size = *expected_max_size_p; else if (OSTREE_OBJECT_TYPE_IS_META (objtype)) expected_max_size = OSTREE_MAX_METADATA_SIZE; else expected_max_size = 0; _ostree_fetcher_request_to_tmpfile (pull_data->fetcher, mirrorlist, obj_subpath, flags, expected_max_size, is_meta ? OSTREE_REPO_PULL_METADATA_PRIORITY : OSTREE_REPO_PULL_CONTENT_PRIORITY, pull_data->cancellable, is_meta ?
meta_fetch_on_complete : content_fetch_on_complete, fetch); } static gboolean load_remote_repo_config (OtPullData *pull_data, GKeyFile **out_keyfile, GCancellable *cancellable, GError **error) { g_autofree char *contents = NULL; if (!fetch_mirrored_uri_contents_utf8_sync (pull_data->fetcher, pull_data->meta_mirrorlist, "config", &contents, cancellable, error)) return FALSE; g_autoptr(GKeyFile) ret_keyfile = g_key_file_new (); if (!g_key_file_load_from_data (ret_keyfile, contents, strlen (contents), 0, error)) return glnx_prefix_error (error, "Parsing config"); ot_transfer_out_value (out_keyfile, &ret_keyfile); return TRUE; } static gboolean process_one_static_delta_fallback (OtPullData *pull_data, gboolean delta_byteswap, GVariant *fallback_object, GCancellable *cancellable, GError **error) { guint8 objtype_y; g_autoptr(GVariant) csum_v = NULL; guint64 compressed_size, uncompressed_size; g_variant_get (fallback_object, "(y@aytt)", &objtype_y, &csum_v, &compressed_size, &uncompressed_size); if (!ostree_validate_structureof_objtype (objtype_y, error)) return FALSE; if (!ostree_validate_structureof_csum_v (csum_v, error)) return FALSE; compressed_size = maybe_swap_endian_u64 (delta_byteswap, compressed_size); uncompressed_size = maybe_swap_endian_u64 (delta_byteswap, uncompressed_size); pull_data->n_total_delta_fallbacks += 1; pull_data->total_deltapart_size += compressed_size; pull_data->total_deltapart_usize += uncompressed_size; OstreeObjectType objtype = (OstreeObjectType)objtype_y; g_autofree char *checksum = ostree_checksum_from_bytes_v (csum_v); gboolean is_stored; if (!ostree_repo_has_object (pull_data->repo, objtype, checksum, &is_stored, cancellable, error)) return FALSE; if (is_stored) pull_data->fetched_deltapart_size += compressed_size; if (pull_data->dry_run) return TRUE; /* Note early return */ if (!is_stored) { /* The delta compiler never did this, there's no reason to support it */ if (OSTREE_OBJECT_TYPE_IS_META (objtype)) return glnx_throw (error, "Found metadata object as fallback: %s.%s", checksum, ostree_object_type_to_string (objtype)); else { if (!g_hash_table_lookup (pull_data->requested_content, checksum)) { /* Mark this as requested, like we do in the non-delta path */ g_hash_table_add (pull_data->requested_content, checksum); /* But also record it's a delta fallback object, so we can account * for it as logically part of the delta fetch. 
*/ g_hash_table_add (pull_data->requested_fallback_content, g_strdup (checksum)); enqueue_one_object_request (pull_data, checksum, OSTREE_OBJECT_TYPE_FILE, NULL, FALSE, FALSE, NULL); checksum = NULL; /* We transferred ownership to the requested_content hash */ } } } return TRUE; } static void start_fetch_deltapart (OtPullData *pull_data, FetchStaticDeltaData *fetch) { g_autofree char *deltapart_path = _ostree_get_relative_static_delta_part_path (fetch->from_revision, fetch->to_revision, fetch->i); pull_data->n_outstanding_deltapart_fetches++; g_assert_cmpint (pull_data->n_outstanding_deltapart_fetches, <=, _OSTREE_MAX_OUTSTANDING_DELTAPART_REQUESTS); _ostree_fetcher_request_to_tmpfile (pull_data->fetcher, pull_data->content_mirrorlist, deltapart_path, 0, fetch->size, OSTREE_FETCHER_DEFAULT_PRIORITY, pull_data->cancellable, static_deltapart_fetch_on_complete, fetch); } static gboolean process_one_static_delta (OtPullData *pull_data, const char *from_revision, const char *to_revision, GVariant *delta_superblock, const OstreeCollectionRef *ref, GCancellable *cancellable, GError **error) { gboolean ret = FALSE; gboolean delta_byteswap; g_autoptr(GVariant) metadata = NULL; g_autoptr(GVariant) headers = NULL; g_autoptr(GVariant) fallback_objects = NULL; guint i, n; delta_byteswap = _ostree_delta_needs_byteswap (delta_superblock); /* Parsing OSTREE_STATIC_DELTA_SUPERBLOCK_FORMAT */ metadata = g_variant_get_child_value (delta_superblock, 0); headers = g_variant_get_child_value (delta_superblock, 6); fallback_objects = g_variant_get_child_value (delta_superblock, 7); /* Gather free space so we can do a check below */ struct statvfs stvfsbuf; if (TEMP_FAILURE_RETRY (fstatvfs (pull_data->repo->repo_dir_fd, &stvfsbuf)) < 0) return glnx_throw_errno_prefix (error, "fstatvfs"); /* First process the fallbacks */ n = g_variant_n_children (fallback_objects); for (i = 0; i < n; i++) { g_autoptr(GVariant) fallback_object = g_variant_get_child_value (fallback_objects, i); if (!process_one_static_delta_fallback (pull_data, delta_byteswap, fallback_object, cancellable, error)) goto out; } /* Write the to-commit object */ if (!pull_data->dry_run) { g_autoptr(GVariant) to_csum_v = NULL; g_autofree char *to_checksum = NULL; gboolean have_to_commit; to_csum_v = g_variant_get_child_value (delta_superblock, 3); if (!ostree_validate_structureof_csum_v (to_csum_v, error)) goto out; to_checksum = ostree_checksum_from_bytes_v (to_csum_v); if (!ostree_repo_has_object (pull_data->repo, OSTREE_OBJECT_TYPE_COMMIT, to_checksum, &have_to_commit, cancellable, error)) goto out; if (!have_to_commit) { FetchObjectData *fetch_data; g_autoptr(GVariant) to_commit = g_variant_get_child_value (delta_superblock, 4); g_autofree char *detached_path = _ostree_get_relative_static_delta_path (from_revision, to_revision, "commitmeta"); g_autoptr(GVariant) detached_data = NULL; detached_data = g_variant_lookup_value (metadata, detached_path, G_VARIANT_TYPE("a{sv}")); if (!gpg_verify_unwritten_commit (pull_data, to_revision, to_commit, detached_data, cancellable, error)) goto out; if (detached_data && !ostree_repo_write_commit_detached_metadata (pull_data->repo, to_revision, detached_data, cancellable, error)) goto out; fetch_data = g_new0 (FetchObjectData, 1); fetch_data->pull_data = pull_data; fetch_data->object = ostree_object_name_serialize (to_checksum, OSTREE_OBJECT_TYPE_COMMIT); fetch_data->is_detached_meta = FALSE; fetch_data->object_is_stored = FALSE; fetch_data->requested_ref = (ref != NULL) ? 
ostree_collection_ref_dup (ref) : NULL; ostree_repo_write_metadata_async (pull_data->repo, OSTREE_OBJECT_TYPE_COMMIT, to_checksum, to_commit, pull_data->cancellable, on_metadata_written, fetch_data); pull_data->n_outstanding_metadata_write_requests++; } } n = g_variant_n_children (headers); pull_data->n_total_deltaparts += n; for (i = 0; i < n; i++) { const guchar *csum; g_autoptr(GVariant) header = NULL; gboolean have_all = FALSE; g_autofree char *deltapart_path = NULL; FetchStaticDeltaData *fetch_data; g_autoptr(GVariant) csum_v = NULL; g_autoptr(GVariant) objects = NULL; g_autoptr(GBytes) inline_part_bytes = NULL; guint64 size, usize; guint32 version; header = g_variant_get_child_value (headers, i); g_variant_get (header, "(u@aytt@ay)", &version, &csum_v, &size, &usize, &objects); version = maybe_swap_endian_u32 (delta_byteswap, version); size = maybe_swap_endian_u64 (delta_byteswap, size); usize = maybe_swap_endian_u64 (delta_byteswap, usize); if (version > OSTREE_DELTAPART_VERSION) { g_set_error (error, G_IO_ERROR, G_IO_ERROR_FAILED, "Delta part has too new version %u", version); goto out; } csum = ostree_checksum_bytes_peek_validate (csum_v, error); if (!csum) goto out; if (!_ostree_repo_static_delta_part_have_all_objects (pull_data->repo, objects, &have_all, cancellable, error)) goto out; pull_data->total_deltapart_size += size; pull_data->total_deltapart_usize += usize; if (have_all) { g_debug ("Have all objects from static delta %s-%s part %u", from_revision ? from_revision : "empty", to_revision, i); pull_data->fetched_deltapart_size += size; pull_data->n_fetched_deltaparts++; continue; } deltapart_path = _ostree_get_relative_static_delta_part_path (from_revision, to_revision, i); { g_autoptr(GVariant) part_datav = g_variant_lookup_value (metadata, deltapart_path, G_VARIANT_TYPE ("(yay)")); if (part_datav) inline_part_bytes = g_variant_get_data_as_bytes (part_datav); } if (pull_data->dry_run) continue; fetch_data = g_new0 (FetchStaticDeltaData, 1); fetch_data->from_revision = g_strdup (from_revision); fetch_data->to_revision = g_strdup (to_revision); fetch_data->pull_data = pull_data; fetch_data->objects = g_variant_ref (objects); fetch_data->expected_checksum = ostree_checksum_from_bytes_v (csum_v); fetch_data->size = size; fetch_data->i = i; if (inline_part_bytes != NULL) { g_autoptr(GInputStream) memin = g_memory_input_stream_new_from_bytes (inline_part_bytes); g_autoptr(GVariant) inline_delta_part = NULL; /* For inline parts we are relying on per-commit GPG, so don't bother checksumming. */ if (!_ostree_static_delta_part_open (memin, inline_part_bytes, OSTREE_STATIC_DELTA_OPEN_FLAGS_SKIP_CHECKSUM, NULL, &inline_delta_part, cancellable, error)) goto out; _ostree_static_delta_part_execute_async (pull_data->repo, fetch_data->objects, inline_delta_part, pull_data->cancellable, on_static_delta_written, fetch_data); pull_data->n_outstanding_deltapart_write_requests++; } else { if (!fetcher_queue_is_full (pull_data)) start_fetch_deltapart (pull_data, fetch_data); else { g_hash_table_add (pull_data->pending_fetch_deltaparts, fetch_data); } } } /* The free space check is here since at this point we've parsed the delta not * only the total size of the parts, but also whether or not we already have * them. TODO: Ideally this free space check would be above, but we'd have to * walk everything twice and keep track of state. 
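 * For now we simply compare the total uncompressed size of the parts and
 * fallback objects against the free blocks reported by fstatvfs().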
*/ const guint64 delta_required_blocks = (pull_data->total_deltapart_usize / stvfsbuf.f_bsize); if (delta_required_blocks > stvfsbuf.f_bfree) { g_autofree char *formatted_required = g_format_size (pull_data->total_deltapart_usize); g_autofree char *formatted_avail = g_format_size (((guint64)stvfsbuf.f_bsize) * stvfsbuf.f_bfree); glnx_throw (error, "Delta requires %s free space, but only %s available", formatted_required, formatted_avail); goto out; } ret = TRUE; out: return ret; } /* Loop over the static delta data we got from the summary, * and find the newest commit for @out_from_revision that * goes to @to_revision. * * Additionally, @out_have_scratch_delta will be set to %TRUE * if there is a %NULL → @to_revision delta, also known as * a "from scratch" delta. */ static gboolean get_best_static_delta_start_for (OtPullData *pull_data, const char *to_revision, gboolean *out_have_scratch_delta, char **out_from_revision, GCancellable *cancellable, GError **error) { /* Array<char*> of possible from checksums */ g_autoptr(GPtrArray) candidates = g_ptr_array_new_with_free_func (g_free); const char *newest_candidate = NULL; guint64 newest_candidate_timestamp = 0; g_assert (pull_data->summary_deltas_checksums != NULL); *out_have_scratch_delta = FALSE; /* Loop over all deltas known from the summary file, * finding ones which go to to_revision */ GLNX_HASH_TABLE_FOREACH (pull_data->summary_deltas_checksums, const char*, delta_name) { g_autofree char *cur_from_rev = NULL; g_autofree char *cur_to_rev = NULL; /* Gracefully handle corrupted (or malicious) summary files */ if (!_ostree_parse_delta_name (delta_name, &cur_from_rev, &cur_to_rev, error)) return FALSE; /* Is this the checksum we want? */ if (strcmp (cur_to_rev, to_revision) != 0) continue; if (cur_from_rev) g_ptr_array_add (candidates, g_steal_pointer (&cur_from_rev)); else *out_have_scratch_delta = TRUE; } /* Loop over our candidates, find the newest one */ for (guint i = 0; i < candidates->len; i++) { const char *candidate = candidates->pdata[i]; guint64 candidate_ts = 0; g_autoptr(GVariant) commit = NULL; OstreeRepoCommitState state; gboolean have_candidate; /* Do we have this commit at all? If not, skip it */ if (!ostree_repo_has_object (pull_data->repo, OSTREE_OBJECT_TYPE_COMMIT, candidate, &have_candidate, NULL, error)) return FALSE; if (!have_candidate) continue; /* Load it */ if (!ostree_repo_load_commit (pull_data->repo, candidate, &commit, &state, error)) return FALSE; /* Ignore partial commits, we can't use them */ if (state & OSTREE_REPO_COMMIT_STATE_PARTIAL) continue; /* Is it newer? 
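 * Among the complete local commits that have a delta to to_revision, pick
 * the one with the largest commit timestamp.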
*/ candidate_ts = ostree_commit_get_timestamp (commit); if (newest_candidate == NULL || candidate_ts > newest_candidate_timestamp) { newest_candidate = candidate; newest_candidate_timestamp = candidate_ts; } } *out_from_revision = g_strdup (newest_candidate); return TRUE; } typedef struct { OtPullData *pull_data; char *from_revision; char *to_revision; OstreeCollectionRef *requested_ref; /* (nullable) */ } FetchDeltaSuperData; static void set_required_deltas_error (GError **error, const char *from_revision, const char *to_revision) { g_set_error (error, G_IO_ERROR, G_IO_ERROR_FAILED, "Static deltas required, but none found for %s to %s", from_revision, to_revision); } static void on_superblock_fetched (GObject *src, GAsyncResult *res, gpointer data) { FetchDeltaSuperData *fdata = data; OtPullData *pull_data = fdata->pull_data; g_autoptr(GError) local_error = NULL; GError **error = &local_error; g_autoptr(GBytes) delta_superblock_data = NULL; const char *from_revision = fdata->from_revision; const char *to_revision = fdata->to_revision; if (!_ostree_fetcher_request_to_membuf_finish ((OstreeFetcher*)src, res, &delta_superblock_data, error)) { if (!g_error_matches (local_error, G_IO_ERROR, G_IO_ERROR_NOT_FOUND)) goto out; g_clear_error (&local_error); if (pull_data->require_static_deltas) { set_required_deltas_error (error, from_revision, to_revision); goto out; } queue_scan_one_metadata_object (pull_data, to_revision, OSTREE_OBJECT_TYPE_COMMIT, NULL, 0, fdata->requested_ref); } else { g_autofree gchar *delta = NULL; g_autofree guchar *ret_csum = NULL; guchar *summary_csum; g_autoptr (GInputStream) summary_is = NULL; g_autoptr(GVariant) delta_superblock = NULL; summary_is = g_memory_input_stream_new_from_data (g_bytes_get_data (delta_superblock_data, NULL), g_bytes_get_size (delta_superblock_data), NULL); if (!ot_gio_checksum_stream (summary_is, &ret_csum, pull_data->cancellable, error)) goto out; delta = g_strconcat (from_revision ? from_revision : "", from_revision ? "-" : "", to_revision, NULL); summary_csum = g_hash_table_lookup (pull_data->summary_deltas_checksums, delta); /* At this point we've GPG verified the data, so in theory * could trust that they provided the right data, but let's * make this a hard error. 
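 * If summary GPG verification is enabled we require the delta to be listed
 * in the (signed) summary, and in any case its checksum there must match.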
*/ if (pull_data->gpg_verify_summary && !summary_csum) { g_set_error (error, OSTREE_GPG_ERROR, OSTREE_GPG_ERROR_NO_SIGNATURE, "GPG verification enabled, but no summary signatures found (use gpg-verify-summary=false in remote config to disable)"); goto out; } if (summary_csum && memcmp (summary_csum, ret_csum, 32)) { g_set_error (error, G_IO_ERROR, G_IO_ERROR_FAILED, "Invalid checksum for static delta %s", delta); goto out; } delta_superblock = g_variant_ref_sink (g_variant_new_from_bytes ((GVariantType*)OSTREE_STATIC_DELTA_SUPERBLOCK_FORMAT, delta_superblock_data, FALSE)); g_ptr_array_add (pull_data->static_delta_superblocks, g_variant_ref (delta_superblock)); if (!process_one_static_delta (pull_data, from_revision, to_revision, delta_superblock, fdata->requested_ref, pull_data->cancellable, error)) goto out; } out: g_free (fdata->from_revision); g_free (fdata->to_revision); if (fdata->requested_ref) ostree_collection_ref_free (fdata->requested_ref); g_free (fdata); g_assert (pull_data->n_outstanding_metadata_fetches > 0); pull_data->n_outstanding_metadata_fetches--; pull_data->n_fetched_metadata++; check_outstanding_requests_handle_error (pull_data, &local_error); } static gboolean validate_variant_is_csum (GVariant *csum, GError **error) { if (!g_variant_is_of_type (csum, G_VARIANT_TYPE ("ay"))) return glnx_throw (error, "Invalid checksum variant of type '%s', expected 'ay'", g_variant_get_type_string (csum)); return ostree_validate_structureof_csum_v (csum, error); } /* Load the summary from the cache if the provided .sig file is the same as the cached version. */ static gboolean _ostree_repo_load_cache_summary_if_same_sig (OstreeRepo *self, const char *remote, GBytes *summary_sig, GBytes **summary, GCancellable *cancellable, GError **error) { if (self->cache_dir_fd == -1) return TRUE; const char *summary_cache_sig_file = glnx_strjoina (_OSTREE_SUMMARY_CACHE_DIR, "/", remote, ".sig"); glnx_fd_close int prev_fd = -1; if (!ot_openat_ignore_enoent (self->cache_dir_fd, summary_cache_sig_file, &prev_fd, error)) return FALSE; if (prev_fd < 0) return TRUE; /* Note early return */ g_autoptr(GBytes) old_sig_contents = glnx_fd_readall_bytes (prev_fd, cancellable, error); if (!old_sig_contents) return FALSE; if (g_bytes_compare (old_sig_contents, summary_sig) == 0) { const char *summary_cache_file = glnx_strjoina (_OSTREE_SUMMARY_CACHE_DIR, "/", remote); glnx_fd_close int summary_fd = -1; GBytes *summary_data; summary_fd = openat (self->cache_dir_fd, summary_cache_file, O_CLOEXEC | O_RDONLY); if (summary_fd < 0) { if (errno == ENOENT) { (void) unlinkat (self->cache_dir_fd, summary_cache_sig_file, 0); return TRUE; /* Note early return */ } return glnx_throw_errno_prefix (error, "openat(%s)", summary_cache_file); } summary_data = glnx_fd_readall_bytes (summary_fd, cancellable, error); if (!summary_data) return FALSE; *summary = summary_data; } return TRUE; } /* Replace the current summary+signature with new versions */ static gboolean _ostree_repo_cache_summary (OstreeRepo *self, const char *remote, GBytes *summary, GBytes *summary_sig, GCancellable *cancellable, GError **error) { if (self->cache_dir_fd == -1) return TRUE; if (!glnx_shutil_mkdir_p_at (self->cache_dir_fd, _OSTREE_SUMMARY_CACHE_DIR, 0775, cancellable, error)) return FALSE; const char *summary_cache_file = glnx_strjoina (_OSTREE_SUMMARY_CACHE_DIR, "/", remote); if (!glnx_file_replace_contents_at (self->cache_dir_fd, summary_cache_file, g_bytes_get_data (summary, NULL), g_bytes_get_size (summary), self->disable_fsync ? 
GLNX_FILE_REPLACE_NODATASYNC : GLNX_FILE_REPLACE_DATASYNC_NEW, cancellable, error)) return FALSE; const char *summary_cache_sig_file = glnx_strjoina (_OSTREE_SUMMARY_CACHE_DIR, "/", remote, ".sig"); if (!glnx_file_replace_contents_at (self->cache_dir_fd, summary_cache_sig_file, g_bytes_get_data (summary_sig, NULL), g_bytes_get_size (summary_sig), self->disable_fsync ? GLNX_FILE_REPLACE_NODATASYNC : GLNX_FILE_REPLACE_DATASYNC_NEW, cancellable, error)) return FALSE; return TRUE; } static OstreeFetcher * _ostree_repo_remote_new_fetcher (OstreeRepo *self, const char *remote_name, gboolean gzip, OstreeFetcherSecurityState *out_state, GError **error) { OstreeFetcher *fetcher = NULL; OstreeFetcherConfigFlags fetcher_flags = 0; gboolean tls_permissive = FALSE; OstreeFetcherSecurityState ret_state = OSTREE_FETCHER_SECURITY_STATE_TLS; gboolean success = FALSE; g_return_val_if_fail (OSTREE_IS_REPO (self), NULL); g_return_val_if_fail (remote_name != NULL, NULL); if (!ostree_repo_get_remote_boolean_option (self, remote_name, "tls-permissive", FALSE, &tls_permissive, error)) goto out; if (tls_permissive) { fetcher_flags |= OSTREE_FETCHER_FLAGS_TLS_PERMISSIVE; ret_state = OSTREE_FETCHER_SECURITY_STATE_INSECURE; } if (gzip) fetcher_flags |= OSTREE_FETCHER_FLAGS_TRANSFER_GZIP; fetcher = _ostree_fetcher_new (self->tmp_dir_fd, remote_name, fetcher_flags); { g_autofree char *tls_client_cert_path = NULL; g_autofree char *tls_client_key_path = NULL; if (!ostree_repo_get_remote_option (self, remote_name, "tls-client-cert-path", NULL, &tls_client_cert_path, error)) goto out; if (!ostree_repo_get_remote_option (self, remote_name, "tls-client-key-path", NULL, &tls_client_key_path, error)) goto out; if ((tls_client_cert_path != NULL) != (tls_client_key_path != NULL)) { g_set_error (error, G_IO_ERROR, G_IO_ERROR_FAILED, "Remote \"%s\" must specify both " "\"tls-client-cert-path\" and \"tls-client-key-path\"", remote_name); goto out; } else if (tls_client_cert_path != NULL) { _ostree_fetcher_set_client_cert (fetcher, tls_client_cert_path, tls_client_key_path); } } { g_autofree char *tls_ca_path = NULL; if (!ostree_repo_get_remote_option (self, remote_name, "tls-ca-path", NULL, &tls_ca_path, error)) goto out; if (tls_ca_path != NULL) { _ostree_fetcher_set_tls_database (fetcher, tls_ca_path); /* Don't change if it's already _INSECURE */ if (ret_state == OSTREE_FETCHER_SECURITY_STATE_TLS) ret_state = OSTREE_FETCHER_SECURITY_STATE_CA_PINNED; } } { g_autofree char *http_proxy = NULL; if (!ostree_repo_get_remote_option (self, remote_name, "proxy", NULL, &http_proxy, error)) goto out; if (http_proxy != NULL) _ostree_fetcher_set_proxy (fetcher, http_proxy); } if (!_ostree_repo_remote_name_is_file (remote_name)) { g_autofree char *cookie_file = g_strdup_printf ("%s.cookies.txt", remote_name); /* TODO; port away from this; a bit hard since both libsoup and libcurl * expect a file. Doing ot_fdrel_to_gfile() works for now though. 
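 * The cookie jar is only wired up if a <remote>.cookies.txt file already
 * exists in the repository directory.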
*/ GFile*repo_path = ostree_repo_get_path (self); g_autofree char *jar_path = g_build_filename (gs_file_get_path_cached (repo_path), cookie_file, NULL); if (g_file_test (jar_path, G_FILE_TEST_IS_REGULAR)) _ostree_fetcher_set_cookie_jar (fetcher, jar_path); } success = TRUE; out: if (!success) g_clear_object (&fetcher); if (out_state) *out_state = ret_state; return fetcher; } static gboolean _ostree_preload_metadata_file (OstreeRepo *self, OstreeFetcher *fetcher, GPtrArray *mirrorlist, const char *filename, gboolean is_metalink, GBytes **out_bytes, GCancellable *cancellable, GError **error) { gboolean ret = FALSE; if (is_metalink) { g_autoptr(OstreeMetalink) metalink = NULL; GError *local_error = NULL; /* the metalink uri is buried in the mirrorlist as the first (and only) * element */ metalink = _ostree_metalink_new (fetcher, filename, OSTREE_MAX_METADATA_SIZE, mirrorlist->pdata[0]); _ostree_metalink_request_sync (metalink, NULL, out_bytes, cancellable, &local_error); if (g_error_matches (local_error, G_IO_ERROR, G_IO_ERROR_NOT_FOUND)) { g_clear_error (&local_error); *out_bytes = NULL; } else if (local_error != NULL) { g_propagate_error (error, local_error); goto out; } } else { ret = _ostree_fetcher_mirrored_request_to_membuf (fetcher, mirrorlist, filename, OSTREE_FETCHER_REQUEST_OPTIONAL_CONTENT, out_bytes, OSTREE_MAX_METADATA_SIZE, cancellable, error); if (!ret) goto out; } ret = TRUE; out: return ret; } static gboolean fetch_mirrorlist (OstreeFetcher *fetcher, const char *mirrorlist_url, GPtrArray **out_mirrorlist, GCancellable *cancellable, GError **error) { gboolean ret = FALSE; g_auto(GStrv) lines = NULL; g_autofree char *contents = NULL; g_autoptr(OstreeFetcherURI) mirrorlist = NULL; g_autoptr(GPtrArray) ret_mirrorlist = g_ptr_array_new_with_free_func ((GDestroyNotify) _ostree_fetcher_uri_free); mirrorlist = _ostree_fetcher_uri_parse (mirrorlist_url, error); if (!mirrorlist) goto out; if (!fetch_uri_contents_utf8_sync (fetcher, mirrorlist, &contents, cancellable, error)) { g_prefix_error (error, "While fetching mirrorlist '%s': ", mirrorlist_url); goto out; } /* go through each mirror in mirrorlist and do a quick sanity check that it * works so that we don't waste the fetcher's time when it goes through them * */ lines = g_strsplit (contents, "\n", -1); g_debug ("Scanning mirrorlist from '%s'", mirrorlist_url); for (char **iter = lines; iter && *iter; iter++) { const char *mirror_uri_str = *iter; g_autoptr(OstreeFetcherURI) mirror_uri = NULL; g_autofree char *scheme = NULL; /* let's be nice and support empty lines and comments */ if (*mirror_uri_str == '\0' || *mirror_uri_str == '#') continue; mirror_uri = _ostree_fetcher_uri_parse (mirror_uri_str, NULL); if (!mirror_uri) { g_debug ("Can't parse mirrorlist line '%s'", mirror_uri_str); continue; } scheme = _ostree_fetcher_uri_get_scheme (mirror_uri); if (!(g_str_equal (scheme, "http") || (g_str_equal (scheme, "https")))) { /* let's not support mirrorlists that contain non-http based URIs for * now (e.g. local URIs) -- we need to think about if and how we want * to support this since we set up things differently depending on * whether we're pulling locally or not */ g_debug ("Ignoring non-http/s mirrorlist entry '%s'", mirror_uri_str); continue; } /* We keep sanity checking until we hit a working mirror; there's no need * to waste resources checking the remaining ones. At the same time, * guaranteeing that the first mirror in the list works saves the fetcher * time from always iterating through a few bad first mirrors. 
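 * Mirrors after the first working one are added without being probed; if no
 * mirror works at all we error out below instead of returning an empty list.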
*/ if (ret_mirrorlist->len == 0) { GError *local_error = NULL; g_autoptr(OstreeFetcherURI) config_uri = _ostree_fetcher_uri_new_subpath (mirror_uri, "config"); if (fetch_uri_contents_utf8_sync (fetcher, config_uri, NULL, cancellable, &local_error)) g_ptr_array_add (ret_mirrorlist, g_steal_pointer (&mirror_uri)); else { g_debug ("Failed to fetch config from mirror '%s': %s", mirror_uri_str, local_error->message); g_clear_error (&local_error); } } else { g_ptr_array_add (ret_mirrorlist, g_steal_pointer (&mirror_uri)); } } if (ret_mirrorlist->len == 0) { g_set_error (error, G_IO_ERROR, G_IO_ERROR_FAILED, "No valid mirrors were found in mirrorlist '%s'", mirrorlist_url); goto out; } *out_mirrorlist = g_steal_pointer (&ret_mirrorlist); ret = TRUE; out: return ret; } static gboolean repo_remote_fetch_summary (OstreeRepo *self, const char *name, const char *metalink_url_string, GVariant *options, GBytes **out_summary, GBytes **out_signatures, GCancellable *cancellable, GError **error) { g_autoptr(OstreeFetcher) fetcher = NULL; g_autoptr(GMainContext) mainctx = NULL; gboolean ret = FALSE; gboolean from_cache = FALSE; const char *url_override = NULL; g_autoptr(GVariant) extra_headers = NULL; g_autoptr(GPtrArray) mirrorlist = NULL; if (options) { (void) g_variant_lookup (options, "override-url", "&s", &url_override); (void) g_variant_lookup (options, "http-headers", "@a(ss)", &extra_headers); } mainctx = g_main_context_new (); g_main_context_push_thread_default (mainctx); fetcher = _ostree_repo_remote_new_fetcher (self, name, TRUE, NULL, error); if (fetcher == NULL) goto out; if (extra_headers) _ostree_fetcher_set_extra_headers (fetcher, extra_headers); { g_autofree char *url_string = NULL; if (metalink_url_string) url_string = g_strdup (metalink_url_string); else if (url_override) url_string = g_strdup (url_override); else if (!ostree_repo_remote_get_url (self, name, &url_string, error)) goto out; if (metalink_url_string == NULL && g_str_has_prefix (url_string, "mirrorlist=")) { if (!fetch_mirrorlist (fetcher, url_string + strlen ("mirrorlist="), &mirrorlist, cancellable, error)) goto out; } else { g_autoptr(OstreeFetcherURI) uri = _ostree_fetcher_uri_parse (url_string, error); if (!uri) goto out; mirrorlist = g_ptr_array_new_with_free_func ((GDestroyNotify) _ostree_fetcher_uri_free); g_ptr_array_add (mirrorlist, g_steal_pointer (&uri)); } } /* FIXME: Send the ETag from the cache with the request for summary.sig to * avoid downloading summary.sig unnecessarily. This won’t normally provide * any benefits (but won’t do any harm) since summary.sig is typically 500B * in size. But if a repository has multiple keys, the signature file will * grow and this optimisation may be useful. */ if (!_ostree_preload_metadata_file (self, fetcher, mirrorlist, "summary.sig", metalink_url_string ? TRUE : FALSE, out_signatures, cancellable, error)) goto out; if (*out_signatures) { if (!_ostree_repo_load_cache_summary_if_same_sig (self, name, *out_signatures, out_summary, cancellable, error)) goto out; } if (*out_summary) from_cache = TRUE; else { if (!_ostree_preload_metadata_file (self, fetcher, mirrorlist, "summary", metalink_url_string ? 
TRUE : FALSE, out_summary, cancellable, error)) goto out; } if (!from_cache && *out_summary && *out_signatures) { g_autoptr(GError) temp_error = NULL; if (!_ostree_repo_cache_summary (self, name, *out_summary, *out_signatures, cancellable, &temp_error)) { if (g_error_matches (temp_error, G_IO_ERROR, G_IO_ERROR_PERMISSION_DENIED)) g_debug ("No permissions to save summary cache"); else { g_propagate_error (error, g_steal_pointer (&temp_error)); goto out; } } } ret = TRUE; out: if (mainctx) g_main_context_pop_thread_default (mainctx); return ret; } /* Create the fetcher by unioning options from the remote config, plus * any options specific to this pull (such as extra headers). */ static gboolean reinitialize_fetcher (OtPullData *pull_data, const char *remote_name, GError **error) { g_clear_object (&pull_data->fetcher); pull_data->fetcher = _ostree_repo_remote_new_fetcher (pull_data->repo, remote_name, FALSE, &pull_data->fetcher_security_state, error); if (pull_data->fetcher == NULL) return FALSE; if (pull_data->extra_headers) _ostree_fetcher_set_extra_headers (pull_data->fetcher, pull_data->extra_headers); return TRUE; } /* Start a request for a static delta */ static void initiate_delta_request (OtPullData *pull_data, const char *from_revision, const char *to_revision, const OstreeCollectionRef *ref) { g_autofree char *delta_name = _ostree_get_relative_static_delta_superblock_path (from_revision, to_revision); FetchDeltaSuperData *fdata = g_new0(FetchDeltaSuperData, 1); fdata->pull_data = pull_data; fdata->from_revision = g_strdup (from_revision); fdata->to_revision = g_strdup (to_revision); fdata->requested_ref = (ref != NULL) ? ostree_collection_ref_dup (ref) : NULL; _ostree_fetcher_request_to_membuf (pull_data->fetcher, pull_data->content_mirrorlist, delta_name, 0, OSTREE_MAX_METADATA_SIZE, 0, pull_data->cancellable, on_superblock_fetched, fdata); pull_data->n_outstanding_metadata_fetches++; pull_data->n_requested_metadata++; } /* * initiate_request: * @ref: Optional ref name and collection ID * @to_revision: Target commit revision we want to fetch * * Start a request for either a ref or a commit. In the * ref case, we know both the name and the target commit. * * This function primarily handles the semantics around * `disable_static_deltas` and `require_static_deltas`. */ static gboolean initiate_request (OtPullData *pull_data, const OstreeCollectionRef *ref, const char *to_revision, GError **error) { g_autofree char *delta_from_revision = NULL; /* Are deltas disabled? OK, just start an object fetch and be done */ if (pull_data->disable_static_deltas) { queue_scan_one_metadata_object (pull_data, to_revision, OSTREE_OBJECT_TYPE_COMMIT, NULL, 0, ref); return TRUE; } /* If we have a summary, we can use the newer logic */ if (pull_data->summary) { gboolean have_scratch_delta = FALSE; /* Look for a delta to @to_revision in the summary data */ if (!get_best_static_delta_start_for (pull_data, to_revision, &have_scratch_delta, &delta_from_revision, pull_data->cancellable, error)) return FALSE; if (delta_from_revision) /* Did we find a delta FROM commit? */ initiate_delta_request (pull_data, delta_from_revision, to_revision, ref); else if (have_scratch_delta) /* No delta FROM, do we have a scratch? */ initiate_delta_request (pull_data, NULL, to_revision, ref); else if (pull_data->require_static_deltas) /* No deltas found; are they required? */ { set_required_deltas_error (error, (ref != NULL) ? ref->ref_name : "", to_revision); return FALSE; } else /* No deltas, fall back to object fetches. 
*/ queue_scan_one_metadata_object (pull_data, to_revision, OSTREE_OBJECT_TYPE_COMMIT, NULL, 0, ref); } else if (ref != NULL) { /* Are we doing a delta via a ref? In that case we can fall back to the older * logic of just using the current tip of the ref as a delta FROM source. */ g_autofree char *refspec = NULL; if (pull_data->remote_name != NULL) refspec = g_strdup_printf ("%s:%s", pull_data->remote_name, ref->ref_name); if (!ostree_repo_resolve_rev (pull_data->repo, (refspec != NULL) ? refspec : ref->ref_name, TRUE, &delta_from_revision, error)) return FALSE; /* Determine whether the from revision we have is partial; this * can happen if e.g. one uses `ostree pull --commit-metadata-only`. * This mirrors the logic in get_best_static_delta_start_for(). */ if (delta_from_revision) { OstreeRepoCommitState from_commitstate; if (!ostree_repo_load_commit (pull_data->repo, delta_from_revision, NULL, &from_commitstate, error)) return FALSE; /* Was it partial? Then we can't use it. */ if (commitstate_is_partial (pull_data, from_commitstate)) g_clear_pointer (&delta_from_revision, g_free); } /* This is similar to the below, except we *might* use the previous * commit, or we might do a scratch delta first. */ initiate_delta_request (pull_data, delta_from_revision ?: NULL, to_revision, ref); } else { /* Legacy path without a summary file - let's try a scratch delta, if that * doesn't work, it'll drop down to object requests. */ initiate_delta_request (pull_data, NULL, to_revision, NULL); } return TRUE; } /* ------------------------------------------------------------------------------------------ * Below is the libsoup-invariant API; these should match * the stub functions in the #else clause * ------------------------------------------------------------------------------------------ */ /** * ostree_repo_pull_with_options: * @self: Repo * @remote_name_or_baseurl: Name of remote or file:// url * @options: A GVariant a{sv} with an extensible set of flags. * @progress: (allow-none): Progress * @cancellable: Cancellable * @error: Error * * Like ostree_repo_pull(), but supports an extensible set of flags. * The following are currently defined: * * * refs (as): Array of string refs * * collection-refs (a(sss)): Array of (collection ID, ref name, checksum) tuples to pull; * mutually exclusive with `refs` and `override-commit-ids`. Checksums may be the empty * string to pull the latest commit for that ref * * flags (i): An instance of #OstreeRepoPullFlags * * subdir (s): Pull just this subdirectory * * subdirs (as): Pull just these subdirectories * * override-remote-name (s): If local, add this remote to refspec * * gpg-verify (b): GPG verify commits * * gpg-verify-summary (b): GPG verify summary * * depth (i): How far in the history to traverse; default is 0, -1 means infinite * * disable-static-deltas (b): Do not use static deltas * * require-static-deltas (b): Require static deltas * * override-commit-ids (as): Array of specific commit IDs to fetch for refs * * timestamp-check (b): Verify commit timestamps are newer than current (when pulling via ref); Since: 2017.11 * * dry-run (b): Only print information on what will be downloaded (requires static deltas) * * override-url (s): Fetch objects from this URL if remote specifies no metalink in options * * inherit-transaction (b): Don't initiate, finish or abort a transaction, useful to do multiple pulls in one transaction. 
* * http-headers (a(ss)): Additional headers to add to all HTTP requests * * update-frequency (u): Frequency to call the async progress callback in milliseconds, if any; only values higher than 0 are valid * * localcache-repos (as): File paths for local repos to use as caches when doing remote fetches */ gboolean ostree_repo_pull_with_options (OstreeRepo *self, const char *remote_name_or_baseurl, GVariant *options, OstreeAsyncProgress *progress, GCancellable *cancellable, GError **error) { gboolean ret = FALSE; g_autoptr(GBytes) bytes_summary = NULL; g_autofree char *metalink_url_str = NULL; g_autoptr(GHashTable) requested_refs_to_fetch = NULL; /* (element-type OstreeCollectionRef utf8) */ g_autoptr(GHashTable) commits_to_fetch = NULL; g_autofree char *remote_mode_str = NULL; g_autoptr(OstreeMetalink) metalink = NULL; OtPullData pull_data_real = { 0, }; OtPullData *pull_data = &pull_data_real; GKeyFile *remote_config = NULL; char **configured_branches = NULL; guint64 bytes_transferred; guint64 end_time; guint update_frequency = 0; OstreeRepoPullFlags flags = 0; const char *dir_to_pull = NULL; g_autofree char **dirs_to_pull = NULL; g_autofree char **refs_to_fetch = NULL; g_autoptr(GVariantIter) collection_refs_iter = NULL; g_autofree char **override_commit_ids = NULL; GSource *update_timeout = NULL; gboolean opt_gpg_verify_set = FALSE; gboolean opt_gpg_verify_summary_set = FALSE; gboolean opt_collection_refs_set = FALSE; const char *main_collection_id = NULL; const char *url_override = NULL; gboolean inherit_transaction = FALSE; g_autoptr(GHashTable) updated_requested_refs_to_fetch = NULL; /* (element-type OstreeCollectionRef utf8) */ int i; g_autofree char **opt_localcache_repos = NULL; /* If refs or collection-refs has exactly one value, this will point to that * value, otherwise NULL. Used for logging. 
*/ const char *the_ref_to_fetch = NULL; if (options) { int flags_i = OSTREE_REPO_PULL_FLAGS_NONE; (void) g_variant_lookup (options, "refs", "^a&s", &refs_to_fetch); opt_collection_refs_set = g_variant_lookup (options, "collection-refs", "a(sss)", &collection_refs_iter); (void) g_variant_lookup (options, "flags", "i", &flags_i); /* Reduce risk of issues if enum happens to be 64 bit for some reason */ flags = flags_i; (void) g_variant_lookup (options, "subdir", "&s", &dir_to_pull); (void) g_variant_lookup (options, "subdirs", "^a&s", &dirs_to_pull); (void) g_variant_lookup (options, "override-remote-name", "s", &pull_data->remote_name); opt_gpg_verify_set = g_variant_lookup (options, "gpg-verify", "b", &pull_data->gpg_verify); opt_gpg_verify_summary_set = g_variant_lookup (options, "gpg-verify-summary", "b", &pull_data->gpg_verify_summary); (void) g_variant_lookup (options, "depth", "i", &pull_data->maxdepth); (void) g_variant_lookup (options, "disable-static-deltas", "b", &pull_data->disable_static_deltas); (void) g_variant_lookup (options, "require-static-deltas", "b", &pull_data->require_static_deltas); (void) g_variant_lookup (options, "override-commit-ids", "^a&s", &override_commit_ids); (void) g_variant_lookup (options, "dry-run", "b", &pull_data->dry_run); (void) g_variant_lookup (options, "override-url", "&s", &url_override); (void) g_variant_lookup (options, "inherit-transaction", "b", &inherit_transaction); (void) g_variant_lookup (options, "http-headers", "@a(ss)", &pull_data->extra_headers); (void) g_variant_lookup (options, "update-frequency", "u", &update_frequency); (void) g_variant_lookup (options, "localcache-repos", "^a&s", &opt_localcache_repos); (void) g_variant_lookup (options, "timestamp-check", "b", &pull_data->timestamp_check); } g_return_val_if_fail (OSTREE_IS_REPO (self), FALSE); g_return_val_if_fail (pull_data->maxdepth >= -1, FALSE); g_return_val_if_fail (!pull_data->timestamp_check || pull_data->maxdepth == 0, FALSE); g_return_val_if_fail (!opt_collection_refs_set || (refs_to_fetch == NULL && override_commit_ids == NULL), FALSE); if (refs_to_fetch && override_commit_ids) g_return_val_if_fail (g_strv_length (refs_to_fetch) == g_strv_length (override_commit_ids), FALSE); if (dir_to_pull) g_return_val_if_fail (dir_to_pull[0] == '/', FALSE); for (i = 0; dirs_to_pull != NULL && dirs_to_pull[i] != NULL; i++) g_return_val_if_fail (dirs_to_pull[i][0] == '/', FALSE); g_return_val_if_fail (!(pull_data->disable_static_deltas && pull_data->require_static_deltas), FALSE); /* We only do dry runs with static deltas, because we don't really have any * in-advance information for bare fetches. */ g_return_val_if_fail (!pull_data->dry_run || pull_data->require_static_deltas, FALSE); pull_data->is_mirror = (flags & OSTREE_REPO_PULL_FLAGS_MIRROR) > 0; pull_data->is_commit_only = (flags & OSTREE_REPO_PULL_FLAGS_COMMIT_ONLY) > 0; /* See our processing of OSTREE_REPO_PULL_FLAGS_UNTRUSTED below */ if ((flags & OSTREE_REPO_PULL_FLAGS_BAREUSERONLY_FILES) > 0) pull_data->importflags |= _OSTREE_REPO_IMPORT_FLAGS_VERIFY_BAREUSERONLY; pull_data->cancellable = cancellable ? 
g_object_ref (cancellable) : NULL; if (error) pull_data->async_error = &pull_data->cached_async_error; else pull_data->async_error = NULL; pull_data->main_context = g_main_context_ref_thread_default (); pull_data->flags = flags; pull_data->repo = self; pull_data->progress = progress; pull_data->expected_commit_sizes = g_hash_table_new_full (g_str_hash, g_str_equal, (GDestroyNotify)g_free, (GDestroyNotify)g_free); pull_data->commit_to_depth = g_hash_table_new_full (g_str_hash, g_str_equal, (GDestroyNotify)g_free, NULL); pull_data->summary_deltas_checksums = g_hash_table_new_full (g_str_hash, g_str_equal, (GDestroyNotify)g_free, (GDestroyNotify)g_free); pull_data->ref_original_commits = g_hash_table_new_full (ostree_collection_ref_hash, ostree_collection_ref_equal, (GDestroyNotify)NULL, (GDestroyNotify)g_variant_unref); pull_data->scanned_metadata = g_hash_table_new_full (ostree_hash_object_name, g_variant_equal, (GDestroyNotify)g_variant_unref, NULL); pull_data->fetched_detached_metadata = g_hash_table_new_full (g_str_hash, g_str_equal, (GDestroyNotify)g_free, NULL); pull_data->requested_content = g_hash_table_new_full (g_str_hash, g_str_equal, (GDestroyNotify)g_free, NULL); pull_data->requested_fallback_content = g_hash_table_new_full (g_str_hash, g_str_equal, (GDestroyNotify)g_free, NULL); pull_data->requested_metadata = g_hash_table_new_full (ostree_hash_object_name, g_variant_equal, (GDestroyNotify)g_variant_unref, NULL); pull_data->pending_fetch_content = g_hash_table_new_full (g_str_hash, g_str_equal, (GDestroyNotify)g_free, (GDestroyNotify)fetch_object_data_free); pull_data->pending_fetch_metadata = g_hash_table_new_full (ostree_hash_object_name, g_variant_equal, (GDestroyNotify)g_variant_unref, (GDestroyNotify)fetch_object_data_free); pull_data->pending_fetch_deltaparts = g_hash_table_new_full (NULL, NULL, (GDestroyNotify)fetch_static_delta_data_free, NULL); if (opt_localcache_repos && *opt_localcache_repos) { pull_data->localcache_repos = g_ptr_array_new_with_free_func (g_object_unref); for (char **it = opt_localcache_repos; it && *it; it++) { const char *localcache_path = *it; g_autoptr(GFile) localcache_file = g_file_new_for_path (localcache_path); g_autoptr(OstreeRepo) cacherepo = ostree_repo_new (localcache_file); if (!ostree_repo_open (cacherepo, cancellable, error)) goto out; g_ptr_array_add (pull_data->localcache_repos, g_steal_pointer (&cacherepo)); } } if (dir_to_pull != NULL || dirs_to_pull != NULL) { pull_data->dirs = g_ptr_array_new_with_free_func (g_free); if (dir_to_pull != NULL) g_ptr_array_add (pull_data->dirs, g_strdup (dir_to_pull)); if (dirs_to_pull != NULL) { for (i = 0; dirs_to_pull[i] != NULL; i++) g_ptr_array_add (pull_data->dirs, g_strdup (dirs_to_pull[i])); } } g_queue_init (&pull_data->scan_object_queue); pull_data->start_time = g_get_monotonic_time (); if (_ostree_repo_remote_name_is_file (remote_name_or_baseurl)) { /* For compatibility with pull-local, don't gpg verify local * pulls by default. */ if ((pull_data->gpg_verify || pull_data->gpg_verify_summary) && pull_data->remote_name == NULL) { g_set_error (error, G_IO_ERROR, G_IO_ERROR_FAILED, "Must specify remote name to enable gpg verification"); goto out; } } else { g_autofree char *unconfigured_state = NULL; pull_data->remote_name = g_strdup (remote_name_or_baseurl); /* Fetch GPG verification settings from remote if it wasn't already * explicitly set in the options. 
*/ if (!opt_gpg_verify_set) if (!ostree_repo_remote_get_gpg_verify (self, pull_data->remote_name, &pull_data->gpg_verify, error)) goto out; if (!opt_gpg_verify_summary_set) if (!ostree_repo_remote_get_gpg_verify_summary (self, pull_data->remote_name, &pull_data->gpg_verify_summary, error)) goto out; /* NOTE: If changing this, see the matching implementation in * ostree-sysroot-upgrader.c */ if (!ostree_repo_get_remote_option (self, pull_data->remote_name, "unconfigured-state", NULL, &unconfigured_state, error)) goto out; if (unconfigured_state) { g_set_error (error, G_IO_ERROR, G_IO_ERROR_FAILED, "remote unconfigured-state: %s", unconfigured_state); goto out; } } pull_data->phase = OSTREE_PULL_PHASE_FETCHING_REFS; if (!reinitialize_fetcher (pull_data, remote_name_or_baseurl, error)) goto out; pull_data->tmpdir_dfd = pull_data->repo->tmp_dir_fd; requested_refs_to_fetch = g_hash_table_new_full (ostree_collection_ref_hash, ostree_collection_ref_equal, (GDestroyNotify) ostree_collection_ref_free, g_free); commits_to_fetch = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL); if (!ostree_repo_get_remote_option (self, remote_name_or_baseurl, "metalink", NULL, &metalink_url_str, error)) goto out; if (!metalink_url_str) { g_autofree char *baseurl = NULL; if (url_override != NULL) baseurl = g_strdup (url_override); else if (!ostree_repo_remote_get_url (self, remote_name_or_baseurl, &baseurl, error)) goto out; if (g_str_has_prefix (baseurl, "mirrorlist=")) { if (!fetch_mirrorlist (pull_data->fetcher, baseurl + strlen ("mirrorlist="), &pull_data->meta_mirrorlist, cancellable, error)) goto out; } else { g_autoptr(OstreeFetcherURI) baseuri = _ostree_fetcher_uri_parse (baseurl, error); if (!baseuri) goto out; pull_data->meta_mirrorlist = g_ptr_array_new_with_free_func ((GDestroyNotify) _ostree_fetcher_uri_free); g_ptr_array_add (pull_data->meta_mirrorlist, g_steal_pointer (&baseuri)); } } else { g_autoptr(GBytes) summary_bytes = NULL; g_autoptr(OstreeFetcherURI) metalink_uri = _ostree_fetcher_uri_parse (metalink_url_str, error); g_autoptr(OstreeFetcherURI) target_uri = NULL; if (!metalink_uri) goto out; metalink = _ostree_metalink_new (pull_data->fetcher, "summary", OSTREE_MAX_METADATA_SIZE, metalink_uri); if (! 
_ostree_metalink_request_sync (metalink, &target_uri, &summary_bytes, cancellable, error)) goto out; /* XXX: would be interesting to implement metalink as another source of * mirrors here since we use it as such anyway (rather than the "usual" * use case of metalink, which is only for a single target filename) */ { g_autofree char *path = _ostree_fetcher_uri_get_path (target_uri); g_autofree char *basepath = g_path_get_dirname (path); g_autoptr(OstreeFetcherURI) new_target_uri = _ostree_fetcher_uri_new_path (target_uri, basepath); pull_data->meta_mirrorlist = g_ptr_array_new_with_free_func ((GDestroyNotify) _ostree_fetcher_uri_free); g_ptr_array_add (pull_data->meta_mirrorlist, g_steal_pointer (&new_target_uri)); } pull_data->summary = g_variant_new_from_bytes (OSTREE_SUMMARY_GVARIANT_FORMAT, summary_bytes, FALSE); } { g_autofree char *contenturl = NULL; if (metalink_url_str == NULL && url_override != NULL) contenturl = g_strdup (url_override); else if (!ostree_repo_get_remote_option (self, remote_name_or_baseurl, "contenturl", NULL, &contenturl, error)) goto out; if (contenturl == NULL) { pull_data->content_mirrorlist = g_ptr_array_ref (pull_data->meta_mirrorlist); } else { if (g_str_has_prefix (contenturl, "mirrorlist=")) { if (!fetch_mirrorlist (pull_data->fetcher, contenturl + strlen ("mirrorlist="), &pull_data->content_mirrorlist, cancellable, error)) goto out; } else { g_autoptr(OstreeFetcherURI) contenturi = _ostree_fetcher_uri_parse (contenturl, error); if (!contenturi) goto out; pull_data->content_mirrorlist = g_ptr_array_new_with_free_func ((GDestroyNotify) _ostree_fetcher_uri_free); g_ptr_array_add (pull_data->content_mirrorlist, g_steal_pointer (&contenturi)); } } } /* FIXME: Do we want an analogue of this which supports collection IDs? */ if (!ostree_repo_get_remote_list_option (self, remote_name_or_baseurl, "branches", &configured_branches, error)) goto out; /* TODO reindent later */ { OstreeFetcherURI *first_uri = pull_data->meta_mirrorlist->pdata[0]; g_autofree char *first_scheme = _ostree_fetcher_uri_get_scheme (first_uri); /* NB: we don't support local mirrors in mirrorlists, so if this passes, it * means that we're not using mirrorlists (see also fetch_mirrorlist()) */ if (g_str_equal (first_scheme, "file")) { g_autofree char *path = _ostree_fetcher_uri_get_path (first_uri); g_autoptr(GFile) remote_repo_path = g_file_new_for_path (path); pull_data->remote_repo_local = ostree_repo_new (remote_repo_path); if (!ostree_repo_open (pull_data->remote_repo_local, cancellable, error)) goto out; } else { if (!load_remote_repo_config (pull_data, &remote_config, cancellable, error)) goto out; if (!ot_keyfile_get_value_with_default (remote_config, "core", "mode", "bare", &remote_mode_str, error)) goto out; if (!ostree_repo_mode_from_string (remote_mode_str, &pull_data->remote_mode, error)) goto out; if (!ot_keyfile_get_boolean_with_default (remote_config, "core", "tombstone-commits", FALSE, &pull_data->has_tombstone_commits, error)) goto out; if (pull_data->remote_mode != OSTREE_REPO_MODE_ARCHIVE) { g_set_error (error, G_IO_ERROR, G_IO_ERROR_FAILED, "Can't pull from archives with mode \"%s\"", remote_mode_str); goto out; } } } /* Change some option defaults if we're actually pulling from a local * (filesystem accessible) repo. */ if (pull_data->remote_repo_local) { /* For local pulls, default to disabling static deltas so that the * exact object files are copied. 
*/ if (!pull_data->require_static_deltas) pull_data->disable_static_deltas = TRUE; /* Note the inversion here; PULL_FLAGS_UNTRUSTED is converted to * IMPORT_FLAGS_TRUSTED only if it's unset (and just for local repos). */ if ((flags & OSTREE_REPO_PULL_FLAGS_UNTRUSTED) == 0) pull_data->importflags |= _OSTREE_REPO_IMPORT_FLAGS_TRUSTED; } else { /* For non-local repos, we require the TRUSTED_HTTP pull flag to map to * the TRUSTED object import flag. In practice we don't do object imports * for HTTP, but it's easiest to use one set of flags between HTTP and * local imports. */ if (flags & OSTREE_REPO_PULL_FLAGS_TRUSTED_HTTP) pull_data->importflags |= _OSTREE_REPO_IMPORT_FLAGS_TRUSTED; } /* We can't use static deltas if pulling into an archive repo. */ if (self->mode == OSTREE_REPO_MODE_ARCHIVE) { if (pull_data->require_static_deltas) { g_set_error_literal (error, G_IO_ERROR, G_IO_ERROR_FAILED, "Can't use static deltas in an archive repo"); goto out; } pull_data->disable_static_deltas = TRUE; } /* It's not efficient to use static deltas if all we want is the commit * metadata. */ if (pull_data->is_commit_only) pull_data->disable_static_deltas = TRUE; pull_data->static_delta_superblocks = g_ptr_array_new_with_free_func ((GDestroyNotify)g_variant_unref); { g_autoptr(GBytes) bytes_sig = NULL; gsize i, n; g_autoptr(GVariant) refs = NULL; g_autoptr(GVariant) deltas = NULL; g_autoptr(GVariant) additional_metadata = NULL; gboolean summary_from_cache = FALSE; if (!pull_data->summary_data_sig) { if (!_ostree_fetcher_mirrored_request_to_membuf (pull_data->fetcher, pull_data->meta_mirrorlist, "summary.sig", OSTREE_FETCHER_REQUEST_OPTIONAL_CONTENT, &bytes_sig, OSTREE_MAX_METADATA_SIZE, cancellable, error)) goto out; } if (bytes_sig && !pull_data->remote_repo_local && !_ostree_repo_load_cache_summary_if_same_sig (self, remote_name_or_baseurl, bytes_sig, &bytes_summary, cancellable, error)) goto out; if (bytes_summary) summary_from_cache = TRUE; if (!pull_data->summary && !bytes_summary) { if (!_ostree_fetcher_mirrored_request_to_membuf (pull_data->fetcher, pull_data->meta_mirrorlist, "summary", OSTREE_FETCHER_REQUEST_OPTIONAL_CONTENT, &bytes_summary, OSTREE_MAX_METADATA_SIZE, cancellable, error)) goto out; } if (!bytes_summary && pull_data->gpg_verify_summary) { g_set_error (error, G_IO_ERROR, G_IO_ERROR_NOT_FOUND, "GPG verification enabled, but no summary found (use gpg-verify-summary=false in remote config to disable)"); goto out; } if (!bytes_summary && pull_data->require_static_deltas) { g_set_error (error, G_IO_ERROR, G_IO_ERROR_NOT_FOUND, "Fetch configured to require static deltas, but no summary found"); goto out; } if (!bytes_sig && pull_data->gpg_verify_summary) { g_set_error (error, OSTREE_GPG_ERROR, OSTREE_GPG_ERROR_NO_SIGNATURE, "GPG verification enabled, but no summary.sig found (use gpg-verify-summary=false in remote config to disable)"); goto out; } if (pull_data->gpg_verify_summary && bytes_summary && bytes_sig) { g_autoptr(OstreeGpgVerifyResult) result = NULL; result = ostree_repo_verify_summary (self, pull_data->remote_name, bytes_summary, bytes_sig, cancellable, error); if (!ostree_gpg_verify_result_require_valid_signature (result, error)) goto out; } if (bytes_summary) { pull_data->summary_data = g_bytes_ref (bytes_summary); pull_data->summary = g_variant_new_from_bytes (OSTREE_SUMMARY_GVARIANT_FORMAT, bytes_summary, FALSE); if (!g_variant_is_normal_form (pull_data->summary)) { g_set_error_literal (error, G_IO_ERROR, G_IO_ERROR_FAILED, "Not normal form"); goto out; } if 
(!g_variant_is_of_type (pull_data->summary, OSTREE_SUMMARY_GVARIANT_FORMAT)) { g_set_error (error, G_IO_ERROR, G_IO_ERROR_FAILED, "Doesn't match variant type '%s'", (char *)OSTREE_SUMMARY_GVARIANT_FORMAT); goto out; } if (bytes_sig) pull_data->summary_data_sig = g_bytes_ref (bytes_sig); } if (!summary_from_cache && bytes_summary && bytes_sig) { if (!pull_data->remote_repo_local && !_ostree_repo_cache_summary (self, remote_name_or_baseurl, bytes_summary, bytes_sig, cancellable, error)) goto out; } if (pull_data->summary) { additional_metadata = g_variant_get_child_value (pull_data->summary, 1); if (!g_variant_lookup (additional_metadata, OSTREE_SUMMARY_COLLECTION_ID, "&s", &main_collection_id)) main_collection_id = NULL; else if (!ostree_validate_collection_id (main_collection_id, error)) goto out; refs = g_variant_get_child_value (pull_data->summary, 0); for (i = 0, n = g_variant_n_children (refs); i < n; i++) { const char *refname; g_autoptr(GVariant) ref = g_variant_get_child_value (refs, i); g_variant_get_child (ref, 0, "&s", &refname); if (!ostree_validate_rev (refname, error)) goto out; if (pull_data->is_mirror && !refs_to_fetch && !opt_collection_refs_set) { g_hash_table_insert (requested_refs_to_fetch, ostree_collection_ref_new (main_collection_id, refname), NULL); } } g_autoptr(GVariant) collection_map = NULL; collection_map = g_variant_lookup_value (additional_metadata, OSTREE_SUMMARY_COLLECTION_MAP, G_VARIANT_TYPE ("a{sa(s(taya{sv}))}")); if (collection_map != NULL) { GVariantIter collection_map_iter; const char *collection_id; g_autoptr(GVariant) collection_refs = NULL; g_variant_iter_init (&collection_map_iter, collection_map); while (g_variant_iter_loop (&collection_map_iter, "{&s@a(s(taya{sv}))}", &collection_id, &collection_refs)) { if (!ostree_validate_collection_id (collection_id, error)) goto out; for (i = 0, n = g_variant_n_children (collection_refs); i < n; i++) { const char *refname; g_autoptr(GVariant) ref = g_variant_get_child_value (collection_refs, i); g_variant_get_child (ref, 0, "&s", &refname); if (!ostree_validate_rev (refname, error)) goto out; if (pull_data->is_mirror && !refs_to_fetch && !opt_collection_refs_set) { g_hash_table_insert (requested_refs_to_fetch, ostree_collection_ref_new (collection_id, refname), NULL); } } } } deltas = g_variant_lookup_value (additional_metadata, OSTREE_SUMMARY_STATIC_DELTAS, G_VARIANT_TYPE ("a{sv}")); n = deltas ? g_variant_n_children (deltas) : 0; for (i = 0; i < n; i++) { const char *delta; g_autoptr(GVariant) csum_v = NULL; guchar *csum_data = g_malloc (OSTREE_SHA256_DIGEST_LEN); g_autoptr(GVariant) ref = g_variant_get_child_value (deltas, i); g_variant_get_child (ref, 0, "&s", &delta); g_variant_get_child (ref, 1, "v", &csum_v); if (!validate_variant_is_csum (csum_v, error)) goto out; memcpy (csum_data, ostree_checksum_bytes_peek (csum_v), 32); g_hash_table_insert (pull_data->summary_deltas_checksums, g_strdup (delta), csum_data); } } } if (pull_data->is_mirror && !refs_to_fetch && !opt_collection_refs_set && !configured_branches) { if (!bytes_summary) { g_set_error (error, G_IO_ERROR, G_IO_ERROR_FAILED, "Fetching all refs was requested in mirror mode, but remote repository does not have a summary"); goto out; } } else if (opt_collection_refs_set) { const gchar *collection_id, *ref_name, *checksum; while (g_variant_iter_loop (collection_refs_iter, "(&s&s&s)", &collection_id, &ref_name, &checksum)) { g_hash_table_insert (requested_refs_to_fetch, ostree_collection_ref_new (collection_id, ref_name), (*checksum != '\0') ? 
g_strdup (checksum) : NULL); } } else if (refs_to_fetch != NULL) { char **strviter = refs_to_fetch; char **commitid_strviter = override_commit_ids ? override_commit_ids : NULL; while (*strviter) { const char *branch = *strviter; if (ostree_validate_checksum_string (branch, NULL)) { char *key = g_strdup (branch); g_hash_table_add (commits_to_fetch, key); } else { if (!ostree_validate_rev (branch, error)) goto out; char *commitid = commitid_strviter ? g_strdup (*commitid_strviter) : NULL; g_hash_table_insert (requested_refs_to_fetch, ostree_collection_ref_new (NULL, branch), commitid); } strviter++; if (commitid_strviter) commitid_strviter++; } } else { char **branches_iter; branches_iter = configured_branches; if (!(branches_iter && *branches_iter)) { g_set_error (error, G_IO_ERROR, G_IO_ERROR_FAILED, "No configured branches for remote %s", remote_name_or_baseurl); goto out; } for (;branches_iter && *branches_iter; branches_iter++) { const char *branch = *branches_iter; g_hash_table_insert (requested_refs_to_fetch, ostree_collection_ref_new (NULL, branch), NULL); } } /* Resolve the checksum for each ref. This has to be done into a new hash table, * since we can’t modify the keys of @requested_refs_to_fetch while iterating * over it, and we need to ensure the collection IDs are resolved too. */ updated_requested_refs_to_fetch = g_hash_table_new_full (ostree_collection_ref_hash, ostree_collection_ref_equal, (GDestroyNotify) ostree_collection_ref_free, g_free); GLNX_HASH_TABLE_FOREACH_KV (requested_refs_to_fetch, const OstreeCollectionRef*, ref, const char*, override_commitid) { g_autofree char *contents = NULL; /* Support specifying "" for an override commitid */ if (override_commitid && *override_commitid) { g_hash_table_replace (updated_requested_refs_to_fetch, ostree_collection_ref_dup (ref), g_strdup (override_commitid)); } else { g_autoptr(OstreeCollectionRef) ref_with_collection = NULL; if (pull_data->summary) { gsize commit_size = 0; guint64 *malloced_size; g_autofree gchar *collection_id = NULL; if (!lookup_commit_checksum_and_collection_from_summary (pull_data, ref, &contents, &commit_size, &collection_id, error)) goto out; ref_with_collection = ostree_collection_ref_new (collection_id, ref->ref_name); malloced_size = g_new0 (guint64, 1); *malloced_size = commit_size; g_hash_table_insert (pull_data->expected_commit_sizes, g_strdup (contents), malloced_size); } else { if (!fetch_ref_contents (pull_data, main_collection_id, ref, &contents, cancellable, error)) goto out; ref_with_collection = ostree_collection_ref_dup (ref); } /* If we have timestamp checking enabled, find the current value of * the ref, and store its timestamp in the hash map, to check later. */ if (pull_data->timestamp_check) { g_autofree char *from_rev = NULL; if (!ostree_repo_resolve_rev (pull_data->repo, ref_with_collection->ref_name, TRUE, &from_rev, error)) goto out; /* Explicitly store NULL if there's no previous revision. We do * this so we can assert() if we somehow didn't find a ref in the * hash at all. Note we don't copy the collection-ref, so the * lifetime of this hash must be equal to `requested_refs_to_fetch`. 
*/ g_hash_table_insert (pull_data->ref_original_commits, ref_with_collection, g_steal_pointer (&from_rev)); } g_hash_table_replace (updated_requested_refs_to_fetch, g_steal_pointer (&ref_with_collection), g_steal_pointer (&contents)); } } g_hash_table_unref (requested_refs_to_fetch); requested_refs_to_fetch = g_steal_pointer (&updated_requested_refs_to_fetch); if (g_hash_table_size (requested_refs_to_fetch) == 1) { GLNX_HASH_TABLE_FOREACH (requested_refs_to_fetch, const OstreeCollectionRef *, ref) { the_ref_to_fetch = ref->ref_name; break; } } /* Create the state directory here - it's new with the commitpartial code, * and may not exist in older repositories. */ if (mkdirat (pull_data->repo->repo_dir_fd, "state", 0777) != 0) { if (G_UNLIKELY (errno != EEXIST)) { glnx_set_error_from_errno (error); goto out; } } pull_data->phase = OSTREE_PULL_PHASE_FETCHING_OBJECTS; /* Now discard the previous fetcher, as it was bound to a temporary main context * for synchronous requests. */ if (!reinitialize_fetcher (pull_data, remote_name_or_baseurl, error)) goto out; pull_data->legacy_transaction_resuming = FALSE; if (!inherit_transaction && !ostree_repo_prepare_transaction (pull_data->repo, &pull_data->legacy_transaction_resuming, cancellable, error)) goto out; if (pull_data->legacy_transaction_resuming) g_debug ("resuming legacy transaction"); /* Initiate requests for explicit commit revisions */ GLNX_HASH_TABLE_FOREACH_V (commits_to_fetch, const char*, commit) { if (!initiate_request (pull_data, NULL, commit, error)) goto out; } /* Initiate requests for refs */ GLNX_HASH_TABLE_FOREACH_KV (requested_refs_to_fetch, const OstreeCollectionRef*, ref, const char*, to_revision) { if (!initiate_request (pull_data, ref, to_revision, error)) goto out; } if (pull_data->progress) { /* Setup a custom frequency if set */ if (update_frequency > 0) update_timeout = g_timeout_source_new (pull_data->dry_run ? 0 : update_frequency); else update_timeout = g_timeout_source_new_seconds (pull_data->dry_run ? 
0 : 1); g_source_set_priority (update_timeout, G_PRIORITY_HIGH); g_source_set_callback (update_timeout, update_progress, pull_data, NULL); g_source_attach (update_timeout, pull_data->main_context); g_source_unref (update_timeout); } /* Now await work completion */ while (!pull_termination_condition (pull_data)) g_main_context_iteration (pull_data->main_context, TRUE); if (pull_data->caught_error) goto out; if (pull_data->dry_run) { ret = TRUE; goto out; } g_assert_cmpint (pull_data->n_outstanding_metadata_fetches, ==, 0); g_assert_cmpint (pull_data->n_outstanding_metadata_write_requests, ==, 0); g_assert_cmpint (pull_data->n_outstanding_content_fetches, ==, 0); g_assert_cmpint (pull_data->n_outstanding_content_write_requests, ==, 0); GLNX_HASH_TABLE_FOREACH_KV (requested_refs_to_fetch, const OstreeCollectionRef*, ref, const char*, checksum) { g_autofree char *remote_ref = NULL; g_autofree char *original_rev = NULL; if (pull_data->remote_name) remote_ref = g_strdup_printf ("%s:%s", pull_data->remote_name, ref->ref_name); else remote_ref = g_strdup (ref->ref_name); if (!ostree_repo_resolve_rev (pull_data->repo, remote_ref, TRUE, &original_rev, error)) goto out; if (original_rev && strcmp (checksum, original_rev) == 0) { } else { if (pull_data->is_mirror) ostree_repo_transaction_set_collection_ref (pull_data->repo, ref, checksum); else ostree_repo_transaction_set_ref (pull_data->repo, pull_data->remote_name, ref->ref_name, checksum); } } if (pull_data->is_mirror && pull_data->summary_data && !refs_to_fetch && !configured_branches) { GLnxFileReplaceFlags replaceflag = pull_data->repo->disable_fsync ? GLNX_FILE_REPLACE_NODATASYNC : 0; gsize len; const guint8 *buf = g_bytes_get_data (pull_data->summary_data, &len); if (!glnx_file_replace_contents_at (pull_data->repo->repo_dir_fd, "summary", buf, len, replaceflag, cancellable, error)) goto out; if (pull_data->summary_data_sig) { buf = g_bytes_get_data (pull_data->summary_data_sig, &len); if (!glnx_file_replace_contents_at (pull_data->repo->repo_dir_fd, "summary.sig", buf, len, replaceflag, cancellable, error)) goto out; } } if (!inherit_transaction && !ostree_repo_commit_transaction (pull_data->repo, NULL, cancellable, error)) goto out; end_time = g_get_monotonic_time (); bytes_transferred = _ostree_fetcher_bytes_transferred (pull_data->fetcher); if (bytes_transferred > 0 && pull_data->progress) { guint shift; g_autoptr(GString) buf = g_string_new (""); /* Ensure the rest of the progress keys are set appropriately. */ update_progress (pull_data); if (bytes_transferred < 1024) shift = 1; else shift = 1024; if (pull_data->n_fetched_deltaparts > 0) g_string_append_printf (buf, "%u delta parts, %u loose fetched", pull_data->n_fetched_deltaparts, pull_data->n_fetched_metadata + pull_data->n_fetched_content); else g_string_append_printf (buf, "%u metadata, %u content objects fetched", pull_data->n_fetched_metadata, pull_data->n_fetched_content); if (pull_data->n_fetched_localcache_metadata || pull_data->n_fetched_localcache_content) g_string_append_printf (buf, " (%u meta, %u content local)", pull_data->n_fetched_localcache_metadata, pull_data->n_fetched_localcache_content); g_string_append_printf (buf, "; %" G_GUINT64_FORMAT " %s transferred in %u seconds", (guint64)(bytes_transferred / shift), shift == 1 ? 
"B" : "KiB", (guint) ((end_time - pull_data->start_time) / G_USEC_PER_SEC)); ostree_async_progress_set_status (pull_data->progress, buf->str); } #ifdef HAVE_LIBSYSTEMD if (bytes_transferred > 0 && pull_data->remote_name) { g_autoptr(GString) msg = g_string_new (""); if (the_ref_to_fetch) g_string_append_printf (msg, "libostree pull from '%s' for %s complete", pull_data->remote_name, the_ref_to_fetch); else g_string_append_printf (msg, "libostree pull from '%s' for %u refs complete", pull_data->remote_name, g_hash_table_size (requested_refs_to_fetch)); const char *gpg_verify_state; if (pull_data->gpg_verify_summary) { if (pull_data->gpg_verify) gpg_verify_state = "summary+commit"; else gpg_verify_state = "summary-only"; } else gpg_verify_state = (pull_data->gpg_verify ? "commit" : "disabled"); g_string_append_printf (msg, "\nsecurity: GPG: %s ", gpg_verify_state); OstreeFetcherURI *first_uri = pull_data->meta_mirrorlist->pdata[0]; g_autofree char *first_scheme = _ostree_fetcher_uri_get_scheme (first_uri); if (g_str_has_prefix (first_scheme, "http")) { g_string_append (msg, "http: "); switch (pull_data->fetcher_security_state) { case OSTREE_FETCHER_SECURITY_STATE_CA_PINNED: g_string_append (msg, "CA-pinned"); break; case OSTREE_FETCHER_SECURITY_STATE_TLS: g_string_append (msg, "TLS"); break; case OSTREE_FETCHER_SECURITY_STATE_INSECURE: g_string_append (msg, "insecure"); break; } } g_string_append (msg, "\n"); if (pull_data->n_fetched_deltaparts > 0) g_string_append_printf (msg, "delta: parts: %u loose: %u", pull_data->n_fetched_deltaparts, pull_data->n_fetched_metadata + pull_data->n_fetched_content); else g_string_append_printf (msg, "non-delta: meta: %u content: %u", pull_data->n_fetched_metadata, pull_data->n_fetched_content); const guint n_seconds = (guint) ((end_time - pull_data->start_time) / G_USEC_PER_SEC); g_autofree char *formatted_xferred = g_format_size (bytes_transferred); g_string_append_printf (msg, "\ntransfer: secs: %u size: %s", n_seconds, formatted_xferred); sd_journal_send ("MESSAGE=%s", msg->str, "MESSAGE_ID=" SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(OSTREE_MESSAGE_FETCH_COMPLETE_ID), "OSTREE_REMOTE=%s", pull_data->remote_name, "OSTREE_GPG=%s", gpg_verify_state, "OSTREE_SECONDS=%u", n_seconds, "OSTREE_XFER_SIZE=%s", formatted_xferred, NULL); } #endif /* iterate over commits fetched and delete any commitpartial files */ if (pull_data->dirs == NULL && !pull_data->is_commit_only) { GLNX_HASH_TABLE_FOREACH_V (requested_refs_to_fetch, const char*, checksum) { g_autofree char *commitpartial_path = _ostree_get_commitpartial_path (checksum); if (!ot_ensure_unlinked_at (pull_data->repo->repo_dir_fd, commitpartial_path, 0)) goto out; } GLNX_HASH_TABLE_FOREACH_V (commits_to_fetch, const char*, commit) { g_autofree char *commitpartial_path = _ostree_get_commitpartial_path (commit); if (!ot_ensure_unlinked_at (pull_data->repo->repo_dir_fd, commitpartial_path, 0)) goto out; } } ret = TRUE; out: /* This is pretty ugly - we have two error locations, because we * have a mix of synchronous and async code. Mixing them gets messy * as we need to avoid overwriting errors. 
*/ if (pull_data->cached_async_error && error && !*error) g_propagate_error (error, pull_data->cached_async_error); else g_clear_error (&pull_data->cached_async_error); if (!inherit_transaction) ostree_repo_abort_transaction (pull_data->repo, cancellable, NULL); g_main_context_unref (pull_data->main_context); if (update_timeout) g_source_destroy (update_timeout); g_strfreev (configured_branches); g_clear_object (&pull_data->fetcher); g_clear_pointer (&pull_data->extra_headers, (GDestroyNotify)g_variant_unref); g_clear_object (&pull_data->cancellable); g_clear_pointer (&pull_data->localcache_repos, (GDestroyNotify)g_ptr_array_unref); g_clear_object (&pull_data->remote_repo_local); g_free (pull_data->remote_name); g_clear_pointer (&pull_data->meta_mirrorlist, (GDestroyNotify) g_ptr_array_unref); g_clear_pointer (&pull_data->content_mirrorlist, (GDestroyNotify) g_ptr_array_unref); g_clear_pointer (&pull_data->summary_data, (GDestroyNotify) g_bytes_unref); g_clear_pointer (&pull_data->summary_data_sig, (GDestroyNotify) g_bytes_unref); g_clear_pointer (&pull_data->summary, (GDestroyNotify) g_variant_unref); g_clear_pointer (&pull_data->static_delta_superblocks, (GDestroyNotify) g_ptr_array_unref); g_clear_pointer (&pull_data->commit_to_depth, (GDestroyNotify) g_hash_table_unref); g_clear_pointer (&pull_data->expected_commit_sizes, (GDestroyNotify) g_hash_table_unref); g_clear_pointer (&pull_data->scanned_metadata, (GDestroyNotify) g_hash_table_unref); g_clear_pointer (&pull_data->fetched_detached_metadata, (GDestroyNotify) g_hash_table_unref); g_clear_pointer (&pull_data->summary_deltas_checksums, (GDestroyNotify) g_hash_table_unref); g_clear_pointer (&pull_data->ref_original_commits, (GDestroyNotify) g_hash_table_unref); g_clear_pointer (&pull_data->requested_content, (GDestroyNotify) g_hash_table_unref); g_clear_pointer (&pull_data->requested_fallback_content, (GDestroyNotify) g_hash_table_unref); g_clear_pointer (&pull_data->requested_metadata, (GDestroyNotify) g_hash_table_unref); g_clear_pointer (&pull_data->pending_fetch_content, (GDestroyNotify) g_hash_table_unref); g_clear_pointer (&pull_data->pending_fetch_metadata, (GDestroyNotify) g_hash_table_unref); g_clear_pointer (&pull_data->pending_fetch_deltaparts, (GDestroyNotify) g_hash_table_unref); g_queue_foreach (&pull_data->scan_object_queue, (GFunc) scan_object_queue_data_free, NULL); g_queue_clear (&pull_data->scan_object_queue); g_clear_pointer (&pull_data->idle_src, (GDestroyNotify) g_source_destroy); g_clear_pointer (&pull_data->dirs, (GDestroyNotify) g_ptr_array_unref); g_clear_pointer (&remote_config, (GDestroyNotify) g_key_file_unref); return ret; } #ifdef OSTREE_ENABLE_EXPERIMENTAL_API /* Structure used in ostree_repo_find_remotes_async() which stores metadata * about a given OSTree commit. This includes the metadata from the commit * #GVariant, plus some working state which is used to work out which remotes * have refs pointing to this commit. 
*/ typedef struct { gchar *checksum; /* always set */ guint64 commit_size; /* always set */ guint64 timestamp; /* 0 for unknown */ GVariant *additional_metadata; GArray *refs; /* (element-type gsize), indexes to refs which point to this commit on at least one remote */ } CommitMetadata; static void commit_metadata_free (CommitMetadata *info) { g_clear_pointer (&info->refs, g_array_unref); g_free (info->checksum); g_clear_pointer (&info->additional_metadata, g_variant_unref); g_free (info); } G_DEFINE_AUTOPTR_CLEANUP_FUNC (CommitMetadata, commit_metadata_free) static CommitMetadata * commit_metadata_new (const gchar *checksum, guint64 commit_size, guint64 timestamp, GVariant *additional_metadata) { g_autoptr(CommitMetadata) info = NULL; info = g_new0 (CommitMetadata, 1); info->checksum = g_strdup (checksum); info->commit_size = commit_size; info->timestamp = timestamp; info->additional_metadata = (additional_metadata != NULL) ? g_variant_ref (additional_metadata) : NULL; info->refs = g_array_new (FALSE, FALSE, sizeof (gsize)); return g_steal_pointer (&info); } /* Structure used in ostree_repo_find_remotes_async() to store a grid (or table) * of pointers, indexed by rows and columns. Basically an encapsulated 2D array. * See the comments in ostree_repo_find_remotes_async() for its semantics * there. */ typedef struct { gsize width; /* pointers */ gsize height; /* pointers */ gconstpointer pointers[]; /* n_pointers = width * height */ } PointerTable; static void pointer_table_free (PointerTable *table) { g_free (table); } G_DEFINE_AUTOPTR_CLEANUP_FUNC (PointerTable, pointer_table_free) /* Both dimensions are in numbers of pointers. */ static PointerTable * pointer_table_new (gsize width, gsize height) { g_autoptr(PointerTable) table = NULL; g_return_val_if_fail (width > 0, NULL); g_return_val_if_fail (height > 0, NULL); g_return_val_if_fail (width <= (G_MAXSIZE - sizeof (PointerTable)) / sizeof (gconstpointer) / height, NULL); table = g_malloc0 (sizeof (PointerTable) + sizeof (gconstpointer) * width * height); table->width = width; table->height = height; return g_steal_pointer (&table); } static gconstpointer pointer_table_get (const PointerTable *table, gsize x, gsize y) { g_return_val_if_fail (table != NULL, FALSE); g_return_val_if_fail (x < table->width, FALSE); g_return_val_if_fail (y < table->height, FALSE); return table->pointers[table->width * y + x]; } static void pointer_table_set (PointerTable *table, gsize x, gsize y, gconstpointer value) { g_return_if_fail (table != NULL); g_return_if_fail (x < table->width); g_return_if_fail (y < table->height); table->pointers[table->width * y + x] = value; } /* Validate the given struct contains a valid collection ID and ref name. */ static gboolean is_valid_collection_ref (const OstreeCollectionRef *ref) { return (ref != NULL && ostree_validate_rev (ref->ref_name, NULL) && ostree_validate_collection_id (ref->collection_id, NULL)); } /* Validate @refs is non-%NULL, non-empty, and contains only valid collection * and ref names. */ static gboolean is_valid_collection_ref_array (const OstreeCollectionRef * const *refs) { gsize i; if (refs == NULL || *refs == NULL) return FALSE; for (i = 0; refs[i] != NULL; i++) { if (!is_valid_collection_ref (refs[i])) return FALSE; } return TRUE; } /* Validate @finders is non-%NULL, non-empty, and contains only valid * #OstreeRepoFinder instances. 
*/ static gboolean is_valid_finder_array (OstreeRepoFinder **finders) { gsize i; if (finders == NULL || *finders == NULL) return FALSE; for (i = 0; finders[i] != NULL; i++) { if (!OSTREE_IS_REPO_FINDER (finders[i])) return FALSE; } return TRUE; } /* Closure used to carry inputs from ostree_repo_find_remotes_async() to * find_remotes_cb(). */ typedef struct { OstreeCollectionRef **refs; GVariant *options; OstreeAsyncProgress *progress; OstreeRepoFinder *default_finder_avahi; } FindRemotesData; static void find_remotes_data_free (FindRemotesData *data) { g_clear_object (&data->default_finder_avahi); g_clear_object (&data->progress); g_clear_pointer (&data->options, g_variant_unref); ostree_collection_ref_freev (data->refs); g_free (data); } G_DEFINE_AUTOPTR_CLEANUP_FUNC (FindRemotesData, find_remotes_data_free) static FindRemotesData * find_remotes_data_new (const OstreeCollectionRef * const *refs, GVariant *options, OstreeAsyncProgress *progress, OstreeRepoFinder *default_finder_avahi) { g_autoptr(FindRemotesData) data = NULL; data = g_new0 (FindRemotesData, 1); data->refs = ostree_collection_ref_dupv (refs); data->options = (options != NULL) ? g_variant_ref (options) : NULL; data->progress = (progress != NULL) ? g_object_ref (progress) : NULL; data->default_finder_avahi = (default_finder_avahi != NULL) ? g_object_ref (default_finder_avahi) : NULL; return g_steal_pointer (&data); } static gchar * uint64_secs_to_iso8601 (guint64 secs) { g_autoptr(GDateTime) dt = g_date_time_new_from_unix_utc (secs); if (dt != NULL) return g_date_time_format (dt, "%FT%TZ"); else return g_strdup ("invalid"); } static gint sort_results_cb (gconstpointer a, gconstpointer b) { const OstreeRepoFinderResult **result_a = (const OstreeRepoFinderResult **) a; const OstreeRepoFinderResult **result_b = (const OstreeRepoFinderResult **) b; return ostree_repo_finder_result_compare (*result_a, *result_b); } static void repo_finder_result_free0 (OstreeRepoFinderResult *result) { if (result == NULL) return; ostree_repo_finder_result_free (result); } static void find_remotes_cb (GObject *obj, GAsyncResult *result, gpointer user_data); /** * ostree_repo_find_remotes_async: * @self: an #OstreeRepo * @refs: (array zero-terminated=1): non-empty array of collection–ref pairs to find remotes for * @options: (nullable): a GVariant `a{sv}` with an extensible set of flags * @finders: (array zero-terminated=1) (transfer none): non-empty array of * #OstreeRepoFinder instances to use, or %NULL to use the system defaults * @progress: (nullable): an #OstreeAsyncProgress to update with the operation’s * progress, or %NULL * @cancellable: (nullable): a #GCancellable, or %NULL * @callback: asynchronous completion callback * @user_data: data to pass to @callback * * Find reachable remote URIs which claim to provide any of the given named * @refs. This will search for configured remotes (#OstreeRepoFinderConfig), * mounted volumes (#OstreeRepoFinderMount) and (if enabled at compile time) * local network peers (#OstreeRepoFinderAvahi). In order to use a custom * configuration of #OstreeRepoFinder instances, call * ostree_repo_finder_resolve_all_async() on them individually. * * Any remote which is found and which claims to support any of the given @refs * will be returned in the results. It is possible that a remote claims to * support a given ref, but turns out not to — it is not possible to verify this * until ostree_repo_pull_from_remotes_async() is called. 
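 *
 * As a purely illustrative sketch (names such as repo, cancellable, find_cb
 * and the example collection ID and ref name are placeholders, not part of
 * this API), a caller might look roughly like this:
 *
 * |[<!-- language="C" -->
 * static void
 * find_cb (GObject *obj, GAsyncResult *result, gpointer user_data)
 * {
 *   g_autoptr(GError) error = NULL;
 *   OstreeRepoFinderResult **results =
 *     ostree_repo_find_remotes_finish (OSTREE_REPO (obj), result, &error);
 *   // Inspect @results, typically hand them to
 *   // ostree_repo_pull_from_remotes_async(), then free each element with
 *   // ostree_repo_finder_result_free() and the array itself with g_free().
 * }
 *
 * g_autoptr(OstreeCollectionRef) ref =
 *   ostree_collection_ref_new ("org.example.Os", "exampleos/x86_64/stable");
 * const OstreeCollectionRef * const refs[] = { ref, NULL };
 *
 * ostree_repo_find_remotes_async (repo, refs, NULL, NULL, NULL,
 *                                 cancellable, find_cb, NULL);
 * ]|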
* * The returned results will be sorted with the most useful first — this is * typically the remote which claims to provide the most of @refs, at the lowest * latency. * * Each result contains a list of the subset of @refs it claims to provide. It * is possible for a non-empty list of results to be returned, but for some of * @refs to not be listed in any of the results. Callers must check for this. * * Pass the results to ostree_repo_pull_from_remotes_async() to pull the given @refs * from those remotes. * * No @options are currently supported. * * @finders must be a non-empty %NULL-terminated array of the #OstreeRepoFinder * instances to use, or %NULL to use the system default set of finders, which * will typically be all available finders using their default options (but * this is not guaranteed). * * GPG verification of commits will be used unconditionally. * * This will use the thread-default #GMainContext, but will not iterate it. * * Since: 2017.8 */ void ostree_repo_find_remotes_async (OstreeRepo *self, const OstreeCollectionRef * const *refs, GVariant *options, OstreeRepoFinder **finders, OstreeAsyncProgress *progress, GCancellable *cancellable, GAsyncReadyCallback callback, gpointer user_data) { g_autoptr(GTask) task = NULL; g_autoptr(FindRemotesData) data = NULL; OstreeRepoFinder *default_finders[4] = { NULL, }; g_autoptr(OstreeRepoFinder) finder_config = NULL; g_autoptr(OstreeRepoFinder) finder_mount = NULL; g_autoptr(OstreeRepoFinder) finder_avahi = NULL; g_return_if_fail (OSTREE_IS_REPO (self)); g_return_if_fail (is_valid_collection_ref_array (refs)); g_return_if_fail (options == NULL || g_variant_is_of_type (options, G_VARIANT_TYPE_VARDICT)); g_return_if_fail (finders == NULL || is_valid_finder_array (finders)); g_return_if_fail (progress == NULL || OSTREE_IS_ASYNC_PROGRESS (progress)); g_return_if_fail (cancellable == NULL || G_IS_CANCELLABLE (cancellable)); /* Set up a task for the whole operation. */ task = g_task_new (self, cancellable, callback, user_data); g_task_set_source_tag (task, ostree_repo_find_remotes_async); /* Are we using #OstreeRepoFinders provided by the user, or the defaults? */ if (finders == NULL) { #ifdef HAVE_AVAHI GMainContext *context = g_main_context_get_thread_default (); g_autoptr(GError) local_error = NULL; #endif /* HAVE_AVAHI */ finder_config = OSTREE_REPO_FINDER (ostree_repo_finder_config_new ()); finder_mount = OSTREE_REPO_FINDER (ostree_repo_finder_mount_new (NULL)); #ifdef HAVE_AVAHI finder_avahi = OSTREE_REPO_FINDER (ostree_repo_finder_avahi_new (context)); #endif /* HAVE_AVAHI */ default_finders[0] = finder_config; default_finders[1] = finder_mount; default_finders[2] = finder_avahi; finders = default_finders; #ifdef HAVE_AVAHI ostree_repo_finder_avahi_start (OSTREE_REPO_FINDER_AVAHI (finder_avahi), &local_error); if (local_error != NULL) { g_warning ("Avahi finder failed; removing it: %s", local_error->message); default_finders[2] = NULL; g_clear_object (&finder_avahi); } #endif /* HAVE_AVAHI */ } /* We need to keep a pointer to the default Avahi finder so we can stop it * again after the operation, which happens implicitly by dropping the final * ref. */ data = find_remotes_data_new (refs, options, progress, finder_avahi); g_task_set_task_data (task, g_steal_pointer (&data), (GDestroyNotify) find_remotes_data_free); /* Asynchronously resolve all possible remotes for the given refs. 
*/ ostree_repo_finder_resolve_all_async (finders, refs, self, cancellable, find_remotes_cb, g_steal_pointer (&task)); } /* Find the first instance of (@collection_id, @ref_name) in @refs and return * its index; or return %FALSE if nothing’s found. */ static gboolean collection_refv_contains (const OstreeCollectionRef * const *refs, const gchar *collection_id, const gchar *ref_name, gsize *out_index) { gsize i; for (i = 0; refs[i] != NULL; i++) { if (g_str_equal (refs[i]->collection_id, collection_id) && g_str_equal (refs[i]->ref_name, ref_name)) { *out_index = i; return TRUE; } } return FALSE; } /* For each ref from @refs which is listed in @summary_refs, cache its metadata * from the summary file entry into @commit_metadatas, and add the checksum it * points to into @refs_and_remotes_table at (@ref_index, @result_index). * @ref_index is the ref’s index in @refs. */ static gboolean find_remotes_process_refs (OstreeRepo *self, const OstreeCollectionRef * const *refs, OstreeRepoFinderResult *result, gsize result_index, const gchar *summary_collection_id, GVariant *summary_refs, GHashTable *commit_metadatas, PointerTable *refs_and_remotes_table) { gsize j, n; for (j = 0, n = g_variant_n_children (summary_refs); j < n; j++) { const guchar *csum_bytes; g_autoptr(GVariant) ref_v = NULL, csum_v = NULL, commit_metadata_v = NULL, stored_commit_metadata_v = NULL; guint64 commit_size, commit_timestamp; gchar tmp_checksum[OSTREE_SHA256_STRING_LEN + 1]; gsize ref_index; g_autoptr(GDateTime) dt = NULL; g_autoptr(GError) error = NULL; const gchar *ref_name; CommitMetadata *commit_metadata; /* Check the ref name. */ ref_v = g_variant_get_child_value (summary_refs, j); g_variant_get_child (ref_v, 0, "&s", &ref_name); if (!ostree_validate_rev (ref_name, &error)) { g_debug ("%s: Summary for result ‘%s’ contained invalid ref name ‘%s’: %s", G_STRFUNC, result->remote->name, ref_name, error->message); return FALSE; } /* Check the commit checksum. */ g_variant_get_child (ref_v, 1, "(t@ay@a{sv})", &commit_size, &csum_v, &commit_metadata_v); csum_bytes = ostree_checksum_bytes_peek_validate (csum_v, &error); if (csum_bytes == NULL) { g_debug ("%s: Summary for result ‘%s’ contained invalid ref checksum: %s", G_STRFUNC, result->remote->name, error->message); return FALSE; } ostree_checksum_inplace_from_bytes (csum_bytes, tmp_checksum); /* Is this a ref we care about? */ if (!collection_refv_contains (refs, summary_collection_id, ref_name, &ref_index)) continue; /* Load the commit metadata from disk if possible, for verification. */ if (!ostree_repo_load_commit (self, tmp_checksum, &stored_commit_metadata_v, NULL, NULL)) stored_commit_metadata_v = NULL; /* Check the additional metadata. */ if (!g_variant_lookup (commit_metadata_v, OSTREE_COMMIT_TIMESTAMP, "t", &commit_timestamp)) commit_timestamp = 0; /* unknown */ else commit_timestamp = GUINT64_FROM_BE (commit_timestamp); dt = g_date_time_new_from_unix_utc (commit_timestamp); if (dt == NULL) { g_debug ("%s: Summary for result ‘%s’ contained commit timestamp %" G_GUINT64_FORMAT " which is too far in the future. Resetting to 0.", G_STRFUNC, result->remote->name, commit_timestamp); commit_timestamp = 0; } /* Check and store the commit metadata. */ commit_metadata = g_hash_table_lookup (commit_metadatas, tmp_checksum); if (commit_metadata == NULL) { commit_metadata = commit_metadata_new (tmp_checksum, commit_size, (stored_commit_metadata_v != NULL) ? 
ostree_commit_get_timestamp (stored_commit_metadata_v) : 0, NULL); g_hash_table_insert (commit_metadatas, commit_metadata->checksum, commit_metadata /* transfer */); } /* Update the metadata if possible. */ if (commit_metadata->timestamp == 0) { commit_metadata->timestamp = commit_timestamp; } else if (commit_timestamp != 0 && commit_metadata->timestamp != commit_timestamp) { g_debug ("%s: Summary for result ‘%s’ contained commit timestamp %" G_GUINT64_FORMAT " which did not match existing timestamp %" G_GUINT64_FORMAT ". Ignoring.", G_STRFUNC, result->remote->name, commit_timestamp, commit_metadata->timestamp); return FALSE; } if (commit_size != commit_metadata->commit_size) { g_debug ("%s: Summary for result ‘%s’ contained commit size %" G_GUINT64_FORMAT "B which did not match existing size %" G_GUINT64_FORMAT "B. Ignoring.", G_STRFUNC, result->remote->name, commit_size, commit_metadata->commit_size); return FALSE; } pointer_table_set (refs_and_remotes_table, ref_index, result_index, commit_metadata->checksum); g_array_append_val (commit_metadata->refs, ref_index); g_debug ("%s: Remote ‘%s’ lists ref ‘%s’ mapping to commit ‘%s’.", G_STRFUNC, result->remote->name, ref_name, commit_metadata->checksum); } return TRUE; } static void find_remotes_cb (GObject *obj, GAsyncResult *result, gpointer user_data) { OstreeRepo *self; g_autoptr(GTask) task = NULL; GCancellable *cancellable; const FindRemotesData *data; const OstreeCollectionRef * const *refs; /* FIXME: We currently do nothing with @progress. Comment out to assuage -Wunused-variable */ /* OstreeAsyncProgress *progress; */ g_autoptr(GError) error = NULL; g_autoptr(GPtrArray) results = NULL; /* (element-type OstreeRepoFinderResult) */ gsize i; g_autoptr(PointerTable) refs_and_remotes_table = NULL; /* (element-type commit-checksum) */ g_autoptr(GHashTable) commit_metadatas = NULL; /* (element-type commit-checksum CommitMetadata) */ g_autoptr(OstreeFetcher) fetcher = NULL; g_autofree const gchar **ref_to_latest_commit = NULL; /* indexed as @refs; (element-type commit-checksum) */ gsize n_refs; g_autoptr(GPtrArray) remotes_to_remove = NULL; /* (element-type OstreeRemote) */ g_autoptr(GPtrArray) final_results = NULL; /* (element-type OstreeRepoFinderResult) */ task = G_TASK (user_data); self = OSTREE_REPO (g_task_get_source_object (task)); cancellable = g_task_get_cancellable (task); data = g_task_get_task_data (task); refs = (const OstreeCollectionRef * const *) data->refs; /* progress = data->progress; */ /* Finish finding the remotes. */ results = ostree_repo_finder_resolve_all_finish (result, &error); if (results == NULL) { g_task_return_error (task, g_steal_pointer (&error)); return; } if (results->len == 0) { g_task_return_pointer (task, g_steal_pointer (&results), (GDestroyNotify) g_ptr_array_unref); return; } /* Throughout this function, we eliminate invalid results from @results by * clearing them to %NULL. We cannot remove them from the array, as that messes * up iteration and stored array indices. Accordingly, we need the free function * to be %NULL-safe. */ g_ptr_array_set_free_func (results, (GDestroyNotify) repo_finder_result_free0); /* FIXME: Add support for options: * - override-commit-ids (allow downgrades) * * Use case: multiple pulls of separate subdirs; want them to use the same * configuration. * Use case: downgrading a flatpak app. */ /* FIXME: In future, we also want to pull static delta superblocks in this * phase, so that we have all the metadata we need for accurate size * estimation for the actual pull operation. 
This should check the * disable-static-deltas option first. */ /* Each key must be a pointer to the #CommitMetadata.checksum field of its value. */ commit_metadatas = g_hash_table_new_full (g_str_hash, g_str_equal, NULL, (GDestroyNotify) commit_metadata_free); /* X dimension is an index into @refs. Y dimension is an index into @results. * Each cell stores the commit checksum which that ref resolves to on that * remote, or %NULL if the remote doesn’t have that ref. */ n_refs = g_strv_length ((gchar **) refs); /* it’s not a GStrv, but this works */ refs_and_remotes_table = pointer_table_new (n_refs, results->len); remotes_to_remove = g_ptr_array_new_with_free_func (NULL); /* Fetch and validate the summary file for each result. */ /* FIXME: All these downloads could be parallelised; that requires the * ostree_repo_remote_fetch_summary_with_options() API to be async. */ for (i = 0; i < results->len; i++) { OstreeRepoFinderResult *result = g_ptr_array_index (results, i); g_autoptr(GBytes) summary_bytes = NULL; g_autoptr(GVariant) summary_v = NULL; guint64 summary_last_modified; g_autoptr(GVariant) summary_refs = NULL; g_autoptr(GVariant) additional_metadata_v = NULL; g_autofree gchar *summary_collection_id = NULL; g_autoptr(GVariantIter) summary_collection_map = NULL; gboolean invalid_result = FALSE; /* Add the remote to our internal list of remotes, so other libostree * API can access it. */ if (!_ostree_repo_add_remote (self, result->remote)) g_ptr_array_add (remotes_to_remove, result->remote); g_debug ("%s: Fetching summary for remote ‘%s’ with keyring ‘%s’.", G_STRFUNC, result->remote->name, result->remote->keyring); /* Download the summary. This will load from the cache if possible. */ ostree_repo_remote_fetch_summary_with_options (self, result->remote->name, NULL, /* no options */ &summary_bytes, NULL, cancellable, &error); if (g_error_matches (error, G_IO_ERROR, G_IO_ERROR_CANCELLED)) goto error; else if (error != NULL) { g_debug ("%s: Failed to download summary for result ‘%s’. Ignoring. %s", G_STRFUNC, result->remote->name, error->message); g_clear_pointer (&g_ptr_array_index (results, i), (GDestroyNotify) ostree_repo_finder_result_free); g_clear_error (&error); continue; } else if (summary_bytes == NULL) { g_debug ("%s: Failed to download summary for result ‘%s’. Ignoring. %s", G_STRFUNC, result->remote->name, "No summary file exists on server"); g_clear_pointer (&g_ptr_array_index (results, i), (GDestroyNotify) ostree_repo_finder_result_free); continue; } /* Check the metadata in the summary file, especially whether it contains * all the @refs we are interested in. */ summary_v = g_variant_new_from_bytes (OSTREE_SUMMARY_GVARIANT_FORMAT, summary_bytes, FALSE); /* Check the summary’s additional metadata and set up @commit_metadata * and @refs_and_remotes_table with all the refs listed in the summary * file which intersect with @refs. 
*/ additional_metadata_v = g_variant_get_child_value (summary_v, 1); if (g_variant_lookup (additional_metadata_v, OSTREE_SUMMARY_COLLECTION_ID, "s", &summary_collection_id)) { summary_refs = g_variant_get_child_value (summary_v, 0); if (!find_remotes_process_refs (self, refs, result, i, summary_collection_id, summary_refs, commit_metadatas, refs_and_remotes_table)) { g_clear_pointer (&g_ptr_array_index (results, i), (GDestroyNotify) ostree_repo_finder_result_free); continue; } } if (!g_variant_lookup (additional_metadata_v, OSTREE_SUMMARY_COLLECTION_MAP, "a{sa(s(taya{sv}))}", &summary_collection_map)) summary_collection_map = NULL; while (summary_collection_map != NULL && g_variant_iter_loop (summary_collection_map, "{s@a(s(taya{sv}))}", &summary_collection_id, &summary_refs)) { if (!find_remotes_process_refs (self, refs, result, i, summary_collection_id, summary_refs, commit_metadatas, refs_and_remotes_table)) { g_clear_pointer (&g_ptr_array_index (results, i), (GDestroyNotify) ostree_repo_finder_result_free); invalid_result = TRUE; break; } } if (invalid_result) continue; /* Check the summary timestamp. */ if (!g_variant_lookup (additional_metadata_v, OSTREE_SUMMARY_LAST_MODIFIED, "t", &summary_last_modified)) summary_last_modified = 0; else summary_last_modified = GUINT64_FROM_BE (summary_last_modified); /* Update the stored result data. Clear the @ref_to_checksum map, since * it’s been moved to @refs_and_remotes_table and is now potentially out * of date. */ g_clear_pointer (&result->ref_to_checksum, g_hash_table_unref); result->summary_last_modified = summary_last_modified; } /* Fill in any gaps in the metadata for the most recent commits by pulling * the commit metadata from the remotes. The ‘most recent commits’ are the * set of head commits pointed to by the refs we just resolved from the * summary files. */ GLNX_HASH_TABLE_FOREACH_V (commit_metadatas, CommitMetadata*, commit_metadata) { char buf[_OSTREE_LOOSE_PATH_MAX]; g_autofree gchar *commit_filename = NULL; g_autoptr(GPtrArray) mirrorlist = NULL; /* (element-type OstreeFetcherURI) */ g_autoptr(GBytes) commit_bytes = NULL; g_autoptr(GVariant) commit_v = NULL; guint64 commit_timestamp; g_autoptr(GDateTime) dt = NULL; /* Already complete? */ if (commit_metadata->timestamp != 0) continue; _ostree_loose_path (buf, commit_metadata->checksum, OSTREE_OBJECT_TYPE_COMMIT, OSTREE_REPO_MODE_ARCHIVE); commit_filename = g_build_filename ("objects", buf, NULL); /* For each of the remotes whose summary files contain this ref, try * downloading the commit metadata until we succeed. Since the results are * in priority order, the most important remotes are tried first. */ for (i = 0; i < commit_metadata->refs->len; i++) { gsize ref_index = g_array_index (commit_metadata->refs, gsize, i); gsize j; for (j = 0; j < results->len; j++) { OstreeRepoFinderResult *result = g_ptr_array_index (results, j); /* Previous error processing this result? 
*/ if (result == NULL) continue; if (pointer_table_get (refs_and_remotes_table, ref_index, j) != commit_metadata->checksum) continue; g_autofree gchar *uri = NULL; g_autoptr(OstreeFetcherURI) fetcher_uri = NULL; if (!ostree_repo_remote_get_url (self, result->remote->name, &uri, &error)) goto error; fetcher_uri = _ostree_fetcher_uri_parse (uri, &error); if (fetcher_uri == NULL) goto error; fetcher = _ostree_repo_remote_new_fetcher (self, result->remote->name, TRUE, NULL, &error); if (fetcher == NULL) goto error; g_debug ("%s: Fetching metadata for commit ‘%s’ from remote ‘%s’.", G_STRFUNC, commit_metadata->checksum, result->remote->name); /* FIXME: Support remotes which have contenturl, mirrorlist, etc. */ mirrorlist = g_ptr_array_new_with_free_func ((GDestroyNotify) _ostree_fetcher_uri_free); g_ptr_array_add (mirrorlist, g_steal_pointer (&fetcher_uri)); if (!_ostree_fetcher_mirrored_request_to_membuf (fetcher, mirrorlist, commit_filename, OSTREE_FETCHER_REQUEST_OPTIONAL_CONTENT, &commit_bytes, 0, /* no maximum size */ cancellable, &error)) goto error; g_autoptr(OstreeGpgVerifyResult) verify_result = NULL; verify_result = ostree_repo_verify_commit_for_remote (self, commit_metadata->checksum, result->remote->name, cancellable, &error); if (verify_result == NULL) { g_prefix_error (&error, "Commit %s: ", commit_metadata->checksum); goto error; } if (!ostree_gpg_verify_result_require_valid_signature (verify_result, &error)) { g_prefix_error (&error, "Commit %s: ", commit_metadata->checksum); goto error; } if (commit_bytes != NULL) break; } if (commit_bytes != NULL) break; } if (commit_bytes == NULL) { g_set_error (&error, G_IO_ERROR, G_IO_ERROR_FAILED, "Metadata not found for commit ‘%s’", commit_metadata->checksum); goto error; } /* Parse the commit metadata. */ commit_v = g_variant_new_from_bytes (OSTREE_COMMIT_GVARIANT_FORMAT, commit_bytes, FALSE); g_variant_get_child (commit_v, 5, "t", &commit_timestamp); commit_timestamp = GUINT64_FROM_BE (commit_timestamp); dt = g_date_time_new_from_unix_utc (commit_timestamp); if (dt == NULL) { g_debug ("%s: Commit ‘%s’ metadata contained timestamp %" G_GUINT64_FORMAT " which is too far in the future. Resetting to 0.", G_STRFUNC, commit_metadata->checksum, commit_timestamp); commit_timestamp = 0; } /* Update the #CommitMetadata. */ commit_metadata->timestamp = commit_timestamp; } /* Find the latest commit for each ref. This is where we resolve the * differences between remotes: two remotes could both contain ref R, but one * remote could be outdated compared to the other, and point to an older * commit. For each ref, we want to find the most recent commit any remote * points to for it. * * @ref_to_latest_commit is indexed by @ref_index, and its values are the * latest checksum for each ref. 
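   *
   * For example, if remote A advertises ref R at commit X (timestamp 10) and
   * remote B advertises R at commit Y (timestamp 20), Y is recorded as the
   * latest commit for R; below, only remotes whose entry for R in
   * @refs_and_remotes_table equals Y are treated as providing R.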
*/ ref_to_latest_commit = g_new0 (const gchar *, n_refs); for (i = 0; i < n_refs; i++) { gsize j; const gchar *latest_checksum = NULL; const CommitMetadata *latest_commit_metadata = NULL; g_autofree gchar *latest_commit_timestamp_str = NULL; for (j = 0; j < results->len; j++) { const CommitMetadata *candidate_commit_metadata; const gchar *candidate_checksum; candidate_checksum = pointer_table_get (refs_and_remotes_table, i, j); if (candidate_checksum == NULL) continue; candidate_commit_metadata = g_hash_table_lookup (commit_metadatas, candidate_checksum); g_assert (candidate_commit_metadata != NULL); if (latest_commit_metadata == NULL || candidate_commit_metadata->timestamp > latest_commit_metadata->timestamp) { latest_checksum = candidate_checksum; latest_commit_metadata = candidate_commit_metadata; } } /* @latest_checksum could be %NULL here if there was an error downloading * the summary or commit metadata files above. */ ref_to_latest_commit[i] = latest_checksum; if (latest_commit_metadata != NULL) { latest_commit_timestamp_str = uint64_secs_to_iso8601 (latest_commit_metadata->timestamp); g_debug ("%s: Latest commit for ref (%s, %s) across all remotes is ‘%s’ with timestamp %s.", G_STRFUNC, refs[i]->collection_id, refs[i]->ref_name, latest_checksum, latest_commit_timestamp_str); } else { g_debug ("%s: Latest commit for ref (%s, %s) is unknown due to failure to download metadata.", G_STRFUNC, refs[i]->collection_id, refs[i]->ref_name); } } /* Recombine @commit_metadatas and @results so that each * #OstreeRepoFinderResult.refs lists the refs for which that remote has the * latest commits (i.e. it’s not out of date compared to some other remote). */ final_results = g_ptr_array_new_with_free_func ((GDestroyNotify) ostree_repo_finder_result_free); for (i = 0; i < results->len; i++) { OstreeRepoFinderResult *result = g_ptr_array_index (results, i); g_autoptr(GHashTable) validated_ref_to_checksum = NULL; /* (element-type utf8 utf8) */ gsize j, n_latest_refs; /* Previous error processing this result? */ if (result == NULL) continue; /* Map of refs to checksums provided by this result. The checksums should * be %NULL for each ref unless this result provides the latest checksum. */ validated_ref_to_checksum = g_hash_table_new_full (ostree_collection_ref_hash, ostree_collection_ref_equal, (GDestroyNotify) ostree_collection_ref_free, g_free); n_latest_refs = 0; for (j = 0; refs[j] != NULL; j++) { const gchar *latest_commit_for_ref = ref_to_latest_commit[j]; if (pointer_table_get (refs_and_remotes_table, j, i) != latest_commit_for_ref) latest_commit_for_ref = NULL; if (latest_commit_for_ref != NULL) n_latest_refs++; g_hash_table_insert (validated_ref_to_checksum, ostree_collection_ref_dup (refs[j]), g_strdup (latest_commit_for_ref)); } if (n_latest_refs == 0) { g_debug ("%s: Omitting remote ‘%s’ from results as none of its refs are new enough.", G_STRFUNC, result->remote->name); ostree_repo_finder_result_free (g_steal_pointer (&g_ptr_array_index (results, i))); continue; } result->ref_to_checksum = g_steal_pointer (&validated_ref_to_checksum); g_ptr_array_add (final_results, g_steal_pointer (&g_ptr_array_index (results, i))); } /* Ensure the updated results are still in priority order. */ g_ptr_array_sort (final_results, sort_results_cb); /* Remove the remotes we temporarily added. * FIXME: It would be so much better if we could pass #OstreeRemote pointers * around internally, to avoid serialising on the global table of them. 
*/ for (i = 0; i < remotes_to_remove->len; i++) { OstreeRemote *remote = g_ptr_array_index (remotes_to_remove, i); _ostree_repo_remove_remote (self, remote); } g_task_return_pointer (task, g_steal_pointer (&final_results), (GDestroyNotify) g_ptr_array_unref); return; error: /* Remove the remotes we temporarily added. */ for (i = 0; i < remotes_to_remove->len; i++) { OstreeRemote *remote = g_ptr_array_index (remotes_to_remove, i); _ostree_repo_remove_remote (self, remote); } g_task_return_error (task, g_steal_pointer (&error)); } /** * ostree_repo_find_remotes_finish: * @self: an #OstreeRepo * @result: the asynchronous result * @error: return location for a #GError, or %NULL * * Finish an asynchronous pull operation started with * ostree_repo_find_remotes_async(). * * Returns: (transfer full) (array zero-terminated=1): a potentially empty array * of #OstreeRepoFinderResults, followed by a %NULL terminator element; or * %NULL on error * Since: 2017.8 */ OstreeRepoFinderResult ** ostree_repo_find_remotes_finish (OstreeRepo *self, GAsyncResult *result, GError **error) { g_autoptr(GPtrArray) results = NULL; g_return_val_if_fail (OSTREE_IS_REPO (self), NULL); g_return_val_if_fail (g_task_is_valid (result, self), NULL); g_return_val_if_fail (g_async_result_is_tagged (result, ostree_repo_find_remotes_async), NULL); g_return_val_if_fail (error == NULL || *error == NULL, NULL); results = g_task_propagate_pointer (G_TASK (result), error); if (results != NULL) { g_ptr_array_add (results, NULL); /* NULL terminator */ return (OstreeRepoFinderResult **) g_ptr_array_free (g_steal_pointer (&results), FALSE); } else return NULL; } static void copy_option (GVariantDict *master_options, GVariantDict *slave_options, const gchar *key, const GVariantType *expected_type) { g_autoptr(GVariant) option_v = g_variant_dict_lookup_value (master_options, key, expected_type); if (option_v != NULL) g_variant_dict_insert_value (slave_options, key, g_steal_pointer (&option_v)); } /** * ostree_repo_pull_from_remotes_async: * @self: an #OstreeRepo * @results: (array zero-terminated=1): %NULL-terminated array of remotes to * pull from, including the refs to pull from each * @options: (nullable): A GVariant `a{sv}` with an extensible set of flags * @progress: (nullable): an #OstreeAsyncProgress to update with the operation’s * progress, or %NULL * @cancellable: (nullable): a #GCancellable, or %NULL * @callback: asynchronous completion callback * @user_data: data to pass to @callback * * Pull refs from multiple remotes which have been found using * ostree_repo_find_remotes_async(). * * @results are expected to be in priority order, with the best remotes to pull * from listed first. ostree_repo_pull_from_remotes_async() will generally pull * from the remotes in order, but may parallelise its downloads. * * If an error is encountered when pulling from a given remote, that remote will * be ignored and another will be tried instead. If any refs have not been * downloaded successfully after all remotes have been tried, %G_IO_ERROR_FAILED * will be returned. The results of any successful downloads will remain cached * in the local repository. * * If @cancellable is cancelled, %G_IO_ERROR_CANCELLED will be returned * immediately. The results of any successfully completed downloads at that * point will remain cached in the local repository. * * GPG verification of commits will be used unconditionally. 
* * The following @options are currently defined: * * * `flags` (`i`): #OstreeRepoPullFlags to apply to the pull operation * * `inherit-transaction` (`b`): %TRUE to inherit an ongoing transaction on * the #OstreeRepo, rather than encapsulating the pull in a new one * * Since: 2017.8 */ void ostree_repo_pull_from_remotes_async (OstreeRepo *self, const OstreeRepoFinderResult * const *results, GVariant *options, OstreeAsyncProgress *progress, GCancellable *cancellable, GAsyncReadyCallback callback, gpointer user_data) { g_return_if_fail (OSTREE_IS_REPO (self)); g_return_if_fail (results != NULL && results[0] != NULL); g_return_if_fail (options == NULL || g_variant_is_of_type (options, G_VARIANT_TYPE ("a{sv}"))); g_return_if_fail (progress == NULL || OSTREE_IS_ASYNC_PROGRESS (progress)); g_return_if_fail (cancellable == NULL || G_IS_CANCELLABLE (cancellable)); g_autoptr(GTask) task = NULL; g_autoptr(GHashTable) refs_pulled = NULL; /* (element-type OstreeCollectionRef gboolean) */ gsize i, j; g_autoptr(GString) refs_unpulled_string = NULL; g_autoptr(GError) local_error = NULL; g_auto(GVariantDict) options_dict = OT_VARIANT_BUILDER_INITIALIZER; OstreeRepoPullFlags flags; gboolean inherit_transaction; /* Set up a task for the whole operation. */ task = g_task_new (self, cancellable, callback, user_data); g_task_set_source_tag (task, ostree_repo_pull_from_remotes_async); /* Keep track of the set of refs we’ve pulled already. Value is %TRUE if the * ref has been pulled; %FALSE if it has not. */ refs_pulled = g_hash_table_new_full (ostree_collection_ref_hash, ostree_collection_ref_equal, NULL, NULL); g_variant_dict_init (&options_dict, options); if (!g_variant_dict_lookup (&options_dict, "flags", "i", &flags)) flags = OSTREE_REPO_PULL_FLAGS_NONE; if (!g_variant_dict_lookup (&options_dict, "inherit-transaction", "b", &inherit_transaction)) inherit_transaction = FALSE; /* Run all the local pull operations in a single overall transaction. */ if (!inherit_transaction && !ostree_repo_prepare_transaction (self, NULL, cancellable, &local_error)) { g_task_return_error (task, g_steal_pointer (&local_error)); return; } /* FIXME: Rework this code to pull in parallel where possible. At the moment * we expect the (i == 0) iteration will do all the work (all the refs) and * subsequent iterations are only there in case of error. * * The code is currently all synchronous, too. Making it asynchronous requires * the underlying pull code to be asynchronous. 
*/ for (i = 0; results[i] != NULL; i++) { const OstreeRepoFinderResult *result = results[i]; g_autoptr(GString) refs_to_pull_str = NULL; g_autoptr(GPtrArray) refs_to_pull = NULL; /* (element-type OstreeCollectionRef) */ g_auto(GVariantBuilder) refs_to_pull_builder = OT_VARIANT_BUILDER_INITIALIZER; g_auto(GVariantDict) local_options_dict = OT_VARIANT_BUILDER_INITIALIZER; g_autoptr(GVariant) local_options = NULL; gboolean remove_remote; refs_to_pull = g_ptr_array_new_with_free_func (NULL); refs_to_pull_str = g_string_new (""); g_variant_builder_init (&refs_to_pull_builder, G_VARIANT_TYPE ("a(sss)")); GLNX_HASH_TABLE_FOREACH_KV (result->ref_to_checksum, const OstreeCollectionRef*, ref, const char*, checksum) { if (checksum != NULL && !GPOINTER_TO_INT (g_hash_table_lookup (refs_pulled, ref))) { g_ptr_array_add (refs_to_pull, (gpointer) ref); g_variant_builder_add (&refs_to_pull_builder, "(sss)", ref->collection_id, ref->ref_name, checksum); if (refs_to_pull_str->len > 0) g_string_append (refs_to_pull_str, ", "); g_string_append_printf (refs_to_pull_str, "(%s, %s)", ref->collection_id, ref->ref_name); } } if (refs_to_pull->len == 0) { g_debug ("Ignoring remote ‘%s’ as it has no relevant refs or they " "have already been pulled.", result->remote->name); continue; } /* NULL terminators. */ g_ptr_array_add (refs_to_pull, NULL); g_debug ("Pulling from remote ‘%s’: %s", result->remote->name, refs_to_pull_str->str); /* Set up the pull options. */ g_variant_dict_init (&local_options_dict, NULL); g_variant_dict_insert (&local_options_dict, "flags", "i", OSTREE_REPO_PULL_FLAGS_UNTRUSTED | flags); g_variant_dict_insert_value (&local_options_dict, "collection-refs", g_variant_builder_end (&refs_to_pull_builder)); g_variant_dict_insert (&local_options_dict, "gpg-verify", "b", TRUE); g_variant_dict_insert (&local_options_dict, "gpg-verify-summary", "b", FALSE); g_variant_dict_insert (&local_options_dict, "inherit-transaction", "b", TRUE); copy_option (&options_dict, &local_options_dict, "depth", G_VARIANT_TYPE ("i")); copy_option (&options_dict, &local_options_dict, "disable-static-deltas", G_VARIANT_TYPE ("b")); copy_option (&options_dict, &local_options_dict, "http-headers", G_VARIANT_TYPE ("a(ss)")); copy_option (&options_dict, &local_options_dict, "subdirs", G_VARIANT_TYPE ("as")); copy_option (&options_dict, &local_options_dict, "update-frequency", G_VARIANT_TYPE ("u")); local_options = g_variant_dict_end (&local_options_dict); /* FIXME: We do nothing useful with @progress at the moment. */ remove_remote = !_ostree_repo_add_remote (self, result->remote); ostree_repo_pull_with_options (self, result->remote->name, local_options, progress, cancellable, &local_error); if (remove_remote) _ostree_repo_remove_remote (self, result->remote); if (g_error_matches (local_error, G_IO_ERROR, G_IO_ERROR_CANCELLED)) { if (!inherit_transaction) ostree_repo_abort_transaction (self, NULL, NULL); g_task_return_error (task, g_steal_pointer (&local_error)); return; } for (j = 0; refs_to_pull->pdata[j] != NULL; j++) g_hash_table_replace (refs_pulled, refs_to_pull->pdata[j], GINT_TO_POINTER (local_error == NULL)); if (local_error != NULL) { g_debug ("Failed to pull refs from ‘%s’: %s", result->remote->name, local_error->message); g_clear_error (&local_error); continue; } else { g_debug ("Pulled refs from ‘%s’.", result->remote->name); } } /* Commit the transaction. 
*/ if (!inherit_transaction && !ostree_repo_commit_transaction (self, NULL, cancellable, &local_error)) { g_task_return_error (task, g_steal_pointer (&local_error)); return; } /* Any refs left un-downloaded? If so, we’ve failed. */ GLNX_HASH_TABLE_FOREACH_KV (refs_pulled, const OstreeCollectionRef*, ref, gpointer, is_pulled_pointer) { gboolean is_pulled = GPOINTER_TO_INT (is_pulled_pointer); if (is_pulled) continue; if (refs_unpulled_string == NULL) refs_unpulled_string = g_string_new (""); else g_string_append (refs_unpulled_string, ", "); g_string_append_printf (refs_unpulled_string, "(%s, %s)", ref->collection_id, ref->ref_name); } if (refs_unpulled_string != NULL) { g_task_return_new_error (task, G_IO_ERROR, G_IO_ERROR_FAILED, "Failed to pull some refs from the remotes: %s", refs_unpulled_string->str); return; } g_task_return_boolean (task, TRUE); } /** * ostree_repo_pull_from_remotes_finish: * @self: an #OstreeRepo * @result: the asynchronous result * @error: return location for a #GError, or %NULL * * Finish an asynchronous pull operation started with * ostree_repo_pull_from_remotes_async(). * * Returns: %TRUE on success, %FALSE otherwise * Since: 2017.8 */ gboolean ostree_repo_pull_from_remotes_finish (OstreeRepo *self, GAsyncResult *result, GError **error) { g_return_val_if_fail (OSTREE_IS_REPO (self), FALSE); g_return_val_if_fail (g_task_is_valid (result, self), FALSE); g_return_val_if_fail (g_async_result_is_tagged (result, ostree_repo_pull_from_remotes_async), FALSE); g_return_val_if_fail (error == NULL || *error == NULL, FALSE); return g_task_propagate_boolean (G_TASK (result), error); } /* Check whether the given remote exists, has a `collection-id` key set, and it * equals @collection_id. If so, return %TRUE. Otherwise, %FALSE. */ static gboolean check_remote_matches_collection_id (OstreeRepo *repo, const gchar *remote_name, const gchar *collection_id) { g_autofree gchar *remote_collection_id = NULL; remote_collection_id = get_real_remote_repo_collection_id (repo, remote_name); if (remote_collection_id == NULL) return FALSE; return g_str_equal (remote_collection_id, collection_id); } /** * ostree_repo_resolve_keyring_for_collection: * @self: an #OstreeRepo * @collection_id: the collection ID to look up a keyring for * @cancellable: (nullable): a #GCancellable, or %NULL * @error: return location for a #GError, or %NULL * * Find the GPG keyring for the given @collection_id, using the local * configuration from the given #OstreeRepo. This will search the configured * remotes for ones whose `collection-id` key matches @collection_id, and will * return the GPG keyring from the first matching remote. * * If multiple remotes match and have different keyrings, a debug message will * be emitted, and the first result will be returned. It is expected that the * keyrings should match. * * If no match can be found, a %G_IO_ERROR_NOT_FOUND error will be returned. 
* * Returns: (transfer full): filename of the GPG keyring for @collection_id * Since: 2017.8 */ gchar * ostree_repo_resolve_keyring_for_collection (OstreeRepo *self, const gchar *collection_id, GCancellable *cancellable, GError **error) { gsize i; g_auto(GStrv) remotes = NULL; const OstreeRemote *keyring_remote = NULL; g_return_val_if_fail (OSTREE_IS_REPO (self), NULL); g_return_val_if_fail (ostree_validate_collection_id (collection_id, NULL), NULL); g_return_val_if_fail (cancellable == NULL || G_IS_CANCELLABLE (cancellable), NULL); g_return_val_if_fail (error == NULL || *error == NULL, NULL); /* Look through all the currently configured remotes for the given collection. */ remotes = ostree_repo_remote_list (self, NULL); for (i = 0; remotes != NULL && remotes[i] != NULL; i++) { g_autoptr(GError) local_error = NULL; if (!check_remote_matches_collection_id (self, remotes[i], collection_id)) continue; if (keyring_remote == NULL) { g_debug ("%s: Found match for collection ‘%s’ in remote ‘%s’.", G_STRFUNC, collection_id, remotes[i]); keyring_remote = _ostree_repo_get_remote_inherited (self, remotes[i], &local_error); if (keyring_remote == NULL) { g_debug ("%s: Error loading remote ‘%s’: %s", G_STRFUNC, remotes[i], local_error->message); continue; } if (g_strcmp0 (keyring_remote->keyring, "") == 0 || g_strcmp0 (keyring_remote->keyring, "/dev/null") == 0) { g_debug ("%s: Ignoring remote ‘%s’ as it has no keyring configured.", G_STRFUNC, remotes[i]); continue; } /* continue so we can catch duplicates */ } else { g_debug ("%s: Duplicate keyring for collection ‘%s’ in remote ‘%s’." "Keyring will be loaded from remote ‘%s’.", G_STRFUNC, collection_id, remotes[i], keyring_remote->name); } } if (keyring_remote != NULL) return g_strdup (keyring_remote->keyring); else { g_set_error (error, G_IO_ERROR, G_IO_ERROR_NOT_FOUND, "No keyring found configured locally for collection ‘%s’", collection_id); return NULL; } } #endif /* OSTREE_ENABLE_EXPERIMENTAL_API */ /** * ostree_repo_remote_fetch_summary_with_options: * @self: Self * @name: name of a remote * @options: (nullable): A GVariant a{sv} with an extensible set of flags * @out_summary: (out) (optional): return location for raw summary data, or * %NULL * @out_signatures: (out) (optional): return location for raw summary * signature data, or %NULL * @cancellable: a #GCancellable * @error: a #GError * * Like ostree_repo_remote_fetch_summary(), but supports an extensible set of flags. 
* The following are currently defined: * * - override-url (s): Fetch summary from this URL if remote specifies no metalink in options * - http-headers (a(ss)): Additional headers to add to all HTTP requests * * Returns: %TRUE on success, %FALSE on failure */ gboolean ostree_repo_remote_fetch_summary_with_options (OstreeRepo *self, const char *name, GVariant *options, GBytes **out_summary, GBytes **out_signatures, GCancellable *cancellable, GError **error) { g_autofree char *metalink_url_string = NULL; g_autoptr(GBytes) summary = NULL; g_autoptr(GBytes) signatures = NULL; gboolean ret = FALSE; gboolean gpg_verify_summary; g_return_val_if_fail (OSTREE_REPO (self), FALSE); g_return_val_if_fail (name != NULL, FALSE); if (!ostree_repo_get_remote_option (self, name, "metalink", NULL, &metalink_url_string, error)) goto out; if (!repo_remote_fetch_summary (self, name, metalink_url_string, options, &summary, &signatures, cancellable, error)) goto out; if (!ostree_repo_remote_get_gpg_verify_summary (self, name, &gpg_verify_summary, error)) goto out; if (gpg_verify_summary && signatures == NULL) { g_set_error (error, OSTREE_GPG_ERROR, OSTREE_GPG_ERROR_NO_SIGNATURE, "GPG verification enabled, but no summary signatures found (use gpg-verify-summary=false in remote config to disable)"); goto out; } /* Verify any summary signatures. */ if (gpg_verify_summary && summary != NULL && signatures != NULL) { g_autoptr(OstreeGpgVerifyResult) result = NULL; result = ostree_repo_verify_summary (self, name, summary, signatures, cancellable, error); if (!ostree_gpg_verify_result_require_valid_signature (result, error)) goto out; } if (out_summary != NULL) *out_summary = g_steal_pointer (&summary); if (out_signatures != NULL) *out_signatures = g_steal_pointer (&signatures); ret = TRUE; out: return ret; } #else /* HAVE_LIBCURL_OR_LIBSOUP */ gboolean ostree_repo_pull_with_options (OstreeRepo *self, const char *remote_name_or_baseurl, GVariant *options, OstreeAsyncProgress *progress, GCancellable *cancellable, GError **error) { g_set_error_literal (error, G_IO_ERROR, G_IO_ERROR_NOT_SUPPORTED, "This version of ostree was built without libsoup or libcurl, and cannot fetch over HTTP"); return FALSE; } gboolean ostree_repo_remote_fetch_summary_with_options (OstreeRepo *self, const char *name, GVariant *options, GBytes **out_summary, GBytes **out_signatures, GCancellable *cancellable, GError **error) { g_set_error_literal (error, G_IO_ERROR, G_IO_ERROR_NOT_SUPPORTED, "This version of ostree was built without libsoup or libcurl, and cannot fetch over HTTP"); return FALSE; } #endif /* HAVE_LIBCURL_OR_LIBSOUP */
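The file above documents exactly two option keys for ostree_repo_pull_from_remotes_async(), `flags` and `inherit-transaction`, both read back through a GVariantDict. The following is only a caller-side sketch, not code from the repository: the helper and callback names are invented, and in this version the API sits behind the OSTREE_ENABLE_EXPERIMENTAL_API guard. It shows how the `a{sv}` options dictionary might be assembled and passed in.

/* Sketch only: on_pull_done and start_pull are invented names. */
#include <ostree.h>

static void
on_pull_done (GObject *source, GAsyncResult *result, gpointer user_data)
{
  g_autoptr(GError) error = NULL;

  if (!ostree_repo_pull_from_remotes_finish (OSTREE_REPO (source), result, &error))
    g_warning ("Pull failed: %s", error->message);
}

static void
start_pull (OstreeRepo *repo,
            const OstreeRepoFinderResult * const *results,
            GCancellable *cancellable)
{
  GVariantDict dict;
  g_autoptr(GVariant) options = NULL;

  /* Only the two documented keys are set here. */
  g_variant_dict_init (&dict, NULL);
  g_variant_dict_insert (&dict, "flags", "i", OSTREE_REPO_PULL_FLAGS_NONE);
  g_variant_dict_insert (&dict, "inherit-transaction", "b", FALSE);
  options = g_variant_ref_sink (g_variant_dict_end (&dict));

  ostree_repo_pull_from_remotes_async (repo, results, options,
                                       NULL /* progress */, cancellable,
                                       on_pull_done, NULL);
}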
1
12,694
Minor: can we drop the leading `!` and just swap the `if` and `else` blocks? (See the sketch below.)
ostreedev-ostree
c
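The review message above asks for the leading negation to be dropped and the branches swapped. The hunk it refers to is not included in this excerpt, so the snippet below is only a hypothetical before/after of that kind of restructuring; the function names are placeholders.

#include <glib.h>

static void do_main_work (void) { /* common path */ }
static void do_fallback  (void) { /* rare path   */ }

/* Before: the leading `!` makes the reader take the rare path first. */
static void
handle_before (gboolean ready)
{
  if (!ready)
    do_fallback ();
  else
    do_main_work ();
}

/* After: same behaviour, but the condition reads positively and the
 * common path comes first. */
static void
handle_after (gboolean ready)
{
  if (ready)
    do_main_work ();
  else
    do_fallback ();
}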
@@ -102,7 +102,9 @@ class SCNetRoIHead(CascadeRoIHead): bbox_head = self.bbox_head[stage] bbox_feats = bbox_roi_extractor( x[:len(bbox_roi_extractor.featmap_strides)], rois) - if self.with_semantic and semantic_feat is not None: + # bbox_feats.shape[0] > 0 means the number of proposals is not 0. + if self.with_semantic and semantic_feat is not None \ + and bbox_feats.shape[0] > 0: bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat], rois) if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]:
1
import numpy as np import torch import torch.nn.functional as F from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, merge_aug_bboxes, merge_aug_masks, multiclass_nms) from ..builder import HEADS, build_head, build_roi_extractor from .cascade_roi_head import CascadeRoIHead @HEADS.register_module() class SCNetRoIHead(CascadeRoIHead): """RoIHead for `SCNet <https://arxiv.org/abs/2012.10150>`_. Args: num_stages (int): number of cascade stages. stage_loss_weights (list): loss weight of cascade stages. semantic_roi_extractor (dict): config to init semantic roi extractor. semantic_head (dict): config to init semantic head. feat_relay_head (dict): config to init feature_relay_head. glbctx_head (dict): config to init global context head. """ def __init__(self, num_stages, stage_loss_weights, semantic_roi_extractor=None, semantic_head=None, feat_relay_head=None, glbctx_head=None, **kwargs): super(SCNetRoIHead, self).__init__(num_stages, stage_loss_weights, **kwargs) assert self.with_bbox and self.with_mask assert not self.with_shared_head # shared head is not supported if semantic_head is not None: self.semantic_roi_extractor = build_roi_extractor( semantic_roi_extractor) self.semantic_head = build_head(semantic_head) if feat_relay_head is not None: self.feat_relay_head = build_head(feat_relay_head) if glbctx_head is not None: self.glbctx_head = build_head(glbctx_head) def init_mask_head(self, mask_roi_extractor, mask_head): """Initialize ``mask_head``""" if mask_roi_extractor is not None: self.mask_roi_extractor = build_roi_extractor(mask_roi_extractor) self.mask_head = build_head(mask_head) @property def with_semantic(self): """bool: whether the head has semantic head""" return hasattr(self, 'semantic_head') and self.semantic_head is not None @property def with_feat_relay(self): """bool: whether the head has feature relay head""" return (hasattr(self, 'feat_relay_head') and self.feat_relay_head is not None) @property def with_glbctx(self): """bool: whether the head has global context head""" return hasattr(self, 'glbctx_head') and self.glbctx_head is not None def _fuse_glbctx(self, roi_feats, glbctx_feat, rois): """Fuse global context feats with roi feats.""" assert roi_feats.size(0) == rois.size(0) img_inds = torch.unique(rois[:, 0].cpu(), sorted=True).long() fused_feats = torch.zeros_like(roi_feats) for img_id in img_inds: inds = (rois[:, 0] == img_id.item()) fused_feats[inds] = roi_feats[inds] + glbctx_feat[img_id] return fused_feats def _slice_pos_feats(self, feats, sampling_results): """Get features from pos rois.""" num_rois = [res.bboxes.size(0) for res in sampling_results] num_pos_rois = [res.pos_bboxes.size(0) for res in sampling_results] inds = torch.zeros(sum(num_rois), dtype=torch.bool) start = 0 for i in range(len(num_rois)): start = 0 if i == 0 else start + num_rois[i - 1] stop = start + num_pos_rois[i] inds[start:stop] = 1 sliced_feats = feats[inds] return sliced_feats def _bbox_forward(self, stage, x, rois, semantic_feat=None, glbctx_feat=None): """Box head forward function used in both training and testing.""" bbox_roi_extractor = self.bbox_roi_extractor[stage] bbox_head = self.bbox_head[stage] bbox_feats = bbox_roi_extractor( x[:len(bbox_roi_extractor.featmap_strides)], rois) if self.with_semantic and semantic_feat is not None: bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat], rois) if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]: bbox_semantic_feat = F.adaptive_avg_pool2d( bbox_semantic_feat, bbox_feats.shape[-2:]) bbox_feats += 
bbox_semantic_feat if self.with_glbctx and glbctx_feat is not None: bbox_feats = self._fuse_glbctx(bbox_feats, glbctx_feat, rois) cls_score, bbox_pred, relayed_feat = bbox_head( bbox_feats, return_shared_feat=True) bbox_results = dict( cls_score=cls_score, bbox_pred=bbox_pred, relayed_feat=relayed_feat) return bbox_results def _mask_forward(self, x, rois, semantic_feat=None, glbctx_feat=None, relayed_feat=None): """Mask head forward function used in both training and testing.""" mask_feats = self.mask_roi_extractor( x[:self.mask_roi_extractor.num_inputs], rois) if self.with_semantic and semantic_feat is not None: mask_semantic_feat = self.semantic_roi_extractor([semantic_feat], rois) if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]: mask_semantic_feat = F.adaptive_avg_pool2d( mask_semantic_feat, mask_feats.shape[-2:]) mask_feats += mask_semantic_feat if self.with_glbctx and glbctx_feat is not None: mask_feats = self._fuse_glbctx(mask_feats, glbctx_feat, rois) if self.with_feat_relay and relayed_feat is not None: mask_feats = mask_feats + relayed_feat mask_pred = self.mask_head(mask_feats) mask_results = dict(mask_pred=mask_pred) return mask_results def _bbox_forward_train(self, stage, x, sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg, semantic_feat=None, glbctx_feat=None): """Run forward function and calculate loss for box head in training.""" bbox_head = self.bbox_head[stage] rois = bbox2roi([res.bboxes for res in sampling_results]) bbox_results = self._bbox_forward( stage, x, rois, semantic_feat=semantic_feat, glbctx_feat=glbctx_feat) bbox_targets = bbox_head.get_targets(sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg) loss_bbox = bbox_head.loss(bbox_results['cls_score'], bbox_results['bbox_pred'], rois, *bbox_targets) bbox_results.update( loss_bbox=loss_bbox, rois=rois, bbox_targets=bbox_targets) return bbox_results def _mask_forward_train(self, x, sampling_results, gt_masks, rcnn_train_cfg, semantic_feat=None, glbctx_feat=None, relayed_feat=None): """Run forward function and calculate loss for mask head in training.""" pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results]) mask_results = self._mask_forward( x, pos_rois, semantic_feat=semantic_feat, glbctx_feat=glbctx_feat, relayed_feat=relayed_feat) mask_targets = self.mask_head.get_targets(sampling_results, gt_masks, rcnn_train_cfg) pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) loss_mask = self.mask_head.loss(mask_results['mask_pred'], mask_targets, pos_labels) mask_results = loss_mask return mask_results def forward_train(self, x, img_metas, proposal_list, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None, gt_semantic_seg=None): """ Args: x (list[Tensor]): list of multi-level img features. img_metas (list[dict]): list of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see `mmdet/datasets/pipelines/formatting.py:Collect`. proposal_list (list[Tensors]): list of region proposals. gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box gt_bboxes_ignore (None, list[Tensor]): specify which bounding boxes can be ignored when computing the loss. gt_masks (None, Tensor) : true segmentation masks for each box used if the architecture supports a segmentation task. 
gt_semantic_seg (None, list[Tensor]): semantic segmentation masks used if the architecture supports semantic segmentation task. Returns: dict[str, Tensor]: a dictionary of loss components """ losses = dict() # semantic segmentation branch if self.with_semantic: semantic_pred, semantic_feat = self.semantic_head(x) loss_seg = self.semantic_head.loss(semantic_pred, gt_semantic_seg) losses['loss_semantic_seg'] = loss_seg else: semantic_feat = None # global context branch if self.with_glbctx: mc_pred, glbctx_feat = self.glbctx_head(x) loss_glbctx = self.glbctx_head.loss(mc_pred, gt_labels) losses['loss_glbctx'] = loss_glbctx else: glbctx_feat = None for i in range(self.num_stages): self.current_stage = i rcnn_train_cfg = self.train_cfg[i] lw = self.stage_loss_weights[i] # assign gts and sample proposals sampling_results = [] bbox_assigner = self.bbox_assigner[i] bbox_sampler = self.bbox_sampler[i] num_imgs = len(img_metas) if gt_bboxes_ignore is None: gt_bboxes_ignore = [None for _ in range(num_imgs)] for j in range(num_imgs): assign_result = bbox_assigner.assign(proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j], gt_labels[j]) sampling_result = bbox_sampler.sample( assign_result, proposal_list[j], gt_bboxes[j], gt_labels[j], feats=[lvl_feat[j][None] for lvl_feat in x]) sampling_results.append(sampling_result) bbox_results = \ self._bbox_forward_train( i, x, sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg, semantic_feat, glbctx_feat) roi_labels = bbox_results['bbox_targets'][0] for name, value in bbox_results['loss_bbox'].items(): losses[f's{i}.{name}'] = ( value * lw if 'loss' in name else value) # refine boxes if i < self.num_stages - 1: pos_is_gts = [res.pos_is_gt for res in sampling_results] with torch.no_grad(): proposal_list = self.bbox_head[i].refine_bboxes( bbox_results['rois'], roi_labels, bbox_results['bbox_pred'], pos_is_gts, img_metas) if self.with_feat_relay: relayed_feat = self._slice_pos_feats(bbox_results['relayed_feat'], sampling_results) relayed_feat = self.feat_relay_head(relayed_feat) else: relayed_feat = None mask_results = self._mask_forward_train(x, sampling_results, gt_masks, rcnn_train_cfg, semantic_feat, glbctx_feat, relayed_feat) mask_lw = sum(self.stage_loss_weights) losses['loss_mask'] = mask_lw * mask_results['loss_mask'] return losses def simple_test(self, x, proposal_list, img_metas, rescale=False): """Test without augmentation. Args: x (tuple[Tensor]): Features from upstream network. Each has shape (batch_size, c, h, w). proposal_list (list(Tensor)): Proposals from rpn head. Each has shape (num_proposals, 5), last dimension 5 represent (x1, y1, x2, y2, score). img_metas (list[dict]): Meta information of images. rescale (bool): Whether to rescale the results to the original image. Default: True. Returns: list[list[np.ndarray]] or list[tuple]: When no mask branch, it is bbox results of each image and classes with type `list[list[np.ndarray]]`. The outer list corresponds to each image. The inner list corresponds to each class. When the model has mask branch, it contains bbox results and mask results. The outer list corresponds to each image, and first element of tuple is bbox results, second element is mask results. 
""" if self.with_semantic: _, semantic_feat = self.semantic_head(x) else: semantic_feat = None if self.with_glbctx: mc_pred, glbctx_feat = self.glbctx_head(x) else: glbctx_feat = None num_imgs = len(proposal_list) img_shapes = tuple(meta['img_shape'] for meta in img_metas) ori_shapes = tuple(meta['ori_shape'] for meta in img_metas) scale_factors = tuple(meta['scale_factor'] for meta in img_metas) # "ms" in variable names means multi-stage ms_scores = [] rcnn_test_cfg = self.test_cfg rois = bbox2roi(proposal_list) if rois.shape[0] == 0: # There is no proposal in the whole batch bbox_results = [[ np.zeros((0, 5), dtype=np.float32) for _ in range(self.bbox_head[-1].num_classes) ]] * num_imgs if self.with_mask: mask_classes = self.mask_head.num_classes segm_results = [[[] for _ in range(mask_classes)] for _ in range(num_imgs)] results = list(zip(bbox_results, segm_results)) else: results = bbox_results return results for i in range(self.num_stages): bbox_head = self.bbox_head[i] bbox_results = self._bbox_forward( i, x, rois, semantic_feat=semantic_feat, glbctx_feat=glbctx_feat) # split batch bbox prediction back to each image cls_score = bbox_results['cls_score'] bbox_pred = bbox_results['bbox_pred'] num_proposals_per_img = tuple(len(p) for p in proposal_list) rois = rois.split(num_proposals_per_img, 0) cls_score = cls_score.split(num_proposals_per_img, 0) bbox_pred = bbox_pred.split(num_proposals_per_img, 0) ms_scores.append(cls_score) if i < self.num_stages - 1: refine_rois_list = [] for j in range(num_imgs): if rois[j].shape[0] > 0: bbox_label = cls_score[j][:, :-1].argmax(dim=1) refine_rois = bbox_head.regress_by_class( rois[j], bbox_label, bbox_pred[j], img_metas[j]) refine_rois_list.append(refine_rois) rois = torch.cat(refine_rois_list) # average scores of each image by stages cls_score = [ sum([score[i] for score in ms_scores]) / float(len(ms_scores)) for i in range(num_imgs) ] # apply bbox post-processing to each image individually det_bboxes = [] det_labels = [] for i in range(num_imgs): det_bbox, det_label = self.bbox_head[-1].get_bboxes( rois[i], cls_score[i], bbox_pred[i], img_shapes[i], scale_factors[i], rescale=rescale, cfg=rcnn_test_cfg) det_bboxes.append(det_bbox) det_labels.append(det_label) det_bbox_results = [ bbox2result(det_bboxes[i], det_labels[i], self.bbox_head[-1].num_classes) for i in range(num_imgs) ] if self.with_mask: if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): mask_classes = self.mask_head.num_classes det_segm_results = [[[] for _ in range(mask_classes)] for _ in range(num_imgs)] else: if rescale and not isinstance(scale_factors[0], float): scale_factors = [ torch.from_numpy(scale_factor).to(det_bboxes[0].device) for scale_factor in scale_factors ] _bboxes = [ det_bboxes[i][:, :4] * scale_factors[i] if rescale else det_bboxes[i] for i in range(num_imgs) ] mask_rois = bbox2roi(_bboxes) # get relay feature on mask_rois bbox_results = self._bbox_forward( -1, x, mask_rois, semantic_feat=semantic_feat, glbctx_feat=glbctx_feat) relayed_feat = bbox_results['relayed_feat'] relayed_feat = self.feat_relay_head(relayed_feat) mask_results = self._mask_forward( x, mask_rois, semantic_feat=semantic_feat, glbctx_feat=glbctx_feat, relayed_feat=relayed_feat) mask_pred = mask_results['mask_pred'] # split batch mask prediction back to each image num_bbox_per_img = tuple(len(_bbox) for _bbox in _bboxes) mask_preds = mask_pred.split(num_bbox_per_img, 0) # apply mask post-processing to each image individually det_segm_results = [] for i in range(num_imgs): if 
det_bboxes[i].shape[0] == 0: det_segm_results.append( [[] for _ in range(self.mask_head.num_classes)]) else: segm_result = self.mask_head.get_seg_masks( mask_preds[i], _bboxes[i], det_labels[i], self.test_cfg, ori_shapes[i], scale_factors[i], rescale) det_segm_results.append(segm_result) # return results if self.with_mask: return list(zip(det_bbox_results, det_segm_results)) else: return det_bbox_results def aug_test(self, img_feats, proposal_list, img_metas, rescale=False): if self.with_semantic: semantic_feats = [ self.semantic_head(feat)[1] for feat in img_feats ] else: semantic_feats = [None] * len(img_metas) if self.with_glbctx: glbctx_feats = [self.glbctx_head(feat)[1] for feat in img_feats] else: glbctx_feats = [None] * len(img_metas) rcnn_test_cfg = self.test_cfg aug_bboxes = [] aug_scores = [] for x, img_meta, semantic_feat, glbctx_feat in zip( img_feats, img_metas, semantic_feats, glbctx_feats): # only one image in the batch img_shape = img_meta[0]['img_shape'] scale_factor = img_meta[0]['scale_factor'] flip = img_meta[0]['flip'] proposals = bbox_mapping(proposal_list[0][:, :4], img_shape, scale_factor, flip) # "ms" in variable names means multi-stage ms_scores = [] rois = bbox2roi([proposals]) if rois.shape[0] == 0: # There is no proposal in the single image aug_bboxes.append(rois.new_zeros(0, 4)) aug_scores.append(rois.new_zeros(0, 1)) continue for i in range(self.num_stages): bbox_head = self.bbox_head[i] bbox_results = self._bbox_forward( i, x, rois, semantic_feat=semantic_feat, glbctx_feat=glbctx_feat) ms_scores.append(bbox_results['cls_score']) if i < self.num_stages - 1: bbox_label = bbox_results['cls_score'].argmax(dim=1) rois = bbox_head.regress_by_class( rois, bbox_label, bbox_results['bbox_pred'], img_meta[0]) cls_score = sum(ms_scores) / float(len(ms_scores)) bboxes, scores = self.bbox_head[-1].get_bboxes( rois, cls_score, bbox_results['bbox_pred'], img_shape, scale_factor, rescale=False, cfg=None) aug_bboxes.append(bboxes) aug_scores.append(scores) # after merging, bboxes will be rescaled to the original image size merged_bboxes, merged_scores = merge_aug_bboxes( aug_bboxes, aug_scores, img_metas, rcnn_test_cfg) det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores, rcnn_test_cfg.score_thr, rcnn_test_cfg.nms, rcnn_test_cfg.max_per_img) det_bbox_results = bbox2result(det_bboxes, det_labels, self.bbox_head[-1].num_classes) if self.with_mask: if det_bboxes.shape[0] == 0: det_segm_results = [[] for _ in range(self.mask_head.num_classes)] else: aug_masks = [] for x, img_meta, semantic_feat, glbctx_feat in zip( img_feats, img_metas, semantic_feats, glbctx_feats): img_shape = img_meta[0]['img_shape'] scale_factor = img_meta[0]['scale_factor'] flip = img_meta[0]['flip'] _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape, scale_factor, flip) mask_rois = bbox2roi([_bboxes]) # get relay feature on mask_rois bbox_results = self._bbox_forward( -1, x, mask_rois, semantic_feat=semantic_feat, glbctx_feat=glbctx_feat) relayed_feat = bbox_results['relayed_feat'] relayed_feat = self.feat_relay_head(relayed_feat) mask_results = self._mask_forward( x, mask_rois, semantic_feat=semantic_feat, glbctx_feat=glbctx_feat, relayed_feat=relayed_feat) mask_pred = mask_results['mask_pred'] aug_masks.append(mask_pred.sigmoid().cpu().numpy()) merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg) ori_shape = img_metas[0][0]['ori_shape'] det_segm_results = self.mask_head.get_seg_masks( merged_masks, det_bboxes, det_labels, rcnn_test_cfg, ori_shape, scale_factor=1.0, 
rescale=False) return [(det_bbox_results, det_segm_results)] else: return [det_bbox_results]
1
25,549
`bbox_feats.shape[0] > 0` requires that the number of proposals is not 0.
open-mmlab-mmdetection
py
@@ -1,4 +1,4 @@ -const { h, Component } = require('preact') +const { Component } = require('preact') module.exports = class CloseWrapper extends Component { componentWillUnmount () {
1
const { h, Component } = require('preact') module.exports = class CloseWrapper extends Component { componentWillUnmount () { this.props.onUnmount() } render () { return this.props.children[0] } }
1
13,624
Just FYI, I had to remove this unused import because Travis/`npm run lint` were complaining.
transloadit-uppy
js
@@ -15,8 +15,10 @@ #include "domain.h" #include "error.h" #include "update.h" +#include "force.h" #include <cstring> +#include <fmt/format.h> using namespace LAMMPS_NS;
1
/* ---------------------------------------------------------------------- LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator http://lammps.sandia.gov, Sandia National Laboratories Steve Plimpton, [email protected] Copyright (2003) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain rights in this software. This software is distributed under the GNU General Public License. See the README file in the top-level LAMMPS directory. ------------------------------------------------------------------------- */ #include "dump_atom_gz.h" #include "domain.h" #include "error.h" #include "update.h" #include <cstring> using namespace LAMMPS_NS; DumpAtomGZ::DumpAtomGZ(LAMMPS *lmp, int narg, char **arg) : DumpAtom(lmp, narg, arg) { gzFp = NULL; if (!compressed) error->all(FLERR,"Dump atom/gz only writes compressed files"); } /* ---------------------------------------------------------------------- */ DumpAtomGZ::~DumpAtomGZ() { if (gzFp) gzclose(gzFp); gzFp = NULL; fp = NULL; } /* ---------------------------------------------------------------------- generic opening of a dump file ASCII or binary or gzipped some derived classes override this function ------------------------------------------------------------------------- */ void DumpAtomGZ::openfile() { // single file, already opened, so just return if (singlefile_opened) return; if (multifile == 0) singlefile_opened = 1; // if one file per timestep, replace '*' with current timestep char *filecurrent = filename; if (multiproc) filecurrent = multiname; if (multifile) { char *filestar = filecurrent; filecurrent = new char[strlen(filestar) + 16]; char *ptr = strchr(filestar,'*'); *ptr = '\0'; if (padflag == 0) sprintf(filecurrent,"%s" BIGINT_FORMAT "%s", filestar,update->ntimestep,ptr+1); else { char bif[8],pad[16]; strcpy(bif,BIGINT_FORMAT); sprintf(pad,"%%s%%0%d%s%%s",padflag,&bif[1]); sprintf(filecurrent,pad,filestar,update->ntimestep,ptr+1); } *ptr = '*'; if (maxfiles > 0) { if (numfiles < maxfiles) { nameslist[numfiles] = new char[strlen(filecurrent)+1]; strcpy(nameslist[numfiles],filecurrent); ++numfiles; } else { remove(nameslist[fileidx]); delete[] nameslist[fileidx]; nameslist[fileidx] = new char[strlen(filecurrent)+1]; strcpy(nameslist[fileidx],filecurrent); fileidx = (fileidx + 1) % maxfiles; } } } // each proc with filewriter = 1 opens a file if (filewriter) { if (append_flag) { gzFp = gzopen(filecurrent,"ab9"); } else { gzFp = gzopen(filecurrent,"wb9"); } if (gzFp == NULL) error->one(FLERR,"Cannot open dump file"); } else gzFp = NULL; // delete string with timestep replaced if (multifile) delete [] filecurrent; } /* ---------------------------------------------------------------------- */ void DumpAtomGZ::write_header(bigint ndump) { if ((multiproc) || (!multiproc && me == 0)) { if (unit_flag && !unit_count) { ++unit_count; gzprintf(gzFp,"ITEM: UNITS\n%s\n",update->unit_style); } if (time_flag) gzprintf(gzFp,"ITEM: TIME\n%.16g\n",compute_time()); gzprintf(gzFp,"ITEM: TIMESTEP\n"); gzprintf(gzFp,BIGINT_FORMAT "\n",update->ntimestep); gzprintf(gzFp,"ITEM: NUMBER OF ATOMS\n"); gzprintf(gzFp,BIGINT_FORMAT "\n",ndump); if (domain->triclinic == 0) { gzprintf(gzFp,"ITEM: BOX BOUNDS %s\n",boundstr); gzprintf(gzFp,"%g %g\n",boxxlo,boxxhi); gzprintf(gzFp,"%g %g\n",boxylo,boxyhi); gzprintf(gzFp,"%g %g\n",boxzlo,boxzhi); } else { gzprintf(gzFp,"ITEM: BOX BOUNDS xy xz yz %s\n",boundstr); gzprintf(gzFp,"%g %g %g\n",boxxlo,boxxhi,boxxy); gzprintf(gzFp,"%g %g 
%g\n",boxylo,boxyhi,boxxz); gzprintf(gzFp,"%g %g %g\n",boxzlo,boxzhi,boxyz); } gzprintf(gzFp,"ITEM: ATOMS %s\n",columns); } } /* ---------------------------------------------------------------------- */ void DumpAtomGZ::write_data(int n, double *mybuf) { gzwrite(gzFp,mybuf,sizeof(char)*n); } /* ---------------------------------------------------------------------- */ void DumpAtomGZ::write() { DumpAtom::write(); if (filewriter) { if (multifile) { gzclose(gzFp); gzFp = NULL; } else { if (flush_flag) gzflush(gzFp,Z_SYNC_FLUSH); } } }
1
28,924
format.h is not a system header but is bundled with LAMMPS. This should be `#include "fmt/format.h"` (see the sketch below).
lammps-lammps
cpp
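The message above notes that {fmt} ships inside the LAMMPS source tree, so the quoted include form is the right one. The snippet below is a sketch only: the helper is invented rather than taken from LAMMPS, but it shows the include the reviewer asks for and the kind of fmt::format() call that could replace the sprintf/BIGINT_FORMAT filename assembly in DumpAtomGZ::openfile().

// Quoted form: the build searches the project's own include paths first,
// picking up the copy of {fmt} bundled with the LAMMPS sources.
#include "fmt/format.h"   // rather than <fmt/format.h>

#include <cstdint>
#include <string>

// Hypothetical helper: substitute the timestep for the '*' in a dump file
// name; 'pad' plays the role of padflag (0 means no zero padding).
// e.g. timestep_filename("dump.", 250, ".gz", 6) -> "dump.000250.gz"
static std::string
timestep_filename (const std::string &before, std::int64_t step,
                   const std::string &after, int pad)
{
  if (pad > 0)
    return fmt::format ("{}{:0{}}{}", before, step, pad, after);
  return fmt::format ("{}{}{}", before, step, after);
}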
@@ -441,8 +441,15 @@ public final class BKDReader extends PointValues implements Accountable { void visitDocValues(int[] commonPrefixLengths, byte[] scratchDataPackedValue, byte[] scratchMinIndexPackedValue, byte[] scratchMaxIndexPackedValue, IndexInput in, int[] docIDs, int count, IntersectVisitor visitor) throws IOException { + if (version >= BKDWriter.VERSION_LOW_CARDINALITY_LEAVES) { + visitDocValuesWithCardinality(commonPrefixLengths, scratchDataPackedValue, scratchMinIndexPackedValue, scratchMaxIndexPackedValue, in, docIDs, count, visitor); + } else { + visitDocValuesNoCardinality(commonPrefixLengths, scratchDataPackedValue, scratchMinIndexPackedValue, scratchMaxIndexPackedValue, in, docIDs, count, visitor); + } + } - + void visitDocValuesNoCardinality(int[] commonPrefixLengths, byte[] scratchDataPackedValue, byte[] scratchMinIndexPackedValue, byte[] scratchMaxIndexPackedValue, + IndexInput in, int[] docIDs, int count, IntersectVisitor visitor) throws IOException { readCommonPrefixes(commonPrefixLengths, scratchDataPackedValue, in); if (numIndexDims != 1 && version >= BKDWriter.VERSION_LEAF_STORES_BOUNDS) {
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.lucene.util.bkd; import java.io.IOException; import java.util.Arrays; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.PointValues; import org.apache.lucene.store.ByteArrayDataInput; import org.apache.lucene.store.IndexInput; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.MathUtil; /** Handles intersection of an multi-dimensional shape in byte[] space with a block KD-tree previously written with {@link BKDWriter}. * * @lucene.experimental */ public final class BKDReader extends PointValues implements Accountable { // Packed array of byte[] holding all split values in the full binary tree: final int leafNodeOffset; final int numDataDims; final int numIndexDims; final int bytesPerDim; final int numLeaves; final IndexInput in; final int maxPointsInLeafNode; final byte[] minPackedValue; final byte[] maxPackedValue; final long pointCount; final int docCount; final int version; protected final int packedBytesLength; protected final int packedIndexBytesLength; final byte[] packedIndex; /** Caller must pre-seek the provided {@link IndexInput} to the index location that {@link BKDWriter#finish} returned */ public BKDReader(IndexInput in) throws IOException { version = CodecUtil.checkHeader(in, BKDWriter.CODEC_NAME, BKDWriter.VERSION_START, BKDWriter.VERSION_CURRENT); numDataDims = in.readVInt(); if (version >= BKDWriter.VERSION_SELECTIVE_INDEXING) { numIndexDims = in.readVInt(); } else { numIndexDims = numDataDims; } maxPointsInLeafNode = in.readVInt(); bytesPerDim = in.readVInt(); packedBytesLength = numDataDims * bytesPerDim; packedIndexBytesLength = numIndexDims * bytesPerDim; // Read index: numLeaves = in.readVInt(); assert numLeaves > 0; leafNodeOffset = numLeaves; minPackedValue = new byte[packedIndexBytesLength]; maxPackedValue = new byte[packedIndexBytesLength]; in.readBytes(minPackedValue, 0, packedIndexBytesLength); in.readBytes(maxPackedValue, 0, packedIndexBytesLength); for(int dim=0;dim<numIndexDims;dim++) { if (Arrays.compareUnsigned(minPackedValue, dim * bytesPerDim, dim * bytesPerDim + bytesPerDim, maxPackedValue, dim * bytesPerDim, dim * bytesPerDim + bytesPerDim) > 0) { throw new CorruptIndexException("minPackedValue " + new BytesRef(minPackedValue) + " is > maxPackedValue " + new BytesRef(maxPackedValue) + " for dim=" + dim, in); } } pointCount = in.readVLong(); docCount = in.readVInt(); int numBytes = in.readVInt(); packedIndex = new byte[numBytes]; in.readBytes(packedIndex, 0, numBytes); this.in = in; } long getMinLeafBlockFP() { return new ByteArrayDataInput(packedIndex).readVLong(); } /** Used to walk the in-heap index. 
The format takes advantage of the limited * access pattern to the BKD tree at search time, i.e. starting at the root * node and recursing downwards one child at a time. * @lucene.internal */ public class IndexTree implements Cloneable { private int nodeID; // level is 1-based so that we can do level-1 w/o checking each time: private int level; private int splitDim; private final byte[][] splitPackedValueStack; // used to read the packed byte[] private final ByteArrayDataInput in; // holds the minimum (left most) leaf block file pointer for each level we've recursed to: private final long[] leafBlockFPStack; // holds the address, in the packed byte[] index, of the left-node of each level: private final int[] leftNodePositions; // holds the address, in the packed byte[] index, of the right-node of each level: private final int[] rightNodePositions; // holds the splitDim for each level: private final int[] splitDims; // true if the per-dim delta we read for the node at this level is a negative offset vs. the last split on this dim; this is a packed // 2D array, i.e. to access array[level][dim] you read from negativeDeltas[level*numDims+dim]. this will be true if the last time we // split on this dimension, we next pushed to the left sub-tree: private final boolean[] negativeDeltas; // holds the packed per-level split values; the intersect method uses this to save the cell min/max as it recurses: private final byte[][] splitValuesStack; // scratch value to return from getPackedValue: private final BytesRef scratch; IndexTree() { int treeDepth = getTreeDepth(); splitPackedValueStack = new byte[treeDepth+1][]; nodeID = 1; level = 1; splitPackedValueStack[level] = new byte[packedIndexBytesLength]; leafBlockFPStack = new long[treeDepth+1]; leftNodePositions = new int[treeDepth+1]; rightNodePositions = new int[treeDepth+1]; splitValuesStack = new byte[treeDepth+1][]; splitDims = new int[treeDepth+1]; negativeDeltas = new boolean[numIndexDims*(treeDepth+1)]; in = new ByteArrayDataInput(packedIndex); splitValuesStack[0] = new byte[packedIndexBytesLength]; readNodeData(false); scratch = new BytesRef(); scratch.length = bytesPerDim; } public void pushLeft() { int nodePosition = leftNodePositions[level]; nodeID *= 2; level++; if (splitPackedValueStack[level] == null) { splitPackedValueStack[level] = new byte[packedIndexBytesLength]; } System.arraycopy(negativeDeltas, (level-1)*numIndexDims, negativeDeltas, level*numIndexDims, numIndexDims); assert splitDim != -1; negativeDeltas[level*numIndexDims+splitDim] = true; in.setPosition(nodePosition); readNodeData(true); } /** Clone, but you are not allowed to pop up past the point where the clone happened. 
*/ @Override public IndexTree clone() { IndexTree index = new IndexTree(); index.nodeID = nodeID; index.level = level; index.splitDim = splitDim; index.leafBlockFPStack[level] = leafBlockFPStack[level]; index.leftNodePositions[level] = leftNodePositions[level]; index.rightNodePositions[level] = rightNodePositions[level]; index.splitValuesStack[index.level] = splitValuesStack[index.level].clone(); System.arraycopy(negativeDeltas, level*numIndexDims, index.negativeDeltas, level*numIndexDims, numIndexDims); index.splitDims[level] = splitDims[level]; return index; } public void pushRight() { int nodePosition = rightNodePositions[level]; nodeID = nodeID * 2 + 1; level++; if (splitPackedValueStack[level] == null) { splitPackedValueStack[level] = new byte[packedIndexBytesLength]; } System.arraycopy(negativeDeltas, (level-1)*numIndexDims, negativeDeltas, level*numIndexDims, numIndexDims); assert splitDim != -1; negativeDeltas[level*numIndexDims+splitDim] = false; in.setPosition(nodePosition); readNodeData(false); } public void pop() { nodeID /= 2; level--; splitDim = splitDims[level]; //System.out.println(" pop nodeID=" + nodeID); } public boolean isLeafNode() { return nodeID >= leafNodeOffset; } public boolean nodeExists() { return nodeID - leafNodeOffset < leafNodeOffset; } public int getNodeID() { return nodeID; } public byte[] getSplitPackedValue() { assert isLeafNode() == false; assert splitPackedValueStack[level] != null: "level=" + level; return splitPackedValueStack[level]; } /** Only valid after pushLeft or pushRight, not pop! */ public int getSplitDim() { assert isLeafNode() == false; return splitDim; } /** Only valid after pushLeft or pushRight, not pop! */ public BytesRef getSplitDimValue() { assert isLeafNode() == false; scratch.bytes = splitValuesStack[level]; scratch.offset = splitDim * bytesPerDim; return scratch; } /** Only valid after pushLeft or pushRight, not pop! */ public long getLeafBlockFP() { assert isLeafNode(): "nodeID=" + nodeID + " is not a leaf"; return leafBlockFPStack[level]; } /** Return the number of leaves below the current node. 
*/ public int getNumLeaves() { int leftMostLeafNode = nodeID; while (leftMostLeafNode < leafNodeOffset) { leftMostLeafNode = leftMostLeafNode * 2; } int rightMostLeafNode = nodeID; while (rightMostLeafNode < leafNodeOffset) { rightMostLeafNode = rightMostLeafNode * 2 + 1; } final int numLeaves; if (rightMostLeafNode >= leftMostLeafNode) { // both are on the same level numLeaves = rightMostLeafNode - leftMostLeafNode + 1; } else { // left is one level deeper than right numLeaves = rightMostLeafNode - leftMostLeafNode + 1 + leafNodeOffset; } assert numLeaves == getNumLeavesSlow(nodeID) : numLeaves + " " + getNumLeavesSlow(nodeID); return numLeaves; } // for assertions private int getNumLeavesSlow(int node) { if (node >= 2 * leafNodeOffset) { return 0; } else if (node >= leafNodeOffset) { return 1; } else { final int leftCount = getNumLeavesSlow(node * 2); final int rightCount = getNumLeavesSlow(node * 2 + 1); return leftCount + rightCount; } } private void readNodeData(boolean isLeft) { leafBlockFPStack[level] = leafBlockFPStack[level-1]; // read leaf block FP delta if (isLeft == false) { leafBlockFPStack[level] += in.readVLong(); } if (isLeafNode()) { splitDim = -1; } else { // read split dim, prefix, firstDiffByteDelta encoded as int: int code = in.readVInt(); splitDim = code % numIndexDims; splitDims[level] = splitDim; code /= numIndexDims; int prefix = code % (1+bytesPerDim); int suffix = bytesPerDim - prefix; if (splitValuesStack[level] == null) { splitValuesStack[level] = new byte[packedIndexBytesLength]; } System.arraycopy(splitValuesStack[level-1], 0, splitValuesStack[level], 0, packedIndexBytesLength); if (suffix > 0) { int firstDiffByteDelta = code / (1+bytesPerDim); if (negativeDeltas[level*numIndexDims + splitDim]) { firstDiffByteDelta = -firstDiffByteDelta; } int oldByte = splitValuesStack[level][splitDim*bytesPerDim+prefix] & 0xFF; splitValuesStack[level][splitDim*bytesPerDim+prefix] = (byte) (oldByte + firstDiffByteDelta); in.readBytes(splitValuesStack[level], splitDim*bytesPerDim+prefix+1, suffix-1); } else { // our split value is == last split value in this dim, which can happen when there are many duplicate values } int leftNumBytes; if (nodeID * 2 < leafNodeOffset) { leftNumBytes = in.readVInt(); } else { leftNumBytes = 0; } leftNodePositions[level] = in.getPosition(); rightNodePositions[level] = leftNodePositions[level] + leftNumBytes; } } } private int getTreeDepth() { // First +1 because all the non-leave nodes makes another power // of 2; e.g. to have a fully balanced tree with 4 leaves you // need a depth=3 tree: // Second +1 because MathUtil.log computes floor of the logarithm; e.g. // with 5 leaves you need a depth=4 tree: return MathUtil.log(numLeaves, 2) + 2; } /** Used to track all state for a single call to {@link #intersect}. 
*/ public static final class IntersectState { final IndexInput in; final int[] scratchDocIDs; final byte[] scratchDataPackedValue, scratchMinIndexPackedValue, scratchMaxIndexPackedValue; final int[] commonPrefixLengths; final IntersectVisitor visitor; public final IndexTree index; public IntersectState(IndexInput in, int numDims, int packedBytesLength, int packedIndexBytesLength, int maxPointsInLeafNode, IntersectVisitor visitor, IndexTree indexVisitor) { this.in = in; this.visitor = visitor; this.commonPrefixLengths = new int[numDims]; this.scratchDocIDs = new int[maxPointsInLeafNode]; this.scratchDataPackedValue = new byte[packedBytesLength]; this.scratchMinIndexPackedValue = new byte[packedIndexBytesLength]; this.scratchMaxIndexPackedValue = new byte[packedIndexBytesLength]; this.index = indexVisitor; } } @Override public void intersect(IntersectVisitor visitor) throws IOException { intersect(getIntersectState(visitor), minPackedValue, maxPackedValue); } @Override public long estimatePointCount(IntersectVisitor visitor) { return estimatePointCount(getIntersectState(visitor), minPackedValue, maxPackedValue); } /** Fast path: this is called when the query box fully encompasses all cells under this node. */ private void addAll(IntersectState state, boolean grown) throws IOException { //System.out.println("R: addAll nodeID=" + nodeID); if (grown == false) { final long maxPointCount = (long) maxPointsInLeafNode * state.index.getNumLeaves(); if (maxPointCount <= Integer.MAX_VALUE) { // could be >MAX_VALUE if there are more than 2B points in total state.visitor.grow((int) maxPointCount); grown = true; } } if (state.index.isLeafNode()) { assert grown; //System.out.println("ADDALL"); if (state.index.nodeExists()) { visitDocIDs(state.in, state.index.getLeafBlockFP(), state.visitor); } // TODO: we can assert that the first value here in fact matches what the index claimed? 
} else { state.index.pushLeft(); addAll(state, grown); state.index.pop(); state.index.pushRight(); addAll(state, grown); state.index.pop(); } } /** Create a new {@link IntersectState} */ public IntersectState getIntersectState(IntersectVisitor visitor) { IndexTree index = new IndexTree(); return new IntersectState(in.clone(), numDataDims, packedBytesLength, packedIndexBytesLength, maxPointsInLeafNode, visitor, index); } /** Visits all docIDs and packed values in a single leaf block */ public void visitLeafBlockValues(IndexTree index, IntersectState state) throws IOException { // Leaf node; scan and filter all points in this block: int count = readDocIDs(state.in, index.getLeafBlockFP(), state.scratchDocIDs); // Again, this time reading values and checking with the visitor visitDocValues(state.commonPrefixLengths, state.scratchDataPackedValue, state.scratchMinIndexPackedValue, state.scratchMaxIndexPackedValue, state.in, state.scratchDocIDs, count, state.visitor); } private void visitDocIDs(IndexInput in, long blockFP, IntersectVisitor visitor) throws IOException { // Leaf node in.seek(blockFP); // How many points are stored in this leaf cell: int count = in.readVInt(); // No need to call grow(), it has been called up-front DocIdsWriter.readInts(in, count, visitor); } int readDocIDs(IndexInput in, long blockFP, int[] docIDs) throws IOException { in.seek(blockFP); // How many points are stored in this leaf cell: int count = in.readVInt(); DocIdsWriter.readInts(in, count, docIDs); return count; } void visitDocValues(int[] commonPrefixLengths, byte[] scratchDataPackedValue, byte[] scratchMinIndexPackedValue, byte[] scratchMaxIndexPackedValue, IndexInput in, int[] docIDs, int count, IntersectVisitor visitor) throws IOException { readCommonPrefixes(commonPrefixLengths, scratchDataPackedValue, in); if (numIndexDims != 1 && version >= BKDWriter.VERSION_LEAF_STORES_BOUNDS) { byte[] minPackedValue = scratchMinIndexPackedValue; System.arraycopy(scratchDataPackedValue, 0, minPackedValue, 0, packedIndexBytesLength); byte[] maxPackedValue = scratchMaxIndexPackedValue; //Copy common prefixes before reading adjusted // box System.arraycopy(minPackedValue, 0, maxPackedValue, 0, packedIndexBytesLength); readMinMax(commonPrefixLengths, minPackedValue, maxPackedValue, in); // The index gives us range of values for each dimension, but the actual range of values // might be much more narrow than what the index told us, so we double check the relation // here, which is cheap yet might help figure out that the block either entirely matches // or does not match at all. This is especially more likely in the case that there are // multiple dimensions that have correlation, ie. splitting on one dimension also // significantly changes the range of values in another dimension. 
Relation r = visitor.compare(minPackedValue, maxPackedValue); if (r == Relation.CELL_OUTSIDE_QUERY) { return; } visitor.grow(count); if (r == Relation.CELL_INSIDE_QUERY) { for (int i = 0; i < count; ++i) { visitor.visit(docIDs[i]); } return; } } else { visitor.grow(count); } int compressedDim = readCompressedDim(in); if (compressedDim == -1) { visitRawDocValues(commonPrefixLengths, scratchDataPackedValue, in, docIDs, count, visitor); } else { visitCompressedDocValues(commonPrefixLengths, scratchDataPackedValue, in, docIDs, count, visitor, compressedDim); } } private void readMinMax(int[] commonPrefixLengths, byte[] minPackedValue, byte[] maxPackedValue, IndexInput in) throws IOException { for (int dim = 0; dim < numIndexDims; dim++) { int prefix = commonPrefixLengths[dim]; in.readBytes(minPackedValue, dim * bytesPerDim + prefix, bytesPerDim - prefix); in.readBytes(maxPackedValue, dim * bytesPerDim + prefix, bytesPerDim - prefix); } } // Just read suffixes for every dimension private void visitRawDocValues(int[] commonPrefixLengths, byte[] scratchPackedValue, IndexInput in, int[] docIDs, int count, IntersectVisitor visitor) throws IOException { for (int i = 0; i < count; ++i) { for(int dim=0;dim<numDataDims;dim++) { int prefix = commonPrefixLengths[dim]; in.readBytes(scratchPackedValue, dim*bytesPerDim + prefix, bytesPerDim - prefix); } visitor.visit(docIDs[i], scratchPackedValue); } } private void visitCompressedDocValues(int[] commonPrefixLengths, byte[] scratchPackedValue, IndexInput in, int[] docIDs, int count, IntersectVisitor visitor, int compressedDim) throws IOException { // the byte at `compressedByteOffset` is compressed using run-length compression, // other suffix bytes are stored verbatim final int compressedByteOffset = compressedDim * bytesPerDim + commonPrefixLengths[compressedDim]; commonPrefixLengths[compressedDim]++; int i; for (i = 0; i < count; ) { scratchPackedValue[compressedByteOffset] = in.readByte(); final int runLen = Byte.toUnsignedInt(in.readByte()); for (int j = 0; j < runLen; ++j) { for(int dim=0;dim<numDataDims;dim++) { int prefix = commonPrefixLengths[dim]; in.readBytes(scratchPackedValue, dim*bytesPerDim + prefix, bytesPerDim - prefix); } visitor.visit(docIDs[i+j], scratchPackedValue); } i += runLen; } if (i != count) { throw new CorruptIndexException("Sub blocks do not add up to the expected count: " + count + " != " + i, in); } } private int readCompressedDim(IndexInput in) throws IOException { int compressedDim = in.readByte(); if (compressedDim < -1 || compressedDim >= numDataDims) { throw new CorruptIndexException("Got compressedDim="+compressedDim, in); } return compressedDim; } private void readCommonPrefixes(int[] commonPrefixLengths, byte[] scratchPackedValue, IndexInput in) throws IOException { for(int dim=0;dim<numDataDims;dim++) { int prefix = in.readVInt(); commonPrefixLengths[dim] = prefix; if (prefix > 0) { in.readBytes(scratchPackedValue, dim*bytesPerDim, prefix); } //System.out.println("R: " + dim + " of " + numDims + " prefix=" + prefix); } } private void intersect(IntersectState state, byte[] cellMinPacked, byte[] cellMaxPacked) throws IOException { /* System.out.println("\nR: intersect nodeID=" + state.index.getNodeID()); for(int dim=0;dim<numDims;dim++) { System.out.println(" dim=" + dim + "\n cellMin=" + new BytesRef(cellMinPacked, dim*bytesPerDim, bytesPerDim) + "\n cellMax=" + new BytesRef(cellMaxPacked, dim*bytesPerDim, bytesPerDim)); } */ Relation r = state.visitor.compare(cellMinPacked, cellMaxPacked); if (r == 
Relation.CELL_OUTSIDE_QUERY) { // This cell is fully outside of the query shape: stop recursing } else if (r == Relation.CELL_INSIDE_QUERY) { // This cell is fully inside of the query shape: recursively add all points in this cell without filtering addAll(state, false); // The cell crosses the shape boundary, or the cell fully contains the query, so we fall through and do full filtering: } else if (state.index.isLeafNode()) { // TODO: we can assert that the first value here in fact matches what the index claimed? // In the unbalanced case it's possible the left most node only has one child: if (state.index.nodeExists()) { // Leaf node; scan and filter all points in this block: int count = readDocIDs(state.in, state.index.getLeafBlockFP(), state.scratchDocIDs); // Again, this time reading values and checking with the visitor visitDocValues(state.commonPrefixLengths, state.scratchDataPackedValue, state.scratchMinIndexPackedValue, state.scratchMaxIndexPackedValue, state.in, state.scratchDocIDs, count, state.visitor); } } else { // Non-leaf node: recurse on the split left and right nodes int splitDim = state.index.getSplitDim(); assert splitDim >= 0: "splitDim=" + splitDim + ", numIndexDims=" + numIndexDims; assert splitDim < numIndexDims: "splitDim=" + splitDim + ", numIndexDims=" + numIndexDims; byte[] splitPackedValue = state.index.getSplitPackedValue(); BytesRef splitDimValue = state.index.getSplitDimValue(); assert splitDimValue.length == bytesPerDim; //System.out.println(" splitDimValue=" + splitDimValue + " splitDim=" + splitDim); // make sure cellMin <= splitValue <= cellMax: assert Arrays.compareUnsigned(cellMinPacked, splitDim * bytesPerDim, splitDim * bytesPerDim + bytesPerDim, splitDimValue.bytes, splitDimValue.offset, splitDimValue.offset + bytesPerDim) <= 0: "bytesPerDim=" + bytesPerDim + " splitDim=" + splitDim + " numIndexDims=" + numIndexDims + " numDataDims=" + numDataDims; assert Arrays.compareUnsigned(cellMaxPacked, splitDim * bytesPerDim, splitDim * bytesPerDim + bytesPerDim, splitDimValue.bytes, splitDimValue.offset, splitDimValue.offset + bytesPerDim) >= 0: "bytesPerDim=" + bytesPerDim + " splitDim=" + splitDim + " numIndexDims=" + numIndexDims + " numDataDims=" + numDataDims; // Recurse on left sub-tree: System.arraycopy(cellMaxPacked, 0, splitPackedValue, 0, packedIndexBytesLength); System.arraycopy(splitDimValue.bytes, splitDimValue.offset, splitPackedValue, splitDim*bytesPerDim, bytesPerDim); state.index.pushLeft(); intersect(state, cellMinPacked, splitPackedValue); state.index.pop(); // Restore the split dim value since it may have been overwritten while recursing: System.arraycopy(splitPackedValue, splitDim*bytesPerDim, splitDimValue.bytes, splitDimValue.offset, bytesPerDim); // Recurse on right sub-tree: System.arraycopy(cellMinPacked, 0, splitPackedValue, 0, packedIndexBytesLength); System.arraycopy(splitDimValue.bytes, splitDimValue.offset, splitPackedValue, splitDim*bytesPerDim, bytesPerDim); state.index.pushRight(); intersect(state, splitPackedValue, cellMaxPacked); state.index.pop(); } } private long estimatePointCount(IntersectState state, byte[] cellMinPacked, byte[] cellMaxPacked) { /* System.out.println("\nR: intersect nodeID=" + state.index.getNodeID()); for(int dim=0;dim<numDims;dim++) { System.out.println(" dim=" + dim + "\n cellMin=" + new BytesRef(cellMinPacked, dim*bytesPerDim, bytesPerDim) + "\n cellMax=" + new BytesRef(cellMaxPacked, dim*bytesPerDim, bytesPerDim)); } */ Relation r = state.visitor.compare(cellMinPacked, cellMaxPacked); if (r == 
Relation.CELL_OUTSIDE_QUERY) { // This cell is fully outside of the query shape: stop recursing return 0L; } else if (r == Relation.CELL_INSIDE_QUERY) { return (long) maxPointsInLeafNode * state.index.getNumLeaves(); } else if (state.index.isLeafNode()) { // Assume half the points matched return (maxPointsInLeafNode + 1) / 2; } else { // Non-leaf node: recurse on the split left and right nodes int splitDim = state.index.getSplitDim(); assert splitDim >= 0: "splitDim=" + splitDim + ", numIndexDims=" + numIndexDims; assert splitDim < numIndexDims: "splitDim=" + splitDim + ", numIndexDims=" + numIndexDims; byte[] splitPackedValue = state.index.getSplitPackedValue(); BytesRef splitDimValue = state.index.getSplitDimValue(); assert splitDimValue.length == bytesPerDim; //System.out.println(" splitDimValue=" + splitDimValue + " splitDim=" + splitDim); // make sure cellMin <= splitValue <= cellMax: assert Arrays.compareUnsigned(cellMinPacked, splitDim * bytesPerDim, splitDim * bytesPerDim + bytesPerDim, splitDimValue.bytes, splitDimValue.offset, splitDimValue.offset + bytesPerDim) <= 0: "bytesPerDim=" + bytesPerDim + " splitDim=" + splitDim + " numIndexDims=" + numIndexDims + " numDataDims=" + numDataDims; assert Arrays.compareUnsigned(cellMaxPacked, splitDim * bytesPerDim, splitDim * bytesPerDim + bytesPerDim, splitDimValue.bytes, splitDimValue.offset, splitDimValue.offset + bytesPerDim) >= 0: "bytesPerDim=" + bytesPerDim + " splitDim=" + splitDim + " numIndexDims=" + numIndexDims + " numDataDims=" + numDataDims; // Recurse on left sub-tree: System.arraycopy(cellMaxPacked, 0, splitPackedValue, 0, packedIndexBytesLength); System.arraycopy(splitDimValue.bytes, splitDimValue.offset, splitPackedValue, splitDim*bytesPerDim, bytesPerDim); state.index.pushLeft(); final long leftCost = estimatePointCount(state, cellMinPacked, splitPackedValue); state.index.pop(); // Restore the split dim value since it may have been overwritten while recursing: System.arraycopy(splitPackedValue, splitDim*bytesPerDim, splitDimValue.bytes, splitDimValue.offset, bytesPerDim); // Recurse on right sub-tree: System.arraycopy(cellMinPacked, 0, splitPackedValue, 0, packedIndexBytesLength); System.arraycopy(splitDimValue.bytes, splitDimValue.offset, splitPackedValue, splitDim*bytesPerDim, bytesPerDim); state.index.pushRight(); final long rightCost = estimatePointCount(state, splitPackedValue, cellMaxPacked); state.index.pop(); return leftCost + rightCost; } } @Override public long ramBytesUsed() { return packedIndex.length; } @Override public byte[] getMinPackedValue() { return minPackedValue.clone(); } @Override public byte[] getMaxPackedValue() { return maxPackedValue.clone(); } @Override public int getNumDataDimensions() { return numDataDims; } @Override public int getNumIndexDimensions() { return numIndexDims; } @Override public int getBytesPerDimension() { return bytesPerDim; } @Override public long size() { return pointCount; } @Override public int getDocCount() { return docCount; } public boolean isLeafNode(int nodeID) { return nodeID >= leafNodeOffset; } }
1
29,478
could we always call visitDocValuesWithCardinality? It seems to include the version check already?
apache-lucene-solr
java
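The review comment on this record asks whether BKDReader could always go through visitDocValuesWithCardinality, since that method reportedly performs the format-version check itself. That method is not part of the excerpt above, so the sketch below only illustrates the general refactoring being suggested — moving the version branch into a single entry point so call sites stop duplicating it. Every identifier and the version constant in the sketch are placeholders, not Lucene's actual API.

```java
// Generic illustration only -- all names and values below are invented, not Lucene's API.
final class LeafVisitSketch {

    static final int VERSION_LEAF_STORES_BOUNDS = 5; // placeholder on-disk format version

    interface Visitor {
        void visit(int docId);
    }

    // Call sites never branch on the version themselves: the legacy/new decision
    // is made exactly once, inside this single entry point.
    static void visitDocValuesWithCardinality(int version, int[] docIds, Visitor visitor) {
        if (version >= VERSION_LEAF_STORES_BOUNDS) {
            visitNewLeafLayout(docIds, visitor);
        } else {
            visitLegacyLeafLayout(docIds, visitor);
        }
    }

    private static void visitNewLeafLayout(int[] docIds, Visitor v) {
        for (int d : docIds) {
            v.visit(d); // the real reader would also use the stored bounds/cardinality here
        }
    }

    private static void visitLegacyLeafLayout(int[] docIds, Visitor v) {
        for (int d : docIds) {
            v.visit(d); // plain per-document visit for the older layout
        }
    }

    public static void main(String[] args) {
        visitDocValuesWithCardinality(6, new int[] {11, 21, 31}, System.out::println);
    }
}
```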
@@ -607,6 +607,10 @@ public class RegistrationRequest { + HUB_HOST + " X -" + HUB_PORT + " 5555. The specified config was -" + HUB_HOST + " " + hub + " -" + HUB_PORT + " " + port); } + + if(port==-1){ + throw new GridConfigurationException("No port was specified in -hub parameter. Example - http://"+hub+":4444/grid/register"); + } }
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.openqa.grid.common; import com.google.common.collect.Maps; import com.google.gson.Gson; import com.google.gson.JsonArray; import com.google.gson.JsonElement; import com.google.gson.JsonObject; import com.google.gson.JsonParser; import com.google.gson.JsonSyntaxException; import org.openqa.grid.common.exception.GridConfigurationException; import org.openqa.grid.common.exception.GridException; import org.openqa.selenium.Platform; import org.openqa.selenium.net.NetworkUtils; import org.openqa.selenium.remote.CapabilityType; import org.openqa.selenium.remote.DesiredCapabilities; import org.openqa.selenium.remote.JsonToBeanConverter; import org.openqa.selenium.server.RemoteControlConfiguration; import org.openqa.selenium.server.browserlaunchers.BrowserLauncherFactory; import org.openqa.selenium.server.cli.RemoteControlLauncher; import java.io.UnsupportedEncodingException; import java.net.MalformedURLException; import java.net.URL; import java.net.URLDecoder; import java.security.InvalidParameterException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.logging.Logger; /** * helper to register to the grid. Using JSON to exchange the object between the node and grid. 
*/ public class RegistrationRequest { private String id; private String name; private String description; private GridRole role; private List<DesiredCapabilities> capabilities = new ArrayList<DesiredCapabilities>(); private Map<String, Object> configuration = new HashMap<String, Object>(); private String[] args; private static final Logger LOG = Logger.getLogger(RegistrationRequest.class.getName()); // some special param for capability public static final String APP = "applicationName"; public static final String MAX_INSTANCES = "maxInstances"; // see enum SeleniumProtocol public static final String SELENIUM_PROTOCOL = "seleniumProtocol"; public static final String PATH = "path"; public static final String BROWSER = CapabilityType.BROWSER_NAME; public static final String PLATFORM = CapabilityType.PLATFORM; public static final String VERSION = CapabilityType.VERSION; // some special param for config public static final String REGISTER_CYCLE = "registerCycle"; public static final String PROXY_CLASS = CapabilityType.PROXY; public static final String CLEAN_UP_CYCLE = "cleanUpCycle"; // Client timeout public static final String TIME_OUT = "timeout"; public static final String BROWSER_TIME_OUT = "browserTimeout"; // TODO delete to keep only HUB_HOST and HUB_PORT public static final String REMOTE_HOST = "remoteHost"; public static final String MAX_SESSION = "maxSession"; public static final String AUTO_REGISTER = "register"; // polling nodes params public static final String NODE_POLLING = "nodePolling"; public static final String UNREGISTER_IF_STILL_DOWN_AFTER = "unregisterIfStillDownAfter"; public static final String DOWN_POLLING_LIMIT = "downPollingLimit"; public static final String STATUS_CHECK_TIMEOUT = "nodeStatusCheckTimeout"; public static final String MAX_TESTS_BEFORE_CLEAN = "maxTestBeforeClean"; public static final String CLEAN_SNAPSHOT = "cleanSnapshot"; public static final String HOST = "host"; public static final String PORT = "port"; public static final String HUB_HOST = "hubHost"; public static final String HUB_PORT = "hubPort"; public static final String SERVLETS = "servlets"; public static final String ID = "id"; public RegistrationRequest() { args = new String[0]; } public String getId() { return id; } public void setId(String id) { this.id = id; } public String getName() { return name; } public void setName(String name) { this.name = name; } public String getDescription() { return description; } public void setDescription(String description) { this.description = description; } public List<DesiredCapabilities> getCapabilities() { return capabilities; } public void addDesiredCapability(DesiredCapabilities c) { this.capabilities.add(c); } public void addDesiredCapability(Map<String, Object> c) { this.capabilities.add(new DesiredCapabilities(c)); } public void setCapabilities(List<DesiredCapabilities> capabilities) { this.capabilities = capabilities; } public Map<String, Object> getConfiguration() { return configuration; } public void setConfiguration(Map<String, Object> configuration) { this.configuration = configuration; } public String toJSON() { return new Gson().toJson(getAssociatedJSON()); } public JsonObject getAssociatedJSON() { JsonObject res = new JsonObject(); res.addProperty("class", getClass().getCanonicalName()); res.addProperty("id", id); res.addProperty("name", name); res.addProperty("description", description); res.add("configuration", new Gson().toJsonTree(configuration)); JsonArray caps = new JsonArray(); for (DesiredCapabilities c : capabilities) { caps.add(new 
Gson().toJsonTree(c.asMap())); } res.add("capabilities", caps); return res; } public String getConfigAsString(String param) { Object res = configuration.get(param); return res == null ? null : res.toString(); } public int getConfigAsInt(String param, int defaultValue) { Object o = configuration.get(param); if (o == null) { return defaultValue; } if (o instanceof Integer) { return (Integer) o; } try { return Integer.parseInt(o.toString()); } catch (NumberFormatException t) { LOG.warning(String.format( "Parameter %s has value '%s', but it is supposed to be an int. Keeping default of %s", param, o, defaultValue)); return defaultValue; } } /** * fixing a backward compatibility issue causing #2738 After 2.9 release, the remoteProxy for a * node changed for 2 type of nodes to single node answering both sel1 and webdriver protocol. * <p> * That means the hub now need to handle registration request containing * "url":"http://ip:port/selenium-server/driver" ( < v2.9 , RC ),"url":"http://ip:port/wd/hub" * (< v2.9, wb) * <p> * and "remoteHost":"http://ip:port" ( > v2.9 ). * * The pre 2.9 registration requests need to be updated and take the "url" config param and * generate the "remoteHost" out of it. */ private void ensureBackwardCompatibility() { // new param after 2.9 String url = (String) configuration.get(REMOTE_HOST); if (url != null) { return; } else { // could be a pre 2.9 node url = (String) configuration.get("url"); if (url == null) { return; } else { // was a legacy RC node. Needs to set that on the capabilities, as webdriver is the default. if (url.contains("selenium-server/driver")) { for (DesiredCapabilities capability : capabilities) { capability.setCapability(SELENIUM_PROTOCOL, SeleniumProtocol.Selenium.toString()); } } URL tmp; try { tmp = new URL(url); } catch (MalformedURLException e) { throw new GridException("specified URL for the node isn't valid :" + url); } configuration.put(REMOTE_HOST, "http://" + tmp.getHost() + ":" + tmp.getPort()); } } } /** * Create an object from a registration request formatted as a json string. * * @param json * @return create a request from the JSON request received. */ @SuppressWarnings("unchecked") // JSON lib public static RegistrationRequest getNewInstance(String json) { RegistrationRequest request = new RegistrationRequest(); try { JsonObject o = new JsonParser().parse(json).getAsJsonObject(); if (o.has("id")) request.setId(o.get("id").getAsString()); if (o.has("name")) request.setName(o.get("name").getAsString()); if (o.has("description")) request.setDescription(o.get("description").getAsString()); JsonObject config = o.get("configuration").getAsJsonObject(); Map<String, Object> configuration = new JsonToBeanConverter().convert(Map.class, config); // For backward compatibility numbers should be converted to integers for (String key : configuration.keySet()) { Object value = configuration.get(key); if (value instanceof Long) { configuration.put(key, ((Long) value).intValue()); } } request.setConfiguration(configuration); JsonArray capabilities = o.get("capabilities").getAsJsonArray(); for (int i = 0; i < capabilities.size(); i++) { DesiredCapabilities cap = new JsonToBeanConverter() .convert(DesiredCapabilities.class, capabilities.get(i)); request.capabilities.add(cap); } request.ensureBackwardCompatibility(); return request; } catch (JsonSyntaxException e) { // Check if it was a Selenium Grid 1.0 request. 
return parseGrid1Request(json); } } /** * if a PROXY_CLASS is specified in the request, the proxy created following this request will be * of that type. If nothing is specified, it will use RemoteProxy * * @return null if no class was specified. */ public String getRemoteProxyClass() { Object o = getConfiguration().get(PROXY_CLASS); return o == null ? null : o.toString(); } private static RegistrationRequest parseGrid1Request(String clientRequest) { // Check if it's a Selenium Grid 1.0 node connecting. // If so, the string will be of the format: // host=localhost&port=5000&environment=linux_firefox_3_6 Map<String, String> registrationInfo = Maps.newHashMap(); // Attempt to parse the client request string. String parts[] = clientRequest.split("&"); for (String part : parts) { String configItem[] = part.split("="); // Do some basic taint checking so we can exit early if it's not // really a key=value pair. if (configItem.length != 2) { throw new InvalidParameterException(); } try { registrationInfo.put(URLDecoder.decode(configItem[0], "UTF-8"), URLDecoder.decode(configItem[1], "UTF-8")); } catch (UnsupportedEncodingException e) { LOG.warning(String.format("Unable to decode registration request portion: %s", part)); } } // Now validate the query string. if ((registrationInfo.get("port") != null) && (registrationInfo.get("environment") != null)) { RegistrationRequest request = new RegistrationRequest(); Map<String, Object> configuration = Maps.newHashMap(); configuration.put(SELENIUM_PROTOCOL, SeleniumProtocol.Selenium.toString()); configuration .put( REMOTE_HOST, String.format("http://%s:%s", registrationInfo.get("host"), registrationInfo.get("port"))); request.setConfiguration(configuration); DesiredCapabilities cap = new DesiredCapabilities(); // cap.put(CapabilityType.PLATFORM, "LINUX"); // TODO freynaud envt or browser ? cap.setCapability(BROWSER, registrationInfo.get("environment")); cap.setCapability("environment", registrationInfo.get("environment")); request.capabilities.add(cap); return request; } else { throw new InvalidParameterException(); } } public static RegistrationRequest build(String... args) { RegistrationRequest res = new RegistrationRequest(); res.args = args; CommandLineOptionHelper helper = new CommandLineOptionHelper(args); res.role = GridRole.find(args); String defaultConfig = "defaults/DefaultNode.json"; String nodeType = helper.getParamValue("-role"); if (GridRole.isRC(nodeType)) { defaultConfig = "defaults/DefaultNodeSelenium.json"; } if (GridRole.isWebDriver(nodeType)) { defaultConfig = "defaults/DefaultNodeWebDriver.json"; } res.loadFromJSON(defaultConfig); // -file *.json ? if (helper.isParamPresent("-nodeConfig")) { String value = helper.getParamValue("-nodeConfig"); res.loadFromJSON(value); } // from command line res.loadFromCommandLine(args); for (DesiredCapabilities cap : res.capabilities) { if (SeleniumProtocol.Selenium.toString().equals(cap.getCapability(SELENIUM_PROTOCOL))) { if (!BrowserLauncherFactory.isBrowserSupported(cap.getBrowserName())) { throw new GridConfigurationException("browser " + cap.getBrowserName() + " is not supported, supported browsers are:\n" + BrowserLauncherFactory.getSupportedBrowsersAsString()); } } if (cap.getCapability(SELENIUM_PROTOCOL) == null) { cap.setCapability(SELENIUM_PROTOCOL, GridRole.isRC(nodeType) ? 
SeleniumProtocol.Selenium.toString() : SeleniumProtocol.WebDriver.toString()); } } res.configuration.put(HOST, guessHost((String) res.configuration.get(HOST))); res.configuration.put(HUB_HOST, guessHost((String) res.configuration.get(HUB_HOST))); // some values can be calculated. if (res.configuration.get(REMOTE_HOST) == null) { String url = "http://" + res.configuration.get(HOST) + ":" + res.configuration.get(PORT); res.configuration.put(REMOTE_HOST, url); } // The hub in < v2.9 expects a "url" param, not "remoteHost". While the configuration option was updated to // reflect its new intent, they're logically equivalent for the purposes of setting the proxy ID. I.e., the old hub // used the "url" value for the proxy ID, while the new one uses "remoteHost". So, just set "url" to be "remoteHost" // to make things work fine with older hubs. res.configuration.put("url", res.configuration.get(REMOTE_HOST)); String u = (String) res.configuration.get("hub"); if (u != null) { try { URL ur = new URL(u); res.configuration.put(HUB_HOST, ur.getHost()); res.configuration.put(HUB_PORT, ur.getPort()); } catch (MalformedURLException e) { throw new GridConfigurationException("the specified hub is not valid : -hub " + u); } } return res; } private static String guessHost(String host) { if ("ip".equalsIgnoreCase(host)) { NetworkUtils util = new NetworkUtils(); return util.getIp4NonLoopbackAddressOfThisMachine().getHostAddress(); } else if ("host".equalsIgnoreCase(host)) { NetworkUtils util = new NetworkUtils(); return util.getIp4NonLoopbackAddressOfThisMachine().getHostName(); } else { return host; } } private void loadFromCommandLine(String[] args) { CommandLineOptionHelper helper = new CommandLineOptionHelper(args); // storing them all. List<String> params = helper.getKeys(); for (String param : params) { String value = helper.getParamValue(param); try { int i = Integer.parseInt(value); configuration.put(param.replaceFirst("-", ""), i); } catch (NumberFormatException e) { configuration.put(param.replaceFirst("-", ""), value); } } // handle the core config, do a bit of casting. // handle the core config, do a bit of casting. if (helper.isParamPresent("-hubHost")) { configuration.put(HUB_HOST, helper.getParamValue("-hubHost")); } if (helper.isParamPresent("-" + HUB_PORT)) { configuration.put(HUB_PORT, Integer.parseInt(helper.getParamValue("-" + HUB_PORT))); } if (helper.isParamPresent("-host")) { configuration.put(HOST, helper.getParamValue("-host")); } if (helper.isParamPresent("-port")) { configuration.put(PORT, Integer.parseInt(helper.getParamValue("-port"))); } if (helper.isParamPresent("-cleanUpCycle")) { configuration.put(CLEAN_UP_CYCLE, Integer.parseInt(helper.getParamValue("-cleanUpCycle"))); } if (helper.isParamPresent("-timeout")) { configuration.put(TIME_OUT, Integer.parseInt(helper.getParamValue("-timeout"))); } if (helper.isParamPresent("-browserTimeout")) { configuration.put(BROWSER_TIME_OUT, Integer.parseInt(helper.getParamValue("-browserTimeout"))); } if (helper.isParamPresent("-maxSession")) { configuration.put(MAX_SESSION, Integer.parseInt(helper.getParamValue("-maxSession"))); } if (helper.isParamPresent("-" + AUTO_REGISTER)) { configuration.put(AUTO_REGISTER, Boolean.parseBoolean(helper.getParamValue("-" + AUTO_REGISTER))); } if (helper.isParamPresent("-servlets")) { configuration.put(SERVLETS, helper.getParamValue("-servlets")); } // capabilities parsing. 
List<String> l = helper.getAll("-browser"); if (!l.isEmpty()) { capabilities.clear(); for (String s : l) { DesiredCapabilities c = addCapabilityFromString(s); capabilities.add(c); } } addPlatformInfoToCapabilities(); } private DesiredCapabilities addCapabilityFromString(String capability) { LOG.info("Adding " + capability); String[] s = capability.split(","); if (s.length == 0) { throw new GridConfigurationException("-browser must be followed by a browser description"); } DesiredCapabilities res = new DesiredCapabilities(); for (String capabilityPair : s) { capabilityPair = capabilityPair.trim(); if (capabilityPair.split("=").length != 2) { throw new GridConfigurationException("-browser format is key1=value1,key2=value2 " + capabilityPair + " doesn't follow that format."); } String key = capabilityPair.split("=")[0]; String value = capabilityPair.split("=")[1]; res.setCapability(key, value); } if (res.getBrowserName() == null) { throw new GridConfigurationException( "You need to specify a browserName using browserName=XXX"); } return res; } private void addPlatformInfoToCapabilities() { Platform current = Platform.getCurrent(); for (DesiredCapabilities cap : capabilities) { if (cap.getPlatform() == null) { cap.setPlatform(current); } } } /** * add config, but overwrite capabilities. * * @param resource */ public void loadFromJSON(String resource) { try { JsonObject base = JSONConfigurationUtils.loadJSON(resource); if (base.has("capabilities")) { capabilities = new ArrayList<DesiredCapabilities>(); JsonArray a = base.get("capabilities").getAsJsonArray(); for (int i = 0; i < a.size(); i++) { DesiredCapabilities c = new JsonToBeanConverter() .convert(DesiredCapabilities.class, a.get(i)); capabilities.add(c); } addPlatformInfoToCapabilities(); } JsonObject o = base.get("configuration").getAsJsonObject(); for (Map.Entry<String, JsonElement> entry : o.entrySet()) { Object value = new JsonToBeanConverter().convert(Object.class, entry.getValue()); // For backward compatibility numbers should be converted to integers if (value instanceof Long) { value = ((Long) value).intValue(); } configuration.put(entry.getKey(), value); } } catch (Throwable e) { throw new GridConfigurationException("Error with the JSON of the config : " + e.getMessage(), e); } } public GridRole getRole() { return role; } public void setRole(GridRole role) { this.role = role; } public RemoteControlConfiguration getRemoteControlConfiguration() { List<String> params = new ArrayList<String>(); for (String key : configuration.keySet()) { params.add("-" + key); if (!configuration.get(key).toString().trim().isEmpty()) { params.add("" + configuration.get(key)); } } return RemoteControlLauncher.parseLauncherOptions(params.toArray(new String[params.size()])); } public String[] getArgs() { return args; } /** * Validate the current setting and throw a config exception is an invalid setup is detected. * * @throws GridConfigurationException */ public void validate() throws GridConfigurationException { String hub = (String) configuration.get(HUB_HOST); Integer port = (Integer) configuration.get(HUB_PORT); if (hub == null || port == null) { throw new GridConfigurationException("You need to specify a hub to register to using -" + HUB_HOST + " X -" + HUB_PORT + " 5555. The specified config was -" + HUB_HOST + " " + hub + " -" + HUB_PORT + " " + port); } } }
1
12,168
I think if they _don't_ specify a port, we should assume grid default (which is 4444)
SeleniumHQ-selenium
js
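The patch in this record rejects a -hub URL that carries no explicit port, while the review comment suggests falling back to the grid default of 4444 instead. Below is a minimal, self-contained sketch of that alternative; the helper name and the DEFAULT_HUB_PORT constant are invented for the sketch, not identifiers from the Selenium code base — the only behavior relied on is that java.net.URL.getPort() returns -1 when the URL has no port.

```java
import java.net.MalformedURLException;
import java.net.URL;

public final class HubPortDefaultSketch {

    // 4444 is the default hub port mentioned in the review comment;
    // the constant name is invented for this sketch.
    private static final int DEFAULT_HUB_PORT = 4444;

    /**
     * Parses a -hub URL and returns its port, falling back to the
     * grid default when the URL does not specify one.
     */
    static int hubPort(String hub) throws MalformedURLException {
        URL url = new URL(hub);
        int port = url.getPort(); // java.net.URL returns -1 when no port is present
        return port == -1 ? DEFAULT_HUB_PORT : port;
    }

    public static void main(String[] args) throws MalformedURLException {
        System.out.println(hubPort("http://hub.example.com/grid/register"));      // 4444
        System.out.println(hubPort("http://hub.example.com:5555/grid/register")); // 5555
    }
}
```

In the file above, the natural place for such a fallback would be where build() stores ur.getPort() into HUB_PORT, rather than throwing in validate() — though that placement is our reading of the code, not something stated in the record.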
@@ -155,14 +155,12 @@ std::vector<std::string> enumerateTautomerSmiles( cleanup(*mol, params); MolOps::sanitizeMol(*mol); - auto *tautparams = new TautomerCatalogParams(params.tautomerTransforms); - // unsigned int ntautomers = tautparams->getNumTautomers(); - TautomerEnumerator te(new TautomerCatalog(tautparams)); + TautomerEnumerator te(params); - std::vector<ROMOL_SPTR> res = te.enumerate(*mol); + auto res = te.enumerate(*mol); std::vector<std::string> tsmiles; - for (const auto &r : res) { + for (const auto &r : res.tautomers) { tsmiles.push_back(MolToSmiles(*r)); }
1
// // Copyright (C) 2018 Susan H. Leung // // @@ All Rights Reserved @@ // This file is part of the RDKit. // The contents are covered by the terms of the BSD license // which is included in the file license.txt, found at the root // of the RDKit source tree. // #include "MolStandardize.h" #include "Metal.h" #include "Normalize.h" #include "Tautomer.h" #include "Fragment.h" #include <GraphMol/RDKitBase.h> #include <iostream> #include <GraphMol/ROMol.h> #include <GraphMol/MolOps.h> #include <GraphMol/MolStandardize/TransformCatalog/TransformCatalogParams.h> #include "Charge.h" #include <GraphMol/SmilesParse/SmilesWrite.h> #include <GraphMol/SmilesParse/SmilesParse.h> using namespace std; namespace RDKit { namespace MolStandardize { const CleanupParameters defaultCleanupParameters; RWMol *cleanup(const RWMol &mol, const CleanupParameters &params) { RWMol m(mol); MolOps::removeHs(m); MolStandardize::MetalDisconnector md; md.disconnect(m); RWMOL_SPTR normalized(MolStandardize::normalize(&m, params)); RWMol *reionized = MolStandardize::reionize(normalized.get(), params); MolOps::assignStereochemistry(*reionized); // update properties of reionized using m. reionized->updateProps(m); return reionized; } void tautomerParent(RWMol &mol, const CleanupParameters &params) { RDUNUSED_PARAM(mol); RDUNUSED_PARAM(params); UNDER_CONSTRUCTION("Not yet implemented"); } // Return the fragment parent of a given molecule. // The fragment parent is the largest organic covalent unit in the molecule. // RWMol *fragmentParent(const RWMol &mol, const CleanupParameters &params, bool skip_standardize) { const RWMol *cleaned = nullptr; if (!skip_standardize) { cleaned = cleanup(mol, params); } else { cleaned = &mol; } LargestFragmentChooser lfragchooser(params.preferOrganic); ROMol nm(*cleaned); ROMOL_SPTR lfrag(lfragchooser.choose(nm)); if (!skip_standardize) { delete cleaned; } return new RWMol(*lfrag); } void stereoParent(RWMol &mol, const CleanupParameters &params) { RDUNUSED_PARAM(mol); RDUNUSED_PARAM(params); UNDER_CONSTRUCTION("Not yet implemented"); } void isotopeParent(RWMol &mol, const CleanupParameters &params) { RDUNUSED_PARAM(mol); RDUNUSED_PARAM(params); UNDER_CONSTRUCTION("Not yet implemented"); } RWMol *chargeParent(const RWMol &mol, const CleanupParameters &params, bool skip_standardize) { // Return the charge parent of a given molecule. // The charge parent is the uncharged version of the fragment parent. const RWMol *m = nullptr; if (!skip_standardize) { m = cleanup(mol, params); } else { m = &mol; } RWMOL_SPTR fragparent(fragmentParent(*m, params, skip_standardize)); // if fragment... 
ROMol nm(*fragparent); Uncharger uncharger(params.doCanonical); ROMOL_SPTR uncharged(uncharger.uncharge(nm)); RWMol *omol = cleanup(static_cast<RWMol>(*uncharged), params); if (!skip_standardize) { delete m; } return omol; } void superParent(RWMol &mol, const CleanupParameters &params) { RDUNUSED_PARAM(mol); RDUNUSED_PARAM(params); UNDER_CONSTRUCTION("Not yet implemented"); } RWMol *normalize(const RWMol *mol, const CleanupParameters &params) { Normalizer normalizer(params.normalizations, params.maxRestarts); ROMol m(*mol); ROMol *normalized = normalizer.normalize(m); return static_cast<RWMol *>(normalized); } RWMol *reionize(const RWMol *mol, const CleanupParameters &params) { RDUNUSED_PARAM(params); Reionizer reionizer; ROMol m(*mol); ROMol *reionized = reionizer.reionize(m); return static_cast<RWMol *>(reionized); } std::string standardizeSmiles(const std::string &smiles) { RWMOL_SPTR mol(SmilesToMol(smiles, 0, false)); if (!mol) { std::string message = "SMILES Parse Error: syntax error for input: " + smiles; throw ValueErrorException(message); } CleanupParameters params; RWMOL_SPTR cleaned(cleanup(*mol, params)); return MolToSmiles(*cleaned); } std::vector<std::string> enumerateTautomerSmiles( const std::string &smiles, const CleanupParameters &params) { std::shared_ptr<RWMol> mol(SmilesToMol(smiles, 0, false)); cleanup(*mol, params); MolOps::sanitizeMol(*mol); auto *tautparams = new TautomerCatalogParams(params.tautomerTransforms); // unsigned int ntautomers = tautparams->getNumTautomers(); TautomerEnumerator te(new TautomerCatalog(tautparams)); std::vector<ROMOL_SPTR> res = te.enumerate(*mol); std::vector<std::string> tsmiles; for (const auto &r : res) { tsmiles.push_back(MolToSmiles(*r)); } return tsmiles; } } // end of namespace MolStandardize } // end of namespace RDKit
1
21,391
This looks to be a breaking change. I know it will affect some of my code that uses the Tautomer enumerator.
rdkit-rdkit
cpp
@@ -61,8 +61,12 @@ func (m *bucketlistMessage) String() string { if output.Format == "" { var lines []string lines = append(lines, fmt.Sprintf("Blockchain Node: %s", m.Node)) - for _, bucket := range m.Bucketlist { - lines = append(lines, bucket.String()) + if len(m.Bucketlist) == 0 { + lines = append(lines, "Empty bucketlist with given address") + } else { + for _, bucket := range m.Bucketlist { + lines = append(lines, bucket.String()) + } } return strings.Join(lines, "\n") }
1
// Copyright (c) 2020 IoTeX Foundation // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package bc import ( "context" "fmt" "strings" "github.com/golang/protobuf/proto" "github.com/grpc-ecosystem/go-grpc-middleware/util/metautils" "github.com/spf13/cobra" "google.golang.org/grpc/status" "github.com/iotexproject/iotex-proto/golang/iotexapi" "github.com/iotexproject/iotex-proto/golang/iotextypes" "github.com/iotexproject/iotex-core/ioctl/config" "github.com/iotexproject/iotex-core/ioctl/output" "github.com/iotexproject/iotex-core/ioctl/util" ) // Multi-language support var ( bcBucketListCmdShorts = map[config.Language]string{ config.English: "Get bucket list for given address on IoTeX blockchain", config.Chinese: "在IoTeX区块链上读取账户地址的投票列表", } bcBucketListUses = map[config.Language]string{ config.English: "bucketlist [ALIAS|ADDRESS]", config.Chinese: "bucketlist [别名|地址]", } ) // bcBucketListCmd represents the bc bucketlist command var bcBucketListCmd = &cobra.Command{ Use: config.TranslateInLang(bcBucketListUses, config.UILanguage), Short: config.TranslateInLang(bcBucketListCmdShorts, config.UILanguage), Args: cobra.RangeArgs(0, 1), RunE: func(cmd *cobra.Command, args []string) error { cmd.SilenceUsage = true arg := "" if len(args) == 1 { arg = args[0] } err := getBucketList(arg) return output.PrintError(err) }, } type bucketlistMessage struct { Node string `json:"node"` Bucketlist []*bucket `json:"bucketlist"` } func (m *bucketlistMessage) String() string { if output.Format == "" { var lines []string lines = append(lines, fmt.Sprintf("Blockchain Node: %s", m.Node)) for _, bucket := range m.Bucketlist { lines = append(lines, bucket.String()) } return strings.Join(lines, "\n") } return output.FormatString(output.Result, m) } // getBucketList get bucket list from chain func getBucketList(arg string) error { address, err := util.GetAddress(arg) if err != nil { return output.NewError(output.AddressError, "", err) } bl, err := getBucketListByAddress(address) if err != nil { return err } var bucketlist []*bucket for _, b := range bl.Buckets { bucket, err := newBucket(b) if err != nil { return err } bucketlist = append(bucketlist, bucket) } message := bucketlistMessage{ Node: config.ReadConfig.Endpoint, Bucketlist: bucketlist, } fmt.Println(message.String()) return nil } func getBucketListByAddress(addr string) (*iotextypes.VoteBucketList, error) { conn, err := util.ConnectToEndpoint(config.ReadConfig.SecureConnect && !config.Insecure) if err != nil { return nil, output.NewError(output.NetworkError, "failed to connect to endpoint", err) } defer conn.Close() cli := iotexapi.NewAPIServiceClient(conn) method := &iotexapi.ReadStakingDataMethod{ Method: iotexapi.ReadStakingDataMethod_BUCKETS_BY_VOTER, } methodData, err := proto.Marshal(method) if err != nil { return nil, output.NewError(output.SerializationError, "failed to marshal read staking data method", err) } readStakingdataRequest := &iotexapi.ReadStakingDataRequest{ Request: &iotexapi.ReadStakingDataRequest_BucketsByVoter{ BucketsByVoter: &iotexapi.ReadStakingDataRequest_VoteBucketsByVoter{ VoterAddress: addr, Pagination: &iotexapi.PaginationParam{ Offset: uint32(0), Limit: uint32(1000), }, }, }, } 
requestData, err := proto.Marshal(readStakingdataRequest) if err != nil { return nil, output.NewError(output.SerializationError, "failed to marshal read staking data request", err) } request := &iotexapi.ReadStateRequest{ ProtocolID: []byte("staking"), MethodName: methodData, Arguments: [][]byte{requestData}, } ctx := context.Background() jwtMD, err := util.JwtAuth() if err == nil { ctx = metautils.NiceMD(jwtMD).ToOutgoing(ctx) } response, err := cli.ReadState(ctx, request) if err != nil { sta, ok := status.FromError(err) if ok { return nil, output.NewError(output.APIError, sta.Message(), nil) } return nil, output.NewError(output.NetworkError, "failed to invoke ReadState api", err) } bucketlist := iotextypes.VoteBucketList{} if err := proto.Unmarshal(response.Data, &bucketlist); err != nil { return nil, output.NewError(output.SerializationError, "failed to unmarshal response", err) } return &bucketlist, nil }
1
21,844
Perhaps remove this line
iotexproject-iotex-core
go
@@ -707,6 +707,9 @@ void removeHs(RWMol &mol, const RemoveHsParameters &ps, bool sanitize) { if (!ps.removeMapped && atom->getAtomMapNum()) { continue; } + if (!ps.removeHydrides && atom->getAtomicNum() == 1 && atom->getFormalCharge() == -1) { + continue; + } bool removeIt = true; if (atom->getDegree() && (!ps.removeDummyNeighbors || !ps.removeDefiningBondStereo ||
1
// // Copyright (C) 2003-2019 Greg Landrum and Rational Discovery LLC // // @@ All Rights Reserved @@ // This file is part of the RDKit. // The contents are covered by the terms of the BSD license // which is included in the file license.txt, found at the root // of the RDKit source tree. // #include "RDKitBase.h" #include <list> #include "QueryAtom.h" #include "QueryOps.h" #include "MonomerInfo.h" #include <Geometry/Transform3D.h> #include <Geometry/point.h> #include <boost/foreach.hpp> #include <boost/lexical_cast.hpp> #include <boost/dynamic_bitset.hpp> namespace RDKit { // Local utility functionality: namespace { Atom *getAtomNeighborNot(ROMol *mol, const Atom *atom, const Atom *other) { PRECONDITION(mol, "bad molecule"); PRECONDITION(atom, "bad atom"); PRECONDITION(atom->getDegree() > 1, "bad degree"); PRECONDITION(other, "bad atom"); Atom *res = nullptr; ROMol::ADJ_ITER nbrIdx, endNbrs; boost::tie(nbrIdx, endNbrs) = mol->getAtomNeighbors(atom); while (nbrIdx != endNbrs) { if (*nbrIdx != other->getIdx()) { res = mol->getAtomWithIdx(*nbrIdx); break; } ++nbrIdx; } POSTCONDITION(res, "no neighbor found"); return res; } void setHydrogenCoords(ROMol *mol, unsigned int hydIdx, unsigned int heavyIdx) { // we will loop over all the coordinates PRECONDITION(mol, "bad molecule"); PRECONDITION(heavyIdx != hydIdx, "degenerate atoms"); Atom *hydAtom = mol->getAtomWithIdx(hydIdx); PRECONDITION(mol->getAtomDegree(hydAtom) == 1, "bad atom degree"); const Bond *bond = mol->getBondBetweenAtoms(heavyIdx, hydIdx); PRECONDITION(bond, "no bond between atoms"); const Atom *heavyAtom = mol->getAtomWithIdx(heavyIdx); double bondLength = PeriodicTable::getTable()->getRb0(1) + PeriodicTable::getTable()->getRb0(heavyAtom->getAtomicNum()); RDGeom::Point3D dirVect(0, 0, 0); RDGeom::Point3D perpVect, rotnAxis, nbrPerp; RDGeom::Point3D nbr1Vect, nbr2Vect, nbr3Vect; RDGeom::Transform3D tform; RDGeom::Point3D heavyPos, hydPos; const Atom *nbr1 = nullptr, *nbr2 = nullptr, *nbr3 = nullptr; const Bond *nbrBond; ROMol::ADJ_ITER nbrIdx, endNbrs; switch (heavyAtom->getDegree()) { case 1: // -------------------------------------------------------------------------- // No other atoms present: // -------------------------------------------------------------------------- // loop over the conformations and set the coordinates for (auto cfi = mol->beginConformers(); cfi != mol->endConformers(); cfi++) { if ((*cfi)->is3D()) { dirVect.z = 1; } else { dirVect.x = 1; } heavyPos = (*cfi)->getAtomPos(heavyIdx); hydPos = heavyPos + dirVect * ((*cfi)->is3D() ? bondLength : 1.0); (*cfi)->setAtomPos(hydIdx, hydPos); } break; case 2: // -------------------------------------------------------------------------- // One other neighbor: // -------------------------------------------------------------------------- nbr1 = getAtomNeighborNot(mol, heavyAtom, hydAtom); for (auto cfi = mol->beginConformers(); cfi != mol->endConformers(); ++cfi) { heavyPos = (*cfi)->getAtomPos(heavyIdx); RDGeom::Point3D nbr1Pos = (*cfi)->getAtomPos(nbr1->getIdx()); // get a normalized vector pointing away from the neighbor: nbr1Vect = nbr1Pos - heavyPos; if (fabs(nbr1Vect.lengthSq()) < 1e-4) { // no difference, which likely indicates that we have redundant atoms. // just put it on top of the heavy atom. 
This was #678 (*cfi)->setAtomPos(hydIdx, heavyPos); continue; } nbr1Vect.normalize(); nbr1Vect *= -1; // ok, nbr1Vect points away from the other atom, figure out where // this H goes: switch (heavyAtom->getHybridization()) { case Atom::SP3: // get a perpendicular to nbr1Vect: if ((*cfi)->is3D()) { perpVect = nbr1Vect.getPerpendicular(); } else { perpVect.z = 1.0; } // and move off it: tform.SetRotation((180 - 109.471) * M_PI / 180., perpVect); dirVect = tform * nbr1Vect; hydPos = heavyPos + dirVect * ((*cfi)->is3D() ? bondLength : 1.0); (*cfi)->setAtomPos(hydIdx, hydPos); break; case Atom::SP2: // default position is to just take an arbitrary perpendicular: perpVect = nbr1Vect.getPerpendicular(); if (nbr1->getDegree() > 1) { // can we use the neighboring atom to establish a perpendicular? nbrBond = mol->getBondBetweenAtoms(heavyIdx, nbr1->getIdx()); if (nbrBond->getIsAromatic() || nbrBond->getBondType() == Bond::DOUBLE) { nbr2 = getAtomNeighborNot(mol, nbr1, heavyAtom); nbr2Vect = nbr1Pos.directionVector((*cfi)->getAtomPos(nbr2->getIdx())); perpVect = nbr2Vect.crossProduct(nbr1Vect); } } perpVect.normalize(); // rotate the nbr1Vect 60 degrees about perpVect and we're done: tform.SetRotation(60. * M_PI / 180., perpVect); dirVect = tform * nbr1Vect; hydPos = heavyPos + dirVect * ((*cfi)->is3D() ? bondLength : 1.0); (*cfi)->setAtomPos(hydIdx, hydPos); break; case Atom::SP: // just lay the H along the vector: dirVect = nbr1Vect; hydPos = heavyPos + dirVect * ((*cfi)->is3D() ? bondLength : 1.0); (*cfi)->setAtomPos(hydIdx, hydPos); break; default: // FIX: handle other hybridizations // for now, just lay the H along the vector: dirVect = nbr1Vect; hydPos = heavyPos + dirVect * ((*cfi)->is3D() ? bondLength : 1.0); (*cfi)->setAtomPos(hydIdx, hydPos); } } break; case 3: // -------------------------------------------------------------------------- // Two other neighbors: // -------------------------------------------------------------------------- boost::tie(nbrIdx, endNbrs) = mol->getAtomNeighbors(heavyAtom); while (nbrIdx != endNbrs) { if (*nbrIdx != hydIdx) { if (!nbr1) { nbr1 = mol->getAtomWithIdx(*nbrIdx); } else { nbr2 = mol->getAtomWithIdx(*nbrIdx); } } ++nbrIdx; } TEST_ASSERT(nbr1); TEST_ASSERT(nbr2); for (auto cfi = mol->beginConformers(); cfi != mol->endConformers(); ++cfi) { // start along the average of the two vectors: heavyPos = (*cfi)->getAtomPos(heavyIdx); nbr1Vect = heavyPos - (*cfi)->getAtomPos(nbr1->getIdx()); nbr2Vect = heavyPos - (*cfi)->getAtomPos(nbr2->getIdx()); if (fabs(nbr1Vect.lengthSq()) < 1e-4 || fabs(nbr2Vect.lengthSq()) < 1e-4) { // no difference, which likely indicates that we have redundant atoms. // just put it on top of the heavy atom. This was #678 (*cfi)->setAtomPos(hydIdx, heavyPos); continue; } nbr1Vect.normalize(); nbr2Vect.normalize(); dirVect = nbr1Vect + nbr2Vect; dirVect.normalize(); if ((*cfi)->is3D()) { switch (heavyAtom->getHybridization()) { case Atom::SP3: // get the perpendicular to the neighbors: nbrPerp = nbr1Vect.crossProduct(nbr2Vect); // and the perpendicular to that: rotnAxis = nbrPerp.crossProduct(dirVect); // and then rotate about that: rotnAxis.normalize(); tform.SetRotation((109.471 / 2) * M_PI / 180., rotnAxis); dirVect = tform * dirVect; hydPos = heavyPos + dirVect * ((*cfi)->is3D() ? bondLength : 1.0); (*cfi)->setAtomPos(hydIdx, hydPos); break; case Atom::SP2: // don't need to do anything here, the H atom goes right on the // direction vector hydPos = heavyPos + dirVect * ((*cfi)->is3D() ? 
bondLength : 1.0); (*cfi)->setAtomPos(hydIdx, hydPos); break; default: // FIX: handle other hybridizations // for now, just lay the H along the neighbor vector; hydPos = heavyPos + dirVect * ((*cfi)->is3D() ? bondLength : 1.0); (*cfi)->setAtomPos(hydIdx, hydPos); break; } } else { // don't need to do anything here, the H atom goes right on the // direction vector hydPos = heavyPos + dirVect; (*cfi)->setAtomPos(hydIdx, hydPos); } } break; case 4: // -------------------------------------------------------------------------- // Three other neighbors: // -------------------------------------------------------------------------- boost::tie(nbrIdx, endNbrs) = mol->getAtomNeighbors(heavyAtom); if (heavyAtom->hasProp(common_properties::_CIPCode)) { // if the central atom is chiral, we'll order the neighbors // by CIP rank: std::vector<std::pair<unsigned int, int>> nbrs; while (nbrIdx != endNbrs) { if (*nbrIdx != hydIdx) { const Atom *tAtom = mol->getAtomWithIdx(*nbrIdx); unsigned int cip = 0; tAtom->getPropIfPresent<unsigned int>(common_properties::_CIPRank, cip); nbrs.push_back(std::make_pair(cip, rdcast<int>(*nbrIdx))); } ++nbrIdx; } std::sort(nbrs.begin(), nbrs.end()); nbr1 = mol->getAtomWithIdx(nbrs[0].second); nbr2 = mol->getAtomWithIdx(nbrs[1].second); nbr3 = mol->getAtomWithIdx(nbrs[2].second); } else { // central atom isn't chiral, so the neighbor ordering isn't important: while (nbrIdx != endNbrs) { if (*nbrIdx != hydIdx) { if (!nbr1) { nbr1 = mol->getAtomWithIdx(*nbrIdx); } else if (!nbr2) { nbr2 = mol->getAtomWithIdx(*nbrIdx); } else { nbr3 = mol->getAtomWithIdx(*nbrIdx); } } ++nbrIdx; } } TEST_ASSERT(nbr1); TEST_ASSERT(nbr2); TEST_ASSERT(nbr3); for (auto cfi = mol->beginConformers(); cfi != mol->endConformers(); ++cfi) { // use the average of the three vectors: heavyPos = (*cfi)->getAtomPos(heavyIdx); nbr1Vect = heavyPos - (*cfi)->getAtomPos(nbr1->getIdx()); nbr2Vect = heavyPos - (*cfi)->getAtomPos(nbr2->getIdx()); nbr3Vect = heavyPos - (*cfi)->getAtomPos(nbr3->getIdx()); if (fabs(nbr1Vect.lengthSq()) < 1e-4 || fabs(nbr2Vect.lengthSq()) < 1e-4 || fabs(nbr3Vect.lengthSq()) < 1e-4) { // no difference, which likely indicates that we have redundant atoms. // just put it on top of the heavy atom. This was #678 (*cfi)->setAtomPos(hydIdx, heavyPos); continue; } nbr1Vect.normalize(); nbr2Vect.normalize(); nbr3Vect.normalize(); // if three neighboring atoms are more or less planar, this // is going to be in a quasi-random (but almost definitely bad) // direction... // correct for this (issue 2951221): if ((*cfi)->is3D()) { if (fabs(nbr3Vect.dotProduct(nbr1Vect.crossProduct(nbr2Vect))) < 0.1) { // compute the normal: dirVect = nbr1Vect.crossProduct(nbr2Vect); std::string cipCode; if (heavyAtom->getPropIfPresent(common_properties::_CIPCode, cipCode)) { // the heavy atom is a chiral center, make sure // that we went go the right direction to preserve // its chirality. 
We use the chiral volume for this: RDGeom::Point3D v1 = dirVect - nbr3Vect; RDGeom::Point3D v2 = nbr1Vect - nbr3Vect; RDGeom::Point3D v3 = nbr2Vect - nbr3Vect; double vol = v1.dotProduct(v2.crossProduct(v3)); // FIX: this is almost certainly wrong and should use the chiral // tag if ((cipCode == "S" && vol < 0) || (cipCode == "R" && vol > 0)) { dirVect *= -1; } } } else { dirVect = nbr1Vect + nbr2Vect + nbr3Vect; } } else { // we're in flatland // this was github #908 // We're in a 2D conformation, put the H between the two neighbors // that have the widest angle between them: double minDot = nbr1Vect.dotProduct(nbr2Vect); dirVect = nbr1Vect + nbr2Vect; if (nbr2Vect.dotProduct(nbr3Vect) < minDot) { minDot = nbr2Vect.dotProduct(nbr3Vect); dirVect = nbr2Vect + nbr3Vect; } if (nbr1Vect.dotProduct(nbr3Vect) < minDot) { minDot = nbr1Vect.dotProduct(nbr3Vect); dirVect = nbr1Vect + nbr3Vect; } dirVect *= -1; } dirVect.normalize(); hydPos = heavyPos + dirVect * ((*cfi)->is3D() ? bondLength : 1.0); (*cfi)->setAtomPos(hydIdx, hydPos); } break; default: // -------------------------------------------------------------------------- // FIX: figure out what to do here // -------------------------------------------------------------------------- hydPos = heavyPos + dirVect * bondLength; for (auto cfi = mol->beginConformers(); cfi != mol->endConformers(); ++cfi) { (*cfi)->setAtomPos(hydIdx, hydPos); } break; } } void AssignHsResidueInfo(RWMol &mol) { int max_serial = 0; unsigned int stopIdx = mol.getNumAtoms(); for (unsigned int aidx = 0; aidx < stopIdx; ++aidx) { auto *info = (AtomPDBResidueInfo *)(mol.getAtomWithIdx(aidx)->getMonomerInfo()); if (info && info->getMonomerType() == AtomMonomerInfo::PDBRESIDUE && info->getSerialNumber() > max_serial) { max_serial = info->getSerialNumber(); } } AtomPDBResidueInfo *current_info = nullptr; int current_h_id = 0; for (unsigned int aidx = 0; aidx < stopIdx; ++aidx) { Atom *newAt = mol.getAtomWithIdx(aidx); auto *info = (AtomPDBResidueInfo *)(newAt->getMonomerInfo()); if (info && info->getMonomerType() == AtomMonomerInfo::PDBRESIDUE) { ROMol::ADJ_ITER begin, end; boost::tie(begin, end) = mol.getAtomNeighbors(newAt); while (begin != end) { if (mol.getAtomWithIdx(*begin)->getAtomicNum() == 1) { // Make all Hs unique - increment id even for existing ++current_h_id; // skip if hydrogen already has PDB info auto *h_info = (AtomPDBResidueInfo *)mol.getAtomWithIdx(*begin) ->getMonomerInfo(); if (h_info && h_info->getMonomerType() == AtomMonomerInfo::PDBRESIDUE) { continue; } // the hydrogens have unique names on residue basis (H1, H2, ...) 
if (!current_info || current_info->getResidueNumber() != info->getResidueNumber() || current_info->getChainId() != info->getChainId()) { current_h_id = 1; current_info = info; } std::string h_label = std::to_string(current_h_id); if (h_label.length() > 3) { h_label = h_label.substr(h_label.length() - 3, 3); } while (h_label.length() < 3) { h_label = h_label + " "; } h_label = "H" + h_label; // wrap around id to '3H12' h_label = h_label.substr(3, 1) + h_label.substr(0, 3); AtomPDBResidueInfo *newInfo = new AtomPDBResidueInfo( h_label, max_serial, "", info->getResidueName(), info->getResidueNumber(), info->getChainId(), "", 1.0, 0.0, info->getIsHeteroAtom()); mol.getAtomWithIdx(*begin)->setMonomerInfo(newInfo); ++max_serial; } ++begin; } } } } } // end of unnamed namespace namespace MolOps { void addHs(RWMol &mol, bool explicitOnly, bool addCoords, const UINT_VECT *onlyOnAtoms, bool addResidueInfo) { // when we hit each atom, clear its computed properties // NOTE: it is essential that we not clear the ring info in the // molecule's computed properties. We don't want to have to // regenerate that. This caused Issue210 and Issue212: mol.clearComputedProps(false); // precompute the number of hydrogens we are going to add so that we can // pre-allocate the necessary space on the conformations of the molecule // for their coordinates unsigned int numAddHyds = 0; for (auto at : mol.atoms()) { if (!onlyOnAtoms || std::find(onlyOnAtoms->begin(), onlyOnAtoms->end(), at->getIdx()) != onlyOnAtoms->end()) { numAddHyds += at->getNumExplicitHs(); if (!explicitOnly) { numAddHyds += at->getNumImplicitHs(); } } } unsigned int nSize = mol.getNumAtoms() + numAddHyds; // loop over the conformations of the molecule and allocate new space // for the H locations (need to do this even if we aren't adding coords so // that the conformers have the correct number of atoms). for (auto cfi = mol.beginConformers(); cfi != mol.endConformers(); ++cfi) { (*cfi)->reserve(nSize); } unsigned int stopIdx = mol.getNumAtoms(); for (unsigned int aidx = 0; aidx < stopIdx; ++aidx) { if (onlyOnAtoms && std::find(onlyOnAtoms->begin(), onlyOnAtoms->end(), aidx) == onlyOnAtoms->end()) { continue; } Atom *newAt = mol.getAtomWithIdx(aidx); unsigned int newIdx; newAt->clearComputedProps(); // always convert explicit Hs unsigned int onumexpl = newAt->getNumExplicitHs(); for (unsigned int i = 0; i < onumexpl; i++) { newIdx = mol.addAtom(new Atom(1), false, true); mol.addBond(aidx, newIdx, Bond::SINGLE); mol.getAtomWithIdx(newIdx)->updatePropertyCache(); if (addCoords) { setHydrogenCoords(&mol, newIdx, aidx); } } // clear the local property newAt->setNumExplicitHs(0); if (!explicitOnly) { // take care of implicits for (unsigned int i = 0; i < mol.getAtomWithIdx(aidx)->getNumImplicitHs(); i++) { newIdx = mol.addAtom(new Atom(1), false, true); mol.addBond(aidx, newIdx, Bond::SINGLE); // set the isImplicit label so that we can strip these back // off later if need be. mol.getAtomWithIdx(newIdx)->setProp(common_properties::isImplicit, 1); mol.getAtomWithIdx(newIdx)->updatePropertyCache(); if (addCoords) { setHydrogenCoords(&mol, newIdx, aidx); } } // be very clear about implicits not being allowed in this representation newAt->setProp(common_properties::origNoImplicit, newAt->getNoImplicit(), true); newAt->setNoImplicit(true); } // update the atom's derived properties (valence count, etc.) 
// no sense in being strict here (was github #2782) newAt->updatePropertyCache(false); } // take care of AtomPDBResidueInfo for Hs if root atom has it if (addResidueInfo) { AssignHsResidueInfo(mol); } } ROMol *addHs(const ROMol &mol, bool explicitOnly, bool addCoords, const UINT_VECT *onlyOnAtoms, bool addResidueInfo) { auto *res = new RWMol(mol); addHs(*res, explicitOnly, addCoords, onlyOnAtoms, addResidueInfo); return static_cast<ROMol *>(res); }; namespace { // returns whether or not an adjustment was made, in case we want that info bool adjustStereoAtomsIfRequired(RWMol &mol, const Atom *atom, const Atom *heavyAtom) { PRECONDITION(atom != nullptr, "bad atom"); PRECONDITION(heavyAtom != nullptr, "bad heavy atom"); // nothing we can do if the degree is only 2 (and we should have covered // that earlier anyway) if (heavyAtom->getDegree() == 2) { return false; } const auto &cbnd = mol.getBondBetweenAtoms(atom->getIdx(), heavyAtom->getIdx()); if (!cbnd) { return false; } for (const auto &nbri : boost::make_iterator_range(mol.getAtomBonds(heavyAtom))) { Bond *bnd = mol[nbri]; if (bnd->getBondType() == Bond::DOUBLE && bnd->getStereo() > Bond::STEREOANY) { auto sAtomIt = std::find(bnd->getStereoAtoms().begin(), bnd->getStereoAtoms().end(), atom->getIdx()); if (sAtomIt != bnd->getStereoAtoms().end()) { // sAtomIt points to the position of this atom's index in the list. // find the index of another atom attached to the heavy atom and // use it to update sAtomIt unsigned int dblNbrIdx = bnd->getOtherAtomIdx(heavyAtom->getIdx()); for (const auto &nbri : boost::make_iterator_range(mol.getAtomNeighbors(heavyAtom))) { const auto &nbr = mol[nbri]; if (nbr->getIdx() == dblNbrIdx || nbr->getIdx() == atom->getIdx()) { continue; } *sAtomIt = nbr->getIdx(); bool madeAdjustment = true; switch (bnd->getStereo()) { case Bond::STEREOCIS: bnd->setStereo(Bond::STEREOTRANS); break; case Bond::STEREOTRANS: bnd->setStereo(Bond::STEREOCIS); break; default: // I think we shouldn't need to do anything with E and Z... 
madeAdjustment = false; break; } return madeAdjustment; } } } } return false; } void molRemoveH(RWMol &mol, unsigned int idx, bool updateExplicitCount) { auto atom = mol.getAtomWithIdx(idx); PRECONDITION(atom->getAtomicNum() == 1, "idx corresponds to a non-Hydrogen"); for (const auto &nbri : boost::make_iterator_range(mol.getAtomBonds(atom))) { const Bond *bond = mol[nbri]; Atom *heavyAtom = bond->getOtherAtom(atom); int heavyAtomNum = heavyAtom->getAtomicNum(); // we'll update the neighbor's explicit H count if we were told to // *or* if the neighbor is chiral, in which case the H is needed // in order to complete the coordination // *or* if the neighbor has the noImplicit flag set: if (updateExplicitCount || heavyAtom->getNoImplicit() || heavyAtom->getChiralTag() != Atom::CHI_UNSPECIFIED) { heavyAtom->setNumExplicitHs(heavyAtom->getNumExplicitHs() + 1); } else { // this is a special case related to Issue 228 and the // "disappearing Hydrogen" problem discussed in MolOps::adjustHs // // If we remove a hydrogen from an aromatic N or P, or if // the heavy atom it is connected to is not in its default // valence state, we need to be *sure* to increment the // explicit count, even if the H itself isn't marked as explicit const INT_VECT &defaultVs = PeriodicTable::getTable()->getValenceList(heavyAtomNum); if (((heavyAtomNum == 7 || heavyAtomNum == 15) && heavyAtom->getIsAromatic()) || (std::find(defaultVs.begin() + 1, defaultVs.end(), heavyAtom->getTotalValence()) != defaultVs.end())) { heavyAtom->setNumExplicitHs(heavyAtom->getNumExplicitHs() + 1); } } // One other consequence of removing the H from the graph is // that we may change the ordering of the bonds about a // chiral center. This may change the chiral label at that // atom. We deal with that by explicitly checking here: if (heavyAtom->getChiralTag() != Atom::CHI_UNSPECIFIED) { INT_LIST neighborIndices; for (const auto &nbri : boost::make_iterator_range(mol.getAtomBonds(heavyAtom))) { Bond *nbnd = mol[nbri]; if (nbnd->getIdx() != bond->getIdx()) { neighborIndices.push_back(nbnd->getIdx()); } } neighborIndices.push_back(bond->getIdx()); int nSwaps = heavyAtom->getPerturbationOrder(neighborIndices); // std::cerr << "H: "<<atom->getIdx()<<" hvy: // "<<heavyAtom->getIdx()<<" swaps: " << nSwaps<<std::endl; if (nSwaps % 2) { heavyAtom->invertChirality(); } } // if it's a wavy bond, then we need to // mark the beginning atom with the _UnknownStereo tag. // so that we know later that something was affecting its // stereochem if (bond->getBondDir() == Bond::UNKNOWN && bond->getBeginAtomIdx() == heavyAtom->getIdx()) { heavyAtom->setProp(common_properties::_UnknownStereo, 1); } else if (bond->getBondDir() == Bond::ENDDOWNRIGHT || bond->getBondDir() == Bond::ENDUPRIGHT) { // if the direction is set on this bond and the atom it's connected to // has no other single bonds with directions set, then we need to set // direction on one of the other neighbors in order to avoid double // bond stereochemistry possibly being lost. 
This was github #754 bool foundADir = false; Bond *oBond = nullptr; for (const auto &nbri : boost::make_iterator_range(mol.getAtomBonds(heavyAtom))) { Bond *nbnd = mol[nbri]; if (nbnd->getIdx() != bond->getIdx() && nbnd->getBondType() == Bond::SINGLE) { if (nbnd->getBondDir() == Bond::NONE) { oBond = nbnd; } else { foundADir = true; } } } if (!foundADir && oBond != nullptr) { bool flipIt = (oBond->getBeginAtom() == heavyAtom) && (bond->getBeginAtom() == heavyAtom); if (flipIt) { oBond->setBondDir(bond->getBondDir() == Bond::ENDDOWNRIGHT ? Bond::ENDUPRIGHT : Bond::ENDDOWNRIGHT); } else { oBond->setBondDir(bond->getBondDir()); } } // if this atom is one of the stereoatoms for a double bond we need // to switch the stereo atom on this end to be the other neighbor // This was part of github #1810 adjustStereoAtomsIfRequired(mol, atom, heavyAtom); } else { // if this atom is one of the stereoatoms for a double bond we need // to switch the stereo atom on this end to be the other neighbor // This was part of github #1810 adjustStereoAtomsIfRequired(mol, atom, heavyAtom); } } mol.removeAtom(atom); } } // end of anonymous namespace void removeHs(RWMol &mol, const RemoveHsParameters &ps, bool sanitize) { for (auto atom : mol.atoms()) { atom->updatePropertyCache(false); } boost::dynamic_bitset<> atomsToRemove{mol.getNumAtoms(), 0}; for (auto atom : mol.atoms()) { if (atom->getAtomicNum() != 1) { continue; } if (!ps.removeWithQuery && atom->hasQuery()) { continue; } if (!ps.removeDegreeZero && !atom->getDegree()) { if (ps.showWarnings) { BOOST_LOG(rdWarningLog) << "WARNING: not removing hydrogen atom without neighbors" << std::endl; } continue; } if (!ps.removeHigherDegrees && atom->getDegree() > 1) { continue; } if (!ps.removeIsotopes && atom->getIsotope()) { continue; } if (!ps.removeNonimplicit && !atom->hasProp(common_properties::isImplicit)) { continue; } if (!ps.removeMapped && atom->getAtomMapNum()) { continue; } bool removeIt = true; if (atom->getDegree() && (!ps.removeDummyNeighbors || !ps.removeDefiningBondStereo || !ps.removeOnlyHNeighbors)) { bool onlyHNeighbors = true; ROMol::ADJ_ITER begin, end; boost::tie(begin, end) = mol.getAtomNeighbors(atom); while (begin != end && removeIt) { auto nbr = mol.getAtomWithIdx(*begin); // is it a dummy? if (!ps.removeDummyNeighbors && nbr->getAtomicNum() < 1) { removeIt = false; if (ps.showWarnings) { BOOST_LOG(rdWarningLog) << "WARNING: not removing hydrogen atom " "with dummy atom neighbors" << std::endl; } } if (!ps.removeOnlyHNeighbors && nbr->getAtomicNum() != 1) { onlyHNeighbors = false; } if (!ps.removeWithWedgedBond) { const auto bnd = mol.getBondBetweenAtoms(atom->getIdx(), nbr->getIdx()); if (bnd->getBondDir() == Bond::BEGINDASH || bnd->getBondDir() == Bond::BEGINWEDGE) { removeIt = false; if (ps.showWarnings) { BOOST_LOG(rdWarningLog) << "WARNING: not removing hydrogen atom " "with wedged bond" << std::endl; } } } // Check to see if the neighbor has a double bond and we're the only // neighbor at this end. 
This was part of github #1810 if (!ps.removeDefiningBondStereo && nbr->getDegree() == 2) { for (const auto &nbri : boost::make_iterator_range(mol.getAtomBonds(nbr))) { const Bond *bnd = mol[nbri]; if (bnd->getBondType() == Bond::DOUBLE && (bnd->getStereo() > Bond::STEREOANY || mol.getBondBetweenAtoms(atom->getIdx(), nbr->getIdx()) ->getBondDir() > Bond::NONE)) { removeIt = false; break; } } } ++begin; } if (removeIt && (!ps.removeOnlyHNeighbors && onlyHNeighbors)) { removeIt = false; } } if (removeIt) { atomsToRemove.set(atom->getIdx()); } } // end of the loop over atoms // now that we know which atoms need to be removed, go ahead and remove them for (int idx = mol.getNumAtoms() - 1; idx >= 0; --idx) { if (atomsToRemove[idx]) { molRemoveH(mol, idx, ps.updateExplicitCount); } } // // If we didn't only remove implicit Hs, which are guaranteed to // be the highest numbered atoms, we may have altered atom indices. // This can screw up derived properties (such as ring members), so // do some checks: // if (!atomsToRemove.empty() && ps.removeNonimplicit && sanitize) { sanitizeMol(mol); } }; ROMol *removeHs(const ROMol &mol, const RemoveHsParameters &ps, bool sanitize) { auto *res = new RWMol(mol); try { removeHs(*res, ps, sanitize); } catch (const MolSanitizeException &) { delete res; throw; } return static_cast<ROMol *>(res); } void removeHs(RWMol &mol, bool implicitOnly, bool updateExplicitCount, bool sanitize) { RemoveHsParameters ps; ps.removeNonimplicit = !implicitOnly; ps.updateExplicitCount = updateExplicitCount; removeHs(mol, ps, sanitize); }; ROMol *removeHs(const ROMol &mol, bool implicitOnly, bool updateExplicitCount, bool sanitize) { auto *res = new RWMol(mol); try { removeHs(*res, implicitOnly, updateExplicitCount, sanitize); } catch (const MolSanitizeException &) { delete res; throw; } return static_cast<ROMol *>(res); } void removeAllHs(RWMol &mol, bool sanitize) { RemoveHsParameters ps; ps.removeDegreeZero = true; ps.removeHigherDegrees = true; ps.removeOnlyHNeighbors = true; ps.removeIsotopes = true; ps.removeDummyNeighbors = true; ps.removeDefiningBondStereo = true; ps.removeWithWedgedBond = true; ps.removeWithQuery = true; ps.removeNonimplicit = true; ps.showWarnings = false; removeHs(mol, ps, sanitize); }; ROMol *removeAllHs(const ROMol &mol, bool sanitize) { auto *res = new RWMol(mol); try { removeAllHs(*res, sanitize); } catch (const MolSanitizeException &) { delete res; throw; } return static_cast<ROMol *>(res); } namespace { bool isQueryH(const Atom *atom) { PRECONDITION(atom, "bogus atom"); if (atom->getAtomicNum() == 1) { // the simple case: the atom is flagged as being an H and // has no query if (!atom->hasQuery() || (!atom->getQuery()->getNegation() && atom->getQuery()->getDescription() == "AtomAtomicNum")) { return true; } } if (atom->getDegree() != 1) { // only degree 1 return false; } if (atom->hasQuery() && atom->getQuery()->getNegation()) { // we will not merge negated queries return false; } bool hasHQuery = false, hasOr = false; if (atom->hasQuery()) { if (atom->getQuery()->getDescription() == "AtomOr") { hasOr = true; } std::list<QueryAtom::QUERYATOM_QUERY::CHILD_TYPE> childStack( atom->getQuery()->beginChildren(), atom->getQuery()->endChildren()); // the logic gets too complicated if there's an OR in the children, so // just punt on those (with a warning) while (!(hasHQuery && hasOr) && childStack.size()) { QueryAtom::QUERYATOM_QUERY::CHILD_TYPE query = childStack.front(); childStack.pop_front(); if (query->getDescription() == "AtomOr") { hasOr = true; } 
else if (query->getDescription() == "AtomAtomicNum") { if (static_cast<ATOM_EQUALS_QUERY *>(query.get())->getVal() == 1 && !query->getNegation()) { hasHQuery = true; } } else { QueryAtom::QUERYATOM_QUERY::CHILD_VECT_CI child1; for (child1 = query->beginChildren(); child1 != query->endChildren(); ++child1) { childStack.push_back(*child1); } } } // std::cerr<<" !!!1 "<<atom->getIdx()<<" "<<hasHQuery<<" // "<<hasOr<<std::endl; if (hasHQuery && hasOr) { BOOST_LOG(rdWarningLog) << "WARNING: merging explicit H queries involved " "in ORs is not supported. This query will not " "be merged" << std::endl; return false; } } return hasHQuery; } } // namespace // // This routine removes explicit hydrogens (and bonds to them) from // the molecular graph and adds them as queries to the heavy atoms // to which they are bound. If the heavy atoms (or atom queries) // already have hydrogen-count queries, they will be updated. // // NOTE: // - Hydrogens which aren't connected to a heavy atom will not be // removed. This prevents molecules like "[H][H]" from having // all atoms removed. // // - By default all hydrogens are removed, however if // merge_unmapped_only is true, any hydrogen participating // in an atom map will be retained void mergeQueryHs(RWMol &mol, bool mergeUnmappedOnly) { std::vector<unsigned int> atomsToRemove; boost::dynamic_bitset<> hatoms(mol.getNumAtoms()); for (unsigned int i = 0; i < mol.getNumAtoms(); ++i) { hatoms[i] = isQueryH(mol.getAtomWithIdx(i)); } unsigned int currIdx = 0, stopIdx = mol.getNumAtoms(); while (currIdx < stopIdx) { Atom *atom = mol.getAtomWithIdx(currIdx); if (!hatoms[currIdx]) { unsigned int numHsToRemove = 0; ROMol::ADJ_ITER begin, end; boost::tie(begin, end) = mol.getAtomNeighbors(atom); while (begin != end) { if (hatoms[*begin]) { Atom &bgn = *mol.getAtomWithIdx(*begin); if (!mergeUnmappedOnly || !bgn.hasProp(common_properties::molAtomMapNumber)) { atomsToRemove.push_back(rdcast<unsigned int>(*begin)); ++numHsToRemove; } } ++begin; } if (numHsToRemove) { // // We have H neighbors: // Add the appropriate queries to compensate for their removal. // // Examples: // C[H] -> [C;!H0] // C([H])[H] -> [C;!H0;!H1] // // It would be more efficient to do this using range queries like: // C([H])[H] -> [C;H{2-}] // but that would produce non-standard SMARTS without the user // having started with a non-standard SMARTS. 
// if (!atom->hasQuery()) { // it wasn't a query atom, we need to replace it so that we can add // a query: ATOM_EQUALS_QUERY *tmp = makeAtomNumQuery(atom->getAtomicNum()); auto *newAt = new QueryAtom; newAt->setQuery(tmp); newAt->updateProps(*atom); mol.replaceAtom(atom->getIdx(), newAt); delete newAt; atom = mol.getAtomWithIdx(currIdx); } for (unsigned int i = 0; i < numHsToRemove; ++i) { ATOM_EQUALS_QUERY *tmp = makeAtomHCountQuery(i); tmp->setNegation(true); atom->expandQuery(tmp); } } // end of numHsToRemove test // recurse if needed (was github isusue 544) if (atom->hasQuery()) { // std::cerr<<" q: "<<atom->getQuery()->getDescription()<<std::endl; if (atom->getQuery()->getDescription() == "RecursiveStructure") { auto *rqm = static_cast<RWMol *>(const_cast<ROMol *>( static_cast<RecursiveStructureQuery *>(atom->getQuery()) ->getQueryMol())); mergeQueryHs(*rqm, mergeUnmappedOnly); } // FIX: shouldn't be repeating this code here std::list<QueryAtom::QUERYATOM_QUERY::CHILD_TYPE> childStack( atom->getQuery()->beginChildren(), atom->getQuery()->endChildren()); while (childStack.size()) { QueryAtom::QUERYATOM_QUERY::CHILD_TYPE qry = childStack.front(); childStack.pop_front(); // std::cerr<<" child: "<<qry->getDescription()<<std::endl; if (qry->getDescription() == "RecursiveStructure") { // std::cerr<<" recurse"<<std::endl; auto *rqm = static_cast<RWMol *>(const_cast<ROMol *>( static_cast<RecursiveStructureQuery *>(qry.get()) ->getQueryMol())); mergeQueryHs(*rqm, mergeUnmappedOnly); // std::cerr<<" back"<<std::endl; } else if (qry->beginChildren() != qry->endChildren()) { childStack.insert(childStack.end(), qry->beginChildren(), qry->endChildren()); } } } // end of recursion loop } ++currIdx; } std::sort(atomsToRemove.begin(), atomsToRemove.end()); for (std::vector<unsigned int>::const_reverse_iterator aiter = atomsToRemove.rbegin(); aiter != atomsToRemove.rend(); ++aiter) { Atom *atom = mol.getAtomWithIdx(*aiter); mol.removeAtom(atom); } }; ROMol *mergeQueryHs(const ROMol &mol, bool mergeUnmappedOnly) { auto *res = new RWMol(mol); mergeQueryHs(*res, mergeUnmappedOnly); return static_cast<ROMol *>(res); }; }; // end of namespace MolOps }; // end of namespace RDKit
1
20,899
Ah, missed this one. Sorry: you don't need to test the atomic number here; that's taken care of above on line 683.
rdkit-rdkit
cpp
@@ -146,8 +146,11 @@ func (c *roundCalculator) roundInfo( } } if !lastBlockTime.Before(now) { + // TODO: if this is the case, the system time is far behind the time of other nodes. + // the code below is just to mute the warning, but "panic" may be a better choice. + time.Sleep(lastBlockTime.Sub(now)) err = errors.Errorf( - "last block time %s is a future time, vs now %s", + "last block time %s is a future time, vs now %s. it seems that your system time is far behind.\nplease calibrate your system time and restart the chain.", lastBlockTime, now, )
1
// Copyright (c) 2019 IoTeX Foundation // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package rolldpos import ( "time" "github.com/pkg/errors" "github.com/iotexproject/iotex-core/action/protocol/rolldpos" "github.com/iotexproject/iotex-core/blockchain/block" "github.com/iotexproject/iotex-core/endorsement" ) type roundCalculator struct { chain ChainManager timeBasedRotation bool rp *rolldpos.Protocol delegatesByEpochFunc DelegatesByEpochFunc beringHeight uint64 } // UpdateRound updates previous roundCtx func (c *roundCalculator) UpdateRound(round *roundCtx, height uint64, blockInterval time.Duration, now time.Time, toleratedOvertime time.Duration) (*roundCtx, error) { epochNum := round.EpochNum() epochStartHeight := round.EpochStartHeight() delegates := round.Delegates() switch { case height < round.Height(): return nil, errors.New("cannot update to a lower height") case height == round.Height(): if now.Before(round.StartTime()) { return round, nil } default: if height >= round.NextEpochStartHeight() { // update the epoch epochNum = c.rp.GetEpochNum(height) epochStartHeight = c.rp.GetEpochHeight(epochNum) var err error if delegates, err = c.Delegates(height); err != nil { return nil, err } } } roundNum, roundStartTime, err := c.roundInfo(height, blockInterval, now, toleratedOvertime) if err != nil { return nil, err } proposer, err := c.calculateProposer(height, roundNum, delegates) if err != nil { return nil, err } var status status var blockInLock []byte var proofOfLock []*endorsement.Endorsement if height == round.Height() { err = round.eManager.Cleanup(roundStartTime) if err != nil { return nil, err } status = round.status blockInLock = round.blockInLock proofOfLock = round.proofOfLock } else { err = round.eManager.Cleanup(time.Time{}) if err != nil { return nil, err } } return &roundCtx{ epochNum: epochNum, epochStartHeight: epochStartHeight, nextEpochStartHeight: c.rp.GetEpochHeight(epochNum + 1), delegates: delegates, height: height, roundNum: roundNum, proposer: proposer, roundStartTime: roundStartTime, nextRoundStartTime: roundStartTime.Add(blockInterval), eManager: round.eManager, status: status, blockInLock: blockInLock, proofOfLock: proofOfLock, }, nil } // Proposer returns the block producer of the round func (c *roundCalculator) Proposer(height uint64, blockInterval time.Duration, roundStartTime time.Time) string { round, err := c.newRound(height, blockInterval, roundStartTime, nil, 0) if err != nil { return "" } return round.Proposer() } func (c *roundCalculator) IsDelegate(addr string, height uint64) bool { delegates, err := c.Delegates(height) if err != nil { return false } for _, d := range delegates { if addr == d { return true } } return false } // RoundInfo returns information of round by the given height and current time func (c *roundCalculator) RoundInfo( height uint64, blockInterval time.Duration, now time.Time, ) (roundNum uint32, roundStartTime time.Time, err error) { return c.roundInfo(height, blockInterval, now, 0) } func (c *roundCalculator) roundInfo( height uint64, blockInterval time.Duration, now time.Time, toleratedOvertime time.Duration, ) (roundNum uint32, roundStartTime time.Time, err error) { 
lastBlockTime := time.Unix(c.chain.Genesis().Timestamp, 0) if height > 1 { if height >= c.beringHeight { var lastBlock *block.Header if lastBlock, err = c.chain.BlockHeaderByHeight(height - 1); err != nil { return } lastBlockTime = lastBlockTime.Add(lastBlock.Timestamp().Sub(lastBlockTime) / blockInterval * blockInterval) } else { var lastBlock *block.Footer if lastBlock, err = c.chain.BlockFooterByHeight(height - 1); err != nil { return } lastBlockTime = lastBlockTime.Add(lastBlock.CommitTime().Sub(lastBlockTime) / blockInterval * blockInterval) } } if !lastBlockTime.Before(now) { err = errors.Errorf( "last block time %s is a future time, vs now %s", lastBlockTime, now, ) return } duration := now.Sub(lastBlockTime) if duration > blockInterval { roundNum = uint32(duration / blockInterval) if toleratedOvertime == 0 || duration%blockInterval < toleratedOvertime { roundNum-- } } roundStartTime = lastBlockTime.Add(time.Duration(roundNum+1) * blockInterval) return roundNum, roundStartTime, nil } // Delegates returns list of delegates at given height func (c *roundCalculator) Delegates(height uint64) ([]string, error) { epochNum := c.rp.GetEpochNum(height) return c.delegatesByEpochFunc(epochNum) } // NewRoundWithToleration starts new round with tolerated over time func (c *roundCalculator) NewRoundWithToleration( height uint64, blockInterval time.Duration, now time.Time, eManager *endorsementManager, toleratedOvertime time.Duration, ) (round *roundCtx, err error) { return c.newRound(height, blockInterval, now, eManager, toleratedOvertime) } // NewRound starts new round and returns roundCtx func (c *roundCalculator) NewRound( height uint64, blockInterval time.Duration, now time.Time, eManager *endorsementManager, ) (round *roundCtx, err error) { return c.newRound(height, blockInterval, now, eManager, 0) } func (c *roundCalculator) newRound( height uint64, blockInterval time.Duration, now time.Time, eManager *endorsementManager, toleratedOvertime time.Duration, ) (round *roundCtx, err error) { epochNum := uint64(0) epochStartHeight := uint64(0) var delegates []string var roundNum uint32 var proposer string var roundStartTime time.Time if height != 0 { epochNum = c.rp.GetEpochNum(height) epochStartHeight = c.rp.GetEpochHeight(epochNum) if delegates, err = c.Delegates(height); err != nil { return } if roundNum, roundStartTime, err = c.roundInfo(height, blockInterval, now, toleratedOvertime); err != nil { return } if proposer, err = c.calculateProposer(height, roundNum, delegates); err != nil { return } } if eManager == nil { if eManager, err = newEndorsementManager(nil); err != nil { return nil, err } } round = &roundCtx{ epochNum: epochNum, epochStartHeight: epochStartHeight, nextEpochStartHeight: c.rp.GetEpochHeight(epochNum + 1), delegates: delegates, height: height, roundNum: roundNum, proposer: proposer, eManager: eManager, roundStartTime: roundStartTime, nextRoundStartTime: roundStartTime.Add(blockInterval), status: open, } eManager.SetIsMarjorityFunc(round.EndorsedByMajority) return round, nil } // calculateProposer calulates proposer according to height and round number func (c *roundCalculator) calculateProposer( height uint64, round uint32, delegates []string, ) (proposer string, err error) { numDelegates := c.rp.NumDelegates() if numDelegates != uint64(len(delegates)) { err = errors.New("invalid delegate list") return } idx := height if c.timeBasedRotation { idx += uint64(round) } proposer = delegates[idx%numDelegates] return }
1
21,675
roundInfo() is called from a couple of places in consensus. For safety, it would be better to test/verify that the delay in returning err introduced by this Sleep() does not affect normal functionality, e.g. that consensus can still be reached and a full node can still sync correctly.
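A minimal sketch, assuming only the standard library, of one way to bound that delay; the waitForClockDrift helper and the maxClockDriftWait cap are hypothetical names, not part of iotex-core. The idea is that callers of roundInfo() would never block longer than a fixed ceiling while the clock-drift error is still surfaced:

package main

import (
	"fmt"
	"time"
)

// maxClockDriftWait caps how long the caller is willing to block when the local
// clock is behind the last block's timestamp (illustrative value only).
const maxClockDriftWait = 10 * time.Second

// waitForClockDrift sleeps for the apparent drift, but never longer than the cap,
// so a badly skewed system clock cannot stall callers indefinitely. It returns the
// duration actually slept.
func waitForClockDrift(lastBlockTime, now time.Time) time.Duration {
	drift := lastBlockTime.Sub(now)
	if drift <= 0 {
		return 0
	}
	if drift > maxClockDriftWait {
		drift = maxClockDriftWait
	}
	time.Sleep(drift)
	return drift
}

func main() {
	now := time.Now()
	waited := waitForClockDrift(now.Add(50*time.Millisecond), now)
	fmt.Printf("waited %v before surfacing the clock-drift error\n", waited)
}

A unit test around roundInfo() could assert the same two things: with a future last block time it still returns the error, and it returns within the cap.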
iotexproject-iotex-core
go
@@ -376,8 +376,9 @@ func (sb *RustSectorBuilder) GeneratePoSt(req GeneratePoStRequest) (GeneratePoSt defer elapsed("GeneratePoSt")() // flattening the byte slice makes it easier to copy into the C heap - flattened := make([]byte, 32*len(req.CommRs)) - for idx, commR := range req.CommRs { + commRs := req.SortedCommRs.Values() + flattened := make([]byte, 32*len(commRs)) + for idx, commR := range commRs { copy(flattened[(32*idx):(32*(1+idx))], commR[:]) }
1
// +build !windows package sectorbuilder import ( "bytes" "context" "io" "runtime" "time" "unsafe" bserv "github.com/ipfs/go-blockservice" cid "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log" "github.com/pkg/errors" "github.com/filecoin-project/go-filecoin/address" "github.com/filecoin-project/go-filecoin/proofs" "github.com/filecoin-project/go-filecoin/proofs/sectorbuilder/bytesink" ) // #cgo LDFLAGS: -L${SRCDIR}/../lib -lfilecoin_proofs // #cgo pkg-config: ${SRCDIR}/../lib/pkgconfig/libfilecoin_proofs.pc // #include "../include/libfilecoin_proofs.h" import "C" var log = logging.Logger("sectorbuilder") // nolint: deadcode // MaxNumStagedSectors configures the maximum number of staged sectors which can // be open and accepting data at any time. const MaxNumStagedSectors = 1 // MaxTimeToWriteBytesToSink configures the maximum amount of time it should // take to copy user piece bytes from the provided Reader to the ByteSink. const MaxTimeToWriteBytesToSink = time.Second * 30 // stagedSectorMetadata is a sector into which we write user piece-data before // sealing. Note: sectorID is unique across all staged and sealed sectors for a // miner. type stagedSectorMetadata struct { sectorID uint64 } func elapsed(what string) func() { start := time.Now() return func() { log.Debugf("%s took %v\n", what, time.Since(start)) } } // RustSectorBuilder is a struct which serves as a proxy for a SectorBuilder in Rust. type RustSectorBuilder struct { blockService bserv.BlockService ptr unsafe.Pointer // sectorSealResults is sent a value whenever seal completes for a sector, // either successfully or with a failure. sectorSealResults chan SectorSealResult // sealStatusPoller polls for sealing status for the sectors whose ids it // knows about. sealStatusPoller *sealStatusPoller // sectorStoreType configures behavior of libfilecoin_proofs, including // sector packing, sector sizes, sealing and PoSt generation performance. sectorStoreType proofs.Mode } var _ SectorBuilder = &RustSectorBuilder{} // RustSectorBuilderConfig is a configuration object used when instantiating a // Rust-backed SectorBuilder through the FFI. All fields are required. type RustSectorBuilderConfig struct { BlockService bserv.BlockService LastUsedSectorID uint64 MetadataDir string MinerAddr address.Address SealedSectorDir string ProofsMode proofs.Mode StagedSectorDir string } // NewRustSectorBuilder instantiates a SectorBuilder through the FFI. 
func NewRustSectorBuilder(cfg RustSectorBuilderConfig) (*RustSectorBuilder, error) { defer elapsed("NewRustSectorBuilder")() cMetadataDir := C.CString(cfg.MetadataDir) defer C.free(unsafe.Pointer(cMetadataDir)) proverID := AddressToProverID(cfg.MinerAddr) proverIDCBytes := C.CBytes(proverID[:]) defer C.free(proverIDCBytes) cStagedSectorDir := C.CString(cfg.StagedSectorDir) defer C.free(unsafe.Pointer(cStagedSectorDir)) cSealedSectorDir := C.CString(cfg.SealedSectorDir) defer C.free(unsafe.Pointer(cSealedSectorDir)) scfg, err := proofs.CProofsMode(cfg.ProofsMode) if err != nil { return nil, errors.Errorf("unknown sector store type: %v", cfg.ProofsMode) } resPtr := (*C.InitSectorBuilderResponse)(unsafe.Pointer(C.init_sector_builder( (*C.ConfiguredStore)(unsafe.Pointer(scfg)), C.uint64_t(cfg.LastUsedSectorID), cMetadataDir, (*[31]C.uint8_t)(proverIDCBytes), cSealedSectorDir, cStagedSectorDir, C.uint8_t(MaxNumStagedSectors), ))) defer C.destroy_init_sector_builder_response(resPtr) if resPtr.status_code != 0 { return nil, errors.New(C.GoString(resPtr.error_msg)) } sb := &RustSectorBuilder{ blockService: cfg.BlockService, ptr: unsafe.Pointer(resPtr.sector_builder), sectorSealResults: make(chan SectorSealResult), sectorStoreType: cfg.ProofsMode, } // load staged sector metadata and use it to initialize the poller metadata, err := sb.stagedSectors() if err != nil { return nil, errors.Wrap(err, "failed to load staged sectors") } stagedSectorIDs := make([]uint64, len(metadata)) for idx, m := range metadata { stagedSectorIDs[idx] = m.sectorID } sb.sealStatusPoller = newSealStatusPoller(stagedSectorIDs, sb.sectorSealResults, sb.findSealedSectorMetadata) runtime.SetFinalizer(sb, func(o *RustSectorBuilder) { o.destroy() }) return sb, nil } // GetMaxUserBytesPerStagedSector produces the number of user piece-bytes which // will fit into a newly-provisioned staged sector. func (sb *RustSectorBuilder) GetMaxUserBytesPerStagedSector() (numBytes uint64, err error) { defer elapsed("GetMaxUserBytesPerStagedSector")() scfg, err := proofs.CProofsMode(sb.sectorStoreType) if err != nil { return 0, errors.Wrap(err, "CSectorStoreType failed") } return uint64(C.get_max_user_bytes_per_staged_sector((*C.ConfiguredStore)(unsafe.Pointer(scfg)))), nil } // AddPiece writes the given piece into an unsealed sector and returns the id // of that sector. func (sb *RustSectorBuilder) AddPiece(ctx context.Context, pieceRef cid.Cid, pieceSize uint64, pieceReader io.Reader) (sectorID uint64, retErr error) { defer elapsed("AddPiece")() ctx, cancel := context.WithTimeout(ctx, MaxTimeToWriteBytesToSink) defer cancel() sink, err := bytesink.NewFifo() if err != nil { return 0, err } // errCh holds any error encountered when streaming bytes or making the CGO // call. The channel is buffered so that the goroutines can exit, which will // close the pipe, which unblocks the CGO call. errCh := make(chan error, 2) defer close(errCh) // sectorIDCh receives a value if the CGO call indicates that the client // piece has successfully been added to a sector. The channel is buffered // so that the goroutine can exit if a value is sent to errCh before the // CGO call completes. 
sectorIDCh := make(chan uint64, 1) defer close(sectorIDCh) // goroutine attempts to copy bytes from piece's reader to the sink go func() { // opening the sink blocks the goroutine until a reader is opened on the // other end of the FIFO pipe err := sink.Open() if err != nil { errCh <- errors.Wrap(err, "failed to open sink") return } // closing the sink signals to the reader that we're done writing, which // unblocks the reader defer func() { err := sink.Close() if err != nil { log.Warningf("failed to close sink: %s", err) } }() n, err := io.Copy(sink, pieceReader) if err != nil { errCh <- errors.Wrap(err, "failed to copy to pipe") return } if uint64(n) != pieceSize { errCh <- errors.Errorf("expected to write %d bytes but wrote %d", pieceSize, n) return } }() // goroutine makes CGO call, which blocks until FIFO pipe opened for writing // from within other goroutine go func() { cPieceKey := C.CString(pieceRef.String()) defer C.free(unsafe.Pointer(cPieceKey)) cSinkPath := C.CString(sink.ID()) defer C.free(unsafe.Pointer(cSinkPath)) resPtr := (*C.AddPieceResponse)(unsafe.Pointer(C.add_piece( (*C.SectorBuilder)(sb.ptr), cPieceKey, C.uint64_t(pieceSize), cSinkPath, ))) defer C.destroy_add_piece_response(resPtr) if resPtr.status_code != 0 { msg := "CGO add_piece returned an error (error_msg=%s, sinkPath=%s)" log.Errorf(msg, C.GoString(resPtr.error_msg), sink.ID()) errCh <- errors.New(C.GoString(resPtr.error_msg)) return } sectorIDCh <- uint64(resPtr.sector_id) }() select { case <-ctx.Done(): errStr := "context completed before CGO call could return" strFmt := "%s (sinkPath=%s)" log.Errorf(strFmt, errStr, sink.ID()) return 0, errors.New(errStr) case err := <-errCh: errStr := "error streaming piece-bytes" strFmt := "%s (sinkPath=%s)" log.Errorf(strFmt, errStr, sink.ID()) return 0, errors.Wrap(err, errStr) case sectorID := <-sectorIDCh: go sb.sealStatusPoller.addSectorID(sectorID) log.Infof("add piece complete (pieceRef=%s, sectorID=%d, sinkPath=%s)", pieceRef.String(), sectorID, sink.ID()) return sectorID, nil } } func (sb *RustSectorBuilder) findSealedSectorMetadata(sectorID uint64) (*SealedSectorMetadata, error) { resPtr := (*C.GetSealStatusResponse)(unsafe.Pointer(C.get_seal_status((*C.SectorBuilder)(sb.ptr), C.uint64_t(sectorID)))) defer C.destroy_get_seal_status_response(resPtr) if resPtr.status_code != 0 { return nil, errors.New(C.GoString(resPtr.error_msg)) } if resPtr.seal_status_code == C.Failed { return nil, errors.New(C.GoString(resPtr.seal_error_msg)) } else if resPtr.seal_status_code == C.Pending { return nil, nil } else if resPtr.seal_status_code == C.Sealing { return nil, nil } else if resPtr.seal_status_code == C.Sealed { commRSlice := C.GoBytes(unsafe.Pointer(&resPtr.comm_r[0]), 32) var commR proofs.CommR copy(commR[:], commRSlice) commDSlice := C.GoBytes(unsafe.Pointer(&resPtr.comm_d[0]), 32) var commD proofs.CommD copy(commD[:], commDSlice) commRStarSlice := C.GoBytes(unsafe.Pointer(&resPtr.comm_r_star[0]), 32) var commRStar proofs.CommRStar copy(commRStar[:], commRStarSlice) proofSlice := C.GoBytes(unsafe.Pointer(&resPtr.snark_proof[0]), 384) var proof proofs.SealProof copy(proof[:], proofSlice) ps, err := goPieceInfos((*C.FFIPieceMetadata)(unsafe.Pointer(resPtr.pieces_ptr)), resPtr.pieces_len) if err != nil { return nil, errors.Wrap(err, "failed to marshal from string to cid") } return &SealedSectorMetadata{ CommD: commD, CommR: commR, CommRStar: commRStar, Pieces: ps, Proof: proof, SectorID: sectorID, }, nil } else { // unknown return nil, errors.New("unexpected seal status") 
} } // ReadPieceFromSealedSector produces a Reader used to get original piece-bytes // from a sealed sector. func (sb *RustSectorBuilder) ReadPieceFromSealedSector(pieceCid cid.Cid) (io.Reader, error) { cPieceKey := C.CString(pieceCid.String()) defer C.free(unsafe.Pointer(cPieceKey)) resPtr := (*C.ReadPieceFromSealedSectorResponse)(unsafe.Pointer(C.read_piece_from_sealed_sector((*C.SectorBuilder)(sb.ptr), cPieceKey))) defer C.destroy_read_piece_from_sealed_sector_response(resPtr) if resPtr.status_code != 0 { return nil, errors.New(C.GoString(resPtr.error_msg)) } return bytes.NewReader(C.GoBytes(unsafe.Pointer(resPtr.data_ptr), C.int(resPtr.data_len))), nil } // SealAllStagedSectors schedules sealing of all staged sectors. func (sb *RustSectorBuilder) SealAllStagedSectors(ctx context.Context) error { resPtr := (*C.SealAllStagedSectorsResponse)(unsafe.Pointer(C.seal_all_staged_sectors((*C.SectorBuilder)(sb.ptr)))) defer C.destroy_seal_all_staged_sectors_response(resPtr) if resPtr.status_code != 0 { return errors.New(C.GoString(resPtr.error_msg)) } return nil } // stagedSectors returns a slice of all staged sector metadata for the sector builder, or an error. func (sb *RustSectorBuilder) stagedSectors() ([]*stagedSectorMetadata, error) { resPtr := (*C.GetStagedSectorsResponse)(unsafe.Pointer(C.get_staged_sectors((*C.SectorBuilder)(sb.ptr)))) defer C.destroy_get_staged_sectors_response(resPtr) if resPtr.status_code != 0 { return nil, errors.New(C.GoString(resPtr.error_msg)) } meta, err := goStagedSectorMetadata((*C.FFIStagedSectorMetadata)(unsafe.Pointer(resPtr.sectors_ptr)), resPtr.sectors_len) if err != nil { return nil, err } return meta, nil } // SectorSealResults returns an unbuffered channel that is sent a value whenever // sealing completes. func (sb *RustSectorBuilder) SectorSealResults() <-chan SectorSealResult { return sb.sectorSealResults } // Close shuts down the RustSectorBuilder's poller. func (sb *RustSectorBuilder) Close() error { sb.sealStatusPoller.stop() return nil } // GeneratePoSt produces a proof-of-spacetime for the provided commitment replicas. func (sb *RustSectorBuilder) GeneratePoSt(req GeneratePoStRequest) (GeneratePoStResponse, error) { defer elapsed("GeneratePoSt")() // flattening the byte slice makes it easier to copy into the C heap flattened := make([]byte, 32*len(req.CommRs)) for idx, commR := range req.CommRs { copy(flattened[(32*idx):(32*(1+idx))], commR[:]) } // copy the Go byte slice into C memory cflattened := C.CBytes(flattened) defer C.free(cflattened) challengeSeedPtr := unsafe.Pointer(&(req.ChallengeSeed)[0]) // a mutable pointer to a GeneratePoStResponse C-struct resPtr := (*C.GeneratePoStResponse)(unsafe.Pointer(C.generate_post((*C.SectorBuilder)(sb.ptr), (*C.uint8_t)(cflattened), C.size_t(len(flattened)), (*[32]C.uint8_t)(challengeSeedPtr)))) defer C.destroy_generate_post_response(resPtr) if resPtr.status_code != 0 { return GeneratePoStResponse{}, errors.New(C.GoString(resPtr.error_msg)) } proofs, err := goPoStProofs(resPtr.flattened_proofs_ptr, resPtr.flattened_proofs_len) if err != nil { return GeneratePoStResponse{}, err } return GeneratePoStResponse{ Proofs: proofs, Faults: goUint64s(resPtr.faults_ptr, resPtr.faults_len), }, nil } // goPoStProofs accepts a pointer to a C-allocated byte array and a size and // produces a Go-managed slice of PoStProof. Note that this function copies // values into the Go heap from C. 
func goPoStProofs(src *C.uint8_t, size C.size_t) ([]proofs.PoStProof, error) { chunkSize := int(proofs.PoStBytesLen) arrSize := int(size) if src == nil { return []proofs.PoStProof{}, nil } if arrSize%chunkSize != 0 { msg := "PoSt proof array invalid size (arrSize=%d % PoStBytesLen=%d != 0)" return nil, errors.Errorf(msg, arrSize, proofs.PoStBytesLen) } out := make([]proofs.PoStProof, arrSize/chunkSize) // Create a slice from a pointer to an array on the C heap by slicing to // the appropriate size. We can then copy from this slice into the Go heap. // // https://github.com/golang/go/wiki/cgo#turning-c-arrays-into-go-slices tmp := (*(*[1 << 30]byte)(unsafe.Pointer(src)))[:size:size] for i := 0; i < len(out); i++ { copy(out[i][:], tmp[i*chunkSize:(i+1)*chunkSize]) } return out, nil } // goUint64s accepts a pointer to a C-allocated uint64 and a size and produces // a Go-managed slice of uint64. Note that this function copies values into the // Go heap from C. func goUint64s(src *C.uint64_t, size C.size_t) []uint64 { out := make([]uint64, size) if src != nil { copy(out, (*(*[1 << 30]uint64)(unsafe.Pointer(src)))[:size:size]) } return out } // destroy deallocates and destroys a RustSectorBuilder. func (sb *RustSectorBuilder) destroy() { C.destroy_sector_builder((*C.SectorBuilder)(sb.ptr)) sb.ptr = nil } func goStagedSectorMetadata(src *C.FFIStagedSectorMetadata, size C.size_t) ([]*stagedSectorMetadata, error) { sectors := make([]*stagedSectorMetadata, size) if src == nil || size == 0 { return sectors, nil } sectorPtrs := (*[1 << 30]C.FFIStagedSectorMetadata)(unsafe.Pointer(src))[:size:size] for i := 0; i < int(size); i++ { sectors[i] = &stagedSectorMetadata{ sectorID: uint64(sectorPtrs[i].sector_id), } } return sectors, nil } func goPieceInfos(src *C.FFIPieceMetadata, size C.size_t) ([]*PieceInfo, error) { ps := make([]*PieceInfo, size) if src == nil || size == 0 { return ps, nil } ptrs := (*[1 << 30]C.FFIPieceMetadata)(unsafe.Pointer(src))[:size:size] for i := 0; i < int(size); i++ { ref, err := cid.Decode(C.GoString(ptrs[i].piece_key)) if err != nil { return nil, err } ps[i] = &PieceInfo{ Ref: ref, Size: uint64(ptrs[i].num_bytes), } } return ps, nil }
1
18,509
This hardcoded 32 could go away too.
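A standalone sketch of what dropping that literal could look like; CommR below stands in for proofs.CommR (assumed to be a 32-byte array) and flatten is a hypothetical helper, not the actual GeneratePoSt code. The chunk size is derived from the array type, so the magic number disappears:

package main

import "fmt"

// CommR stands in for proofs.CommR, assumed to be a 32-byte replica commitment.
type CommR [32]byte

// flatten packs the commitments into one contiguous byte slice, deriving the
// chunk size from the type instead of repeating the literal 32.
func flatten(commRs []CommR) []byte {
	chunk := len(CommR{})
	out := make([]byte, chunk*len(commRs))
	for i, c := range commRs {
		copy(out[chunk*i:chunk*(i+1)], c[:])
	}
	return out
}

func main() {
	fmt.Println(len(flatten(make([]CommR, 3)))) // 96 bytes for three commitments
}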
filecoin-project-venus
go
@@ -146,6 +146,8 @@ class Notification extends Component { module, moduleName, pageIndex, + pageSpeedLink, + pageSpeedLabel, } = this.props; if ( getCache( `notification::dismissed::${ id }` ) ) {
1
/** * Notification component. * * Site Kit by Google, Copyright 2019 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * External dependencies */ import PropTypes from 'prop-types'; import classnames from 'classnames'; import { map } from 'lodash'; /** * WordPress dependencies */ import { Component, Fragment, createRef, isValidElement } from '@wordpress/element'; /** * Internal dependencies */ import SvgIcon from '../../util/svg-icon'; import { sanitizeHTML } from '../../util/sanitize'; import { setCache, getCache, deleteCache } from '../data/cache'; import DataBlock from '../data-block'; import Button from '../button'; import Warning from '../notifications/warning'; import Error from '../notifications/error'; import Link from '../link'; class Notification extends Component { constructor( props ) { super( props ); this.state = { isClosed: false, }; this.cardRef = createRef(); this.handleDismiss = this.handleDismiss.bind( this ); this.handleCTAClick = this.handleCTAClick.bind( this ); if ( 0 < this.props.dismissExpires ) { this.expireDismiss(); } if ( this.props.showOnce ) { setCache( `notification::displayed::${ this.props.id }`, new Date() ); } } async handleDismiss( e ) { e.persist(); e.preventDefault(); const { onDismiss } = this.props; if ( onDismiss ) { await onDismiss( e ); } this.dismiss(); } dismiss() { const card = this.cardRef.current; this.setState( { isClosed: true, } ); setTimeout( () => { setCache( `notification::dismissed::${ this.props.id }`, new Date() ); card.style.display = 'none'; const event = new Event( 'notificationDismissed' ); document.dispatchEvent( event ); }, 350 ); } async handleCTAClick( e ) { e.persist(); const { isDismissable, onCTAClick } = this.props; if ( onCTAClick ) { await onCTAClick( e ); } if ( isDismissable ) { this.dismiss(); } } expireDismiss() { const { id, dismissExpires, } = this.props; const dismissed = getCache( `notification::dismissed::${ id }` ); if ( dismissed ) { const expiration = new Date( dismissed ); expiration.setSeconds( expiration.getSeconds() + parseInt( dismissExpires, 10 ) ); if ( expiration < new Date() ) { deleteCache( `notification::dismissed::${ id }` ); } } } render() { const { isClosed } = this.state; const { children, id, title, description, blockData, winImage, smallImage, format, learnMoreURL, learnMoreDescription, learnMoreLabel, ctaLink, ctaLabel, ctaTarget, type, dismiss, isDismissable, logo, module, moduleName, pageIndex, } = this.props; if ( getCache( `notification::dismissed::${ id }` ) ) { return null; } const closedClass = isClosed ? 
'is-closed' : 'is-open'; const inlineLayout = 'large' === format && 'win-stats-increase' === type; let layout = 'mdc-layout-grid__cell--span-12'; if ( 'large' === format ) { layout = 'mdc-layout-grid__cell--order-2-phone ' + 'mdc-layout-grid__cell--order-1-tablet ' + 'mdc-layout-grid__cell--span-6-tablet ' + 'mdc-layout-grid__cell--span-8-desktop '; if ( inlineLayout ) { layout = 'mdc-layout-grid__cell--order-2-phone ' + 'mdc-layout-grid__cell--order-1-tablet ' + 'mdc-layout-grid__cell--span-5-tablet ' + 'mdc-layout-grid__cell--span-8-desktop '; } } else if ( 'small' === format ) { layout = 'mdc-layout-grid__cell--span-11-desktop ' + 'mdc-layout-grid__cell--span-7-tablet ' + 'mdc-layout-grid__cell--span-3-phone'; } let icon; if ( 'win-warning' === type ) { icon = <Warning />; } else if ( 'win-error' === type ) { icon = <Error />; } else { icon = ''; } const dataBlockMarkup = ( <Fragment> { blockData && <div className="mdc-layout-grid__inner"> { map( blockData, ( block, i ) => { return ( <div key={ i } className={ classnames( 'mdc-layout-grid__cell', { 'mdc-layout-grid__cell--span-5-desktop': inlineLayout, 'mdc-layout-grid__cell--span-4-desktop': ! inlineLayout, } ) } > <div className="googlesitekit-publisher-win__stats"> <DataBlock { ...block } /> </div> </div> ); } ) } </div> } </Fragment> ); const inlineMarkup = ( <Fragment> { title && <h3 className="googlesitekit-heading-2 googlesitekit-publisher-win__title"> { title } </h3> } { description && <div className="googlesitekit-publisher-win__desc"> <p> { isValidElement( description ) ? description : ( <span dangerouslySetInnerHTML={ sanitizeHTML( description, { ALLOWED_TAGS: [ 'strong', 'em', 'br', 'a' ], ALLOWED_ATTR: [ 'href' ], } ) } /> ) } { learnMoreLabel && <Fragment> { ' ' } <Link href={ learnMoreURL } external inherit> { learnMoreLabel } </Link> { learnMoreDescription } </Fragment> } { pageIndex && <span className="googlesitekit-publisher-win__detect">{ pageIndex }</span> } </p> </div> } { children } </Fragment> ); const logoSVG = module ? <SvgIcon id={ module } height="19" width="19" /> : <SvgIcon id={ 'logo-g' } height="34" width="32" />; return ( <section ref={ this.cardRef } className={ classnames( 'googlesitekit-publisher-win', { [ `googlesitekit-publisher-win--${ format }` ]: format, [ `googlesitekit-publisher-win--${ type }` ]: type, [ `googlesitekit-publisher-win--${ closedClass }` ]: closedClass, } ) } > <div className="mdc-layout-grid"> <div className="mdc-layout-grid__inner"> { logo && <div className={ classnames( 'mdc-layout-grid__cell', 'mdc-layout-grid__cell--span-12', { 'mdc-layout-grid__cell--order-2-phone': inlineLayout, 'mdc-layout-grid__cell--order-1-tablet': inlineLayout, } ) }> <div className="googlesitekit-publisher-win__logo"> { logoSVG } </div> { moduleName && <div className="googlesitekit-publisher-win__module-name"> { moduleName } </div> } </div> } { smallImage && <div className=" mdc-layout-grid__cell mdc-layout-grid__cell--span-1 "> <img className="googlesitekit-publisher-win__small-image" alt="" src={ smallImage } /> </div> } <div className={ classnames( 'mdc-layout-grid__cell', layout ) } > { inlineLayout ? 
( <div className="mdc-layout-grid__inner"> <div className="mdc-layout-grid__cell mdc-layout-grid__cell--span-5-desktop mdc-layout-grid__cell--span-8-tablet"> { inlineMarkup } </div> <div className="mdc-layout-grid__cell mdc-layout-grid__cell--span-7-desktop mdc-layout-grid__cell--span-8-tablet mdc-layout-grid__cell--align-bottom"> { dataBlockMarkup } </div> </div> ) : ( <Fragment> { inlineMarkup } { dataBlockMarkup } </Fragment> ) } { ctaLink && <Button href={ ctaLink } target={ ctaTarget } onClick={ this.handleCTAClick } > { ctaLabel } </Button> } { isDismissable && dismiss && <Link onClick={ this.handleDismiss }> { dismiss } </Link> } </div> { winImage && <div className=" mdc-layout-grid__cell mdc-layout-grid__cell--order-1-phone mdc-layout-grid__cell--order-2-tablet mdc-layout-grid__cell--span-2-tablet mdc-layout-grid__cell--span-4-desktop "> <div className="googlesitekit-publisher-win__image-large"> <img alt="" src={ winImage } /> </div> </div> } { ( 'win-error' === type || 'win-warning' === type ) && <div className=" mdc-layout-grid__cell mdc-layout-grid__cell--span-1 "> <div className="googlesitekit-publisher-win__icons"> { icon } </div> </div> } </div> </div> </section> ); } } Notification.propTypes = { id: PropTypes.string.isRequired, title: PropTypes.string.isRequired, description: PropTypes.node, learnMoreURL: PropTypes.string, learnMoreDescription: PropTypes.string, learnMoreLabel: PropTypes.string, blockData: PropTypes.array, winImage: PropTypes.string, smallImage: PropTypes.string, format: PropTypes.string, ctaLink: PropTypes.string, ctaLabel: PropTypes.string, type: PropTypes.string, dismiss: PropTypes.string, isDismissable: PropTypes.bool, logo: PropTypes.bool, module: PropTypes.string, moduleName: PropTypes.string, pageIndex: PropTypes.string, dismissExpires: PropTypes.number, showOnce: PropTypes.bool, onCTAClick: PropTypes.func, onDismiss: PropTypes.func, }; Notification.defaultProps = { isDismissable: true, dismissExpires: 0, showOnce: false, }; export default Notification;
1
29,104
These props should be less specifically named since the component is generic, for example `anchorLink`, `anchorLinkLabel`.
google-site-kit-wp
js
@@ -189,13 +189,13 @@ func (r *AWSMachineReconciler) reconcileDelete(machineScope *scope.MachineScope, // and AWSMachine // 3. Issue a delete // 4. Scale controller deployment to 1 - machineScope.V(2).Info("Unable to locate instance by ID or tags") + machineScope.V(2).Info("Unable to locate EC2 instance by ID or tags") r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "NoInstanceFound", "Unable to find matching EC2 instance") machineScope.AWSMachine.Finalizers = util.Filter(machineScope.AWSMachine.Finalizers, infrav1.MachineFinalizer) return reconcile.Result{}, nil } - machineScope.V(3).Info("Instance found matching deleted AWSMachine", "instanceID", instance.ID) + machineScope.V(3).Info("EC2 instance found matching deleted AWSMachine", "instance-id", instance.ID) // Check the instance state. If it's already shutting down or terminated, // do nothing. Otherwise attempt to delete it.
1
/* Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package controllers import ( "context" "fmt" "github.com/aws/aws-sdk-go/aws" "github.com/go-logr/logr" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" "k8s.io/utils/pointer" infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3" "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services" "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ec2" "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/elb" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" "sigs.k8s.io/cluster-api/controllers/noderefutil" capierrors "sigs.k8s.io/cluster-api/errors" "sigs.k8s.io/cluster-api/util" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" ) // AWSMachineReconciler reconciles a AwsMachine object type AWSMachineReconciler struct { client.Client Log logr.Logger Recorder record.EventRecorder serviceFactory func(*scope.ClusterScope) services.EC2MachineInterface } func (r *AWSMachineReconciler) getEC2Service(scope *scope.ClusterScope) services.EC2MachineInterface { if r.serviceFactory != nil { return r.serviceFactory(scope) } return ec2.NewService(scope) } // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmachines,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmachines/status,verbs=get;update;patch // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines;machines/status,verbs=get;list;watch // +kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch func (r *AWSMachineReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, reterr error) { ctx := context.TODO() logger := r.Log.WithValues("namespace", req.Namespace, "awsMachine", req.Name) // Fetch the AWSMachine instance. awsMachine := &infrav1.AWSMachine{} err := r.Get(ctx, req.NamespacedName, awsMachine) if err != nil { if apierrors.IsNotFound(err) { return reconcile.Result{}, nil } return reconcile.Result{}, err } // Fetch the Machine. machine, err := util.GetOwnerMachine(ctx, r.Client, awsMachine.ObjectMeta) if err != nil { return reconcile.Result{}, err } if machine == nil { logger.Info("Machine Controller has not yet set OwnerRef") return reconcile.Result{}, nil } logger = logger.WithValues("machine", machine.Name) // Fetch the Cluster. 
cluster, err := util.GetClusterFromMetadata(ctx, r.Client, machine.ObjectMeta) if err != nil { logger.Info("Machine is missing cluster label or cluster does not exist") return reconcile.Result{}, nil } logger = logger.WithValues("cluster", cluster.Name) awsCluster := &infrav1.AWSCluster{} awsClusterName := client.ObjectKey{ Namespace: awsMachine.Namespace, Name: cluster.Spec.InfrastructureRef.Name, } if err := r.Client.Get(ctx, awsClusterName, awsCluster); err != nil { logger.Info("AWSCluster is not available yet") return reconcile.Result{}, nil } logger = logger.WithValues("awsCluster", awsCluster.Name) // Create the cluster scope clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{ Client: r.Client, Logger: logger, Cluster: cluster, AWSCluster: awsCluster, }) if err != nil { return reconcile.Result{}, err } // Create the machine scope machineScope, err := scope.NewMachineScope(scope.MachineScopeParams{ Logger: logger, Client: r.Client, Cluster: cluster, Machine: machine, AWSCluster: awsCluster, AWSMachine: awsMachine, }) if err != nil { return reconcile.Result{}, errors.Errorf("failed to create scope: %+v", err) } // Always close the scope when exiting this function so we can persist any AWSMachine changes. defer func() { if err := machineScope.Close(); err != nil && reterr == nil { reterr = err } }() // Handle deleted machines if !awsMachine.ObjectMeta.DeletionTimestamp.IsZero() { return r.reconcileDelete(machineScope, clusterScope) } // Handle non-deleted machines return r.reconcileNormal(ctx, machineScope, clusterScope) } func (r *AWSMachineReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error { return ctrl.NewControllerManagedBy(mgr). WithOptions(options). For(&infrav1.AWSMachine{}). Watches( &source.Kind{Type: &clusterv1.Machine{}}, &handler.EnqueueRequestsFromMapFunc{ ToRequests: util.MachineToInfrastructureMapFunc(infrav1.GroupVersion.WithKind("AWSMachine")), }, ). Watches( &source.Kind{Type: &infrav1.AWSCluster{}}, &handler.EnqueueRequestsFromMapFunc{ToRequests: handler.ToRequestsFunc(r.AWSClusterToAWSMachines)}, ). Complete(r) } func (r *AWSMachineReconciler) reconcileDelete(machineScope *scope.MachineScope, clusterScope *scope.ClusterScope) (reconcile.Result, error) { machineScope.Info("Handling deleted AWSMachine") ec2Service := r.getEC2Service(clusterScope) instance, err := r.findInstance(machineScope, ec2Service) if err != nil { return reconcile.Result{}, err } if instance == nil { // The machine was never created or was deleted by some other entity // One way to reach this state: // 1. Scale deployment to 0 // 2. Rename EC2 machine, and delete ProviderID from spec of both Machine // and AWSMachine // 3. Issue a delete // 4. Scale controller deployment to 1 machineScope.V(2).Info("Unable to locate instance by ID or tags") r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "NoInstanceFound", "Unable to find matching EC2 instance") machineScope.AWSMachine.Finalizers = util.Filter(machineScope.AWSMachine.Finalizers, infrav1.MachineFinalizer) return reconcile.Result{}, nil } machineScope.V(3).Info("Instance found matching deleted AWSMachine", "instanceID", instance.ID) // Check the instance state. If it's already shutting down or terminated, // do nothing. Otherwise attempt to delete it. 
// This decision is based on the ec2-instance-lifecycle graph at // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html switch instance.State { case infrav1.InstanceStateShuttingDown, infrav1.InstanceStateTerminated: machineScope.Info("Instance is shutting down or already terminated", "instanceID", instance.ID) default: machineScope.Info("Terminating instance", "instanceID", instance.ID) if err := ec2Service.TerminateInstanceAndWait(instance.ID); err != nil { r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedTerminate", "Failed to terminate instance %q: %v", instance.ID, err) return reconcile.Result{}, errors.Wrap(err, "failed to terminate instance") } // If the AWSMachine specifies Network Interfaces, detach the cluster's core Security Groups from them as part of deletion. if len(machineScope.AWSMachine.Spec.NetworkInterfaces) > 0 { core, err := ec2Service.GetCoreSecurityGroups(machineScope) if err != nil { return reconcile.Result{}, errors.Wrap(err, "failed to get core security groups to detach from instance's network interfaces") } machineScope.V(3).Info( "Detaching security groups from provided network interface", "groups", core, "instanceID", instance.ID, ) for _, id := range machineScope.AWSMachine.Spec.NetworkInterfaces { if err := ec2Service.DetachSecurityGroupsFromNetworkInterface(core, id); err != nil { return reconcile.Result{}, errors.Wrap(err, "failed to detach security groups from instance's network interfaces") } } } r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeNormal, "SuccessfulTerminate", "Terminated instance %q", instance.ID) } // Instance is deleted so remove the finalizer. machineScope.AWSMachine.Finalizers = util.Filter(machineScope.AWSMachine.Finalizers, infrav1.MachineFinalizer) return reconcile.Result{}, nil } // findInstance queries the EC2 apis and retrieves the instance if it exists, returns nil otherwise. func (r *AWSMachineReconciler) findInstance(scope *scope.MachineScope, ec2svc services.EC2MachineInterface) (*infrav1.Instance, error) { // Parse the ProviderID. pid, err := noderefutil.NewProviderID(scope.GetProviderID()) if err != nil && err != noderefutil.ErrEmptyProviderID { return nil, errors.Wrapf(err, "failed to parse Spec.ProviderID") } // If the ProviderID is populated, describe the instance using the ID. if err == nil { instance, err := ec2svc.InstanceIfExists(pointer.StringPtr(pid.ID())) if err != nil { return nil, errors.Wrapf(err, "failed to query AWSMachine instance") } return instance, nil } // If the ProviderID is empty, try to query the instance using tags. instance, err := ec2svc.GetRunningInstanceByTags(scope) if err != nil { return nil, errors.Wrapf(err, "failed to query AWSMachine instance by tags") } return instance, nil } func (r *AWSMachineReconciler) reconcileNormal(ctx context.Context, machineScope *scope.MachineScope, clusterScope *scope.ClusterScope) (reconcile.Result, error) { machineScope.Info("Reconciling AWSMachine") // If the AWSMachine is in an error state, return early. if machineScope.AWSMachine.Status.ErrorReason != nil || machineScope.AWSMachine.Status.ErrorMessage != nil { machineScope.Info("Error state detected, skipping reconciliation") return reconcile.Result{}, nil } // If the AWSMachine doesn't have our finalizer, add it. 
if !util.Contains(machineScope.AWSMachine.Finalizers, infrav1.MachineFinalizer) { machineScope.AWSMachine.Finalizers = append(machineScope.AWSMachine.Finalizers, infrav1.MachineFinalizer) } if !machineScope.Cluster.Status.InfrastructureReady { machineScope.Info("Cluster infrastructure is not ready yet") return reconcile.Result{}, nil } // Make sure bootstrap data is available and populated. if machineScope.Machine.Spec.Bootstrap.Data == nil { machineScope.Info("Bootstrap data is not yet available") return reconcile.Result{}, nil } ec2svc := r.getEC2Service(clusterScope) // Get or create the instance. instance, err := r.getOrCreate(machineScope, ec2svc) if err != nil { return reconcile.Result{}, err } // Set an error message if we couldn't find the instance. if instance == nil { machineScope.SetErrorReason(capierrors.UpdateMachineError) machineScope.SetErrorMessage(errors.New("EC2 instance cannot be found")) return reconcile.Result{}, nil } // TODO(ncdc): move this validation logic into a validating webhook if errs := r.validateUpdate(&machineScope.AWSMachine.Spec, instance); len(errs) > 0 { agg := kerrors.NewAggregate(errs) r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "InvalidUpdate", "Invalid update: %s", agg.Error()) return reconcile.Result{}, nil } // Make sure Spec.ProviderID is always set. machineScope.SetProviderID(fmt.Sprintf("aws:////%s", instance.ID)) // Proceed to reconcile the AWSMachine state. machineScope.SetInstanceState(instance.State) // TODO(vincepri): Remove this annotation when clusterctl is no longer relevant. machineScope.SetAnnotation("cluster-api-provider-aws", "true") switch instance.State { case infrav1.InstanceStateRunning: machineScope.Info("Machine instance is running", "instance-id", *machineScope.GetInstanceID()) machineScope.SetReady() case infrav1.InstanceStatePending: machineScope.Info("Machine instance is pending", "instance-id", *machineScope.GetInstanceID()) default: machineScope.SetErrorReason(capierrors.UpdateMachineError) machineScope.SetErrorMessage(errors.Errorf("EC2 instance state %q is unexpected", instance.State)) } if err := r.reconcileLBAttachment(machineScope, clusterScope, instance); err != nil { return reconcile.Result{}, errors.Errorf("failed to reconcile LB attachment: %+v", err) } existingSecurityGroups, err := ec2svc.GetInstanceSecurityGroups(*machineScope.GetInstanceID()) if err != nil { return reconcile.Result{}, err } // Ensure that the security groups are correct. _, err = r.ensureSecurityGroups(ec2svc, machineScope, machineScope.AWSMachine.Spec.AdditionalSecurityGroups, existingSecurityGroups) if err != nil { return reconcile.Result{}, errors.Errorf("failed to apply security groups: %+v", err) } // Ensure that the tags are correct. _, err = r.ensureTags(ec2svc, machineScope.AWSMachine, machineScope.GetInstanceID(), machineScope.AdditionalTags()) if err != nil { return reconcile.Result{}, errors.Errorf("failed to ensure tags: %+v", err) } return reconcile.Result{}, nil } func (r *AWSMachineReconciler) getOrCreate(scope *scope.MachineScope, ec2svc services.EC2MachineInterface) (*infrav1.Instance, error) { instance, err := r.findInstance(scope, ec2svc) if err != nil { return nil, err } if instance == nil { // Create a new AWSMachine instance if we couldn't find a running instance. 
instance, err = ec2svc.CreateInstance(scope) if err != nil { return nil, errors.Wrapf(err, "failed to create AWSMachine instance") } } return instance, nil } func (r *AWSMachineReconciler) reconcileLBAttachment(machineScope *scope.MachineScope, clusterScope *scope.ClusterScope, i *infrav1.Instance) error { if !machineScope.IsControlPlane() { return nil } elbsvc := elb.NewService(clusterScope) if err := elbsvc.RegisterInstanceWithAPIServerELB(i); err != nil { r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedAttachControlPlaneELB", "Failed to register control plane instance %q with load balancer: %v", i.ID, err) return errors.Wrapf(err, "could not register control plane instance %q with load balancer", i.ID) } return nil } // validateUpdate checks that no immutable fields have been updated and // returns a slice of errors representing attempts to change immutable state. func (r *AWSMachineReconciler) validateUpdate(spec *infrav1.AWSMachineSpec, i *infrav1.Instance) (errs []error) { // Instance Type if spec.InstanceType != i.Type { errs = append(errs, errors.Errorf("instance type cannot be mutated from %q to %q", i.Type, spec.InstanceType)) } // IAM Profile if spec.IAMInstanceProfile != i.IAMProfile { errs = append(errs, errors.Errorf("instance IAM profile cannot be mutated from %q to %q", i.IAMProfile, spec.IAMInstanceProfile)) } // SSH Key Name (also account for default) if spec.SSHKeyName != aws.StringValue(i.SSHKeyName) && spec.SSHKeyName != "" { errs = append(errs, errors.Errorf("SSH key name cannot be mutated from %q to %q", aws.StringValue(i.SSHKeyName), spec.SSHKeyName)) } // Root Device Size if spec.RootDeviceSize > 0 && spec.RootDeviceSize != i.RootDeviceSize { errs = append(errs, errors.Errorf("Root volume size cannot be mutated from %v to %v", i.RootDeviceSize, spec.RootDeviceSize)) } // Subnet ID // spec.Subnet is a *AWSResourceReference and could technically be // a *string, ARN or Filter. However, elsewhere in the code it is only used // as a *string, so do the same here. if spec.Subnet != nil { if aws.StringValue(spec.Subnet.ID) != i.SubnetID { errs = append(errs, errors.Errorf("machine subnet ID cannot be mutated from %q to %q", i.SubnetID, aws.StringValue(spec.Subnet.ID))) } } // PublicIP check is a little more complicated as the machineConfig is a // simple bool indicating if the instance should have a public IP or not, // while the instanceDescription contains the public IP assigned to the // instance. // Work out whether the instance already has a public IP or not based on // the length of the PublicIP string. Anything >0 is assumed to mean it does // have a public IP. instanceHasPublicIP := false if len(aws.StringValue(i.PublicIP)) > 0 { instanceHasPublicIP = true } if aws.BoolValue(spec.PublicIP) != instanceHasPublicIP { errs = append(errs, errors.Errorf(`public IP setting cannot be mutated from "%v" to "%v"`, instanceHasPublicIP, aws.BoolValue(spec.PublicIP))) } return errs } // AWSClusterToAWSMachine is a handler.ToRequestsFunc to be used to enqeue requests for reconciliation // of AWSMachines. 
func (r *AWSMachineReconciler) AWSClusterToAWSMachines(o handler.MapObject) []ctrl.Request { result := []ctrl.Request{} c, ok := o.Object.(*infrav1.AWSCluster) if !ok { r.Log.Error(errors.Errorf("expected a AWSCluster but got a %T", o.Object), "failed to get AWSMachine for AWSCluster") return nil } log := r.Log.WithValues("AWSCluster", c.Name, "Namespace", c.Namespace) cluster, err := util.GetOwnerCluster(context.TODO(), r.Client, c.ObjectMeta) switch { case apierrors.IsNotFound(err) || cluster == nil: return result case err != nil: log.Error(err, "failed to get owning cluster") return result } labels := map[string]string{clusterv1.ClusterLabelName: cluster.Name} machineList := &clusterv1.MachineList{} if err := r.List(context.TODO(), machineList, client.InNamespace(c.Namespace), client.MatchingLabels(labels)); err != nil { log.Error(err, "failed to list Machines") return nil } for _, m := range machineList.Items { if m.Spec.InfrastructureRef.Name == "" { continue } name := client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.InfrastructureRef.Name} result = append(result, ctrl.Request{NamespacedName: name}) } return result }
1
11844
In the future, it'd be great if we can separate commits or PRs that aren't in the same scope
kubernetes-sigs-cluster-api-provider-aws
go
@@ -133,7 +133,11 @@ class ScriptableType(AutoPropertyType): def __new__(meta, name, bases, dict): cls = super(ScriptableType, meta).__new__(meta, name, bases, dict) - gestures = getattr(cls, "_%s__gestures" % cls.__name__, {}) + # #8463: To avoid name mangling conflicts, create a copy of the __gestures dictionary. + try: + gestures = getattr(cls, "_%s__gestures" % cls.__name__).copy() + except AttributeError: + gestures = {} # Python 3 incompatible. for name, script in dict.iteritems(): if not name.startswith('script_'):
1
#baseObject.py #A part of NonVisual Desktop Access (NVDA) #Copyright (C) 2007-2018 NV Access Limited, Christopher Toth, Babbage B.V. #This file is covered by the GNU General Public License. #See the file COPYING for more details. """Contains the base classes that many of NVDA's classes such as NVDAObjects, virtualBuffers, appModules, synthDrivers inherit from. These base classes provide such things as auto properties, and methods and properties for scripting and key binding. """ import weakref from logHandler import log class Getter(object): def __init__(self,fget): self.fget=fget def __get__(self,instance,owner): if not instance: return self return self.fget(instance) def setter(self,func): return property(fget=self._func,fset=func) def deleter(self,func): return property(fget=self._func,fdel=func) class CachingGetter(Getter): def __get__(self, instance, owner): if not instance: return self return instance._getPropertyViaCache(self.fget) class AutoPropertyType(type): def __init__(self,name,bases,dict): super(AutoPropertyType,self).__init__(name,bases,dict) cacheByDefault=False try: cacheByDefault=dict["cachePropertiesByDefault"] except KeyError: cacheByDefault=any(getattr(base, "cachePropertiesByDefault", False) for base in bases) # given _get_myVal, _set_myVal, and _del_myVal: "myVal" would be output 3 times # use a set comprehension to ensure unique values, "myVal" only needs to occur once. props={x[5:] for x in dict.keys() if x[0:5] in ('_get_','_set_','_del_')} for x in props: g=dict.get('_get_%s'%x,None) s=dict.get('_set_%s'%x,None) d=dict.get('_del_%s'%x,None) if x in dict: methodsString=",".join([str(i) for i in g,s,d if i]) raise TypeError("%s is already a class attribute, cannot create descriptor with methods %s"%(x,methodsString)) if not g: # There's a setter or deleter, but no getter. # This means it could be in one of the base classes. for base in bases: g = getattr(base,'_get_%s'%x,None) if g: break cache=dict.get('_cache_%s'%x,None) if cache is None: # The cache setting hasn't been specified in this class, but it could be in one of the bases. for base in bases: cache = getattr(base,'_cache_%s'%x,None) if cache is not None: break else: cache=cacheByDefault if g and not s and not d: setattr(self,x,(CachingGetter if cache else Getter)(g)) else: setattr(self,x,property(fget=g,fset=s,fdel=d)) class AutoPropertyObject(object): """A class that dynamicly supports properties, by looking up _get_* and _set_* methods at runtime. _get_x will make property x with a getter (you can get its value). _set_x will make a property x with a setter (you can set its value). If there is a _get_x but no _set_x then setting x will override the property completely. Properties can also be cached for the duration of one core pump cycle. This is useful if the same property is likely to be fetched multiple times in one cycle. For example, several NVDAObject properties are fetched by both braille and speech. Setting _cache_x to C{True} specifies that x should be cached. Setting it to C{False} specifies that it should not be cached. If _cache_x is not set, L{cachePropertiesByDefault} is used. """ __metaclass__=AutoPropertyType #: Tracks the instances of this class; used by L{invalidateCaches}. #: @type: weakref.WeakKeyDictionary __instances=weakref.WeakKeyDictionary() #: Specifies whether properties are cached by default; #: can be overridden for individual properties by setting _cache_propertyName. 
#: @type: bool cachePropertiesByDefault = False def __new__(cls, *args, **kwargs): self = super(AutoPropertyObject, cls).__new__(cls) #: Maps properties to cached values. #: @type: dict self._propertyCache={} self.__instances[self]=None return self def _getPropertyViaCache(self,getterMethod=None): if not getterMethod: raise ValueError("getterMethod is None") try: val=self._propertyCache[getterMethod] except KeyError: val=getterMethod(self) self._propertyCache[getterMethod]=val return val def invalidateCache(self): self._propertyCache.clear() @classmethod def invalidateCaches(cls): """Invalidate the caches for all current instances. """ # We use keys() here instead of iterkeys(), as invalidating the cache on an object may cause instances to disappear, # which would in turn cause an exception due to the dictionary changing size during iteration. for instance in cls.__instances.keys(): instance.invalidateCache() class ScriptableType(AutoPropertyType): """A metaclass used for collecting and caching gestures on a ScriptableObject""" def __new__(meta, name, bases, dict): cls = super(ScriptableType, meta).__new__(meta, name, bases, dict) gestures = getattr(cls, "_%s__gestures" % cls.__name__, {}) # Python 3 incompatible. for name, script in dict.iteritems(): if not name.startswith('script_'): continue scriptName = name[len("script_"):] if hasattr(script, 'gestures'): for gesture in script.gestures: gestures[gesture] = scriptName setattr(cls, "_%s__gestures" % cls.__name__, gestures) return cls class ScriptableObject(AutoPropertyObject): """A class that implements NVDA's scripting interface. Input gestures are bound to scripts such that the script will be executed when the appropriate input gesture is received. Scripts are methods named with a prefix of C{script_}; e.g. C{script_foo}. They accept an L{inputCore.InputGesture} as their single argument. Gesture bindings can be specified on the class by creating a C{__gestures} dict which maps gesture identifiers to script names. They can also be bound on an instance using the L{bindGesture} method. @cvar scriptCategory: If present, a translatable string displayed to the user as the category for scripts in this class; e.g. in the Input Gestures dialog. This can be overridden for individual scripts by setting a C{category} attribute on the script method. @type scriptCategory: basestring """ __metaclass__ = ScriptableType def __init__(self): #: Maps input gestures to script functions. #: @type: dict self._gestureMap = {} # Bind gestures specified on the class. # This includes gestures specified on decorated scripts. # This does not include the gestures that are added when creating a DynamicNVDAObjectType. for cls in reversed(self.__class__.__mro__): try: self.bindGestures(getattr(cls, "_%s__gestures" % cls.__name__)) except AttributeError: pass try: self.bindGestures(cls._scriptDecoratorGestures) except AttributeError: pass super(ScriptableObject, self).__init__() def bindGesture(self, gestureIdentifier, scriptName): """Bind an input gesture to a script. @param gestureIdentifier: The identifier of the input gesture. @type gestureIdentifier: str @param scriptName: The name of the script, which is the name of the method excluding the C{script_} prefix. @type scriptName: str @raise LookupError: If there is no script with the provided name. """ # Don't store the instance method, as this causes a circular reference # and instance methods are meant to be generated on retrieval anyway. 
func = getattr(self.__class__, "script_%s" % scriptName, None) if not func: raise LookupError("No such script: %s" % func) # Import late to avoid circular import. import inputCore self._gestureMap[inputCore.normalizeGestureIdentifier(gestureIdentifier)] = func def removeGestureBinding(self,gestureIdentifier): """ Removes the binding for the given gesture identifier if a binding exists. @param gestureIdentifier: The identifier of the input gesture. @type gestureIdentifier: str @raise LookupError: If there is no binding for this gesture """ # Import late to avoid circular import. import inputCore del self._gestureMap[inputCore.normalizeGestureIdentifier(gestureIdentifier)] def clearGestureBindings(self): """Remove all input gesture bindings from this object. """ self._gestureMap.clear() def bindGestures(self, gestureMap): """Bind or unbind multiple input gestures. This is a convenience method which simply calls L{bindGesture} for each gesture and script pair, logging any errors. For the case where script is None, L{removeGestureBinding} is called instead. @param gestureMap: A mapping of gesture identifiers to script names. @type gestureMap: dict of str to str """ for gestureIdentifier, scriptName in gestureMap.iteritems(): if scriptName: try: self.bindGesture(gestureIdentifier, scriptName) except LookupError: log.error("Error binding script %s in %r" % (scriptName, self)) else: try: self.removeGestureBinding(gestureIdentifier) except LookupError: pass def getScript(self,gesture): """Retrieve the script bound to a given gesture. @param gesture: The input gesture in question. @type gesture: L{inputCore.InputGesture} @return: The script function or C{None} if none was found. @rtype: script function """ for identifier in gesture.normalizedIdentifiers: try: # Convert to instance method. return self._gestureMap[identifier].__get__(self, self.__class__) except KeyError: continue else: return None #: A value for sleepMode which indicates that NVDA should fully sleep for this object; #: i.e. braille and speech via NVDA controller client is disabled and the user cannot disable sleep mode. SLEEP_FULL = "full"
1
22623
can you define the string format on a different line, so it can be named, and so that it does not have to be constructed twice?
nvaccess-nvda
py
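The review comment on the record above asks for the `"_%s__gestures"` format string to be built once and given a name, since the patched `__new__` constructs it twice. Below is a hypothetical sketch of such a rework of `ScriptableType.__new__`; it is not the change that was merged. The variable name `gesturesDictName` and the `Example` class are invented, the metaclass is reduced to a plain `type` subclass so the snippet runs standalone (in baseObject.py it would keep `AutoPropertyType` as its base), and it uses Python 3 syntax rather than the Python 2 style of the original file.

```python
class ScriptableType(type):
    """Sketch: collect decorated script gestures, naming the mangled dict attribute once."""

    def __new__(meta, name, bases, clsDict):
        cls = super(ScriptableType, meta).__new__(meta, name, bases, clsDict)
        # Name of the class-private __gestures dict after Python's name mangling,
        # computed a single time so the format string is not constructed twice.
        gesturesDictName = "_%s__gestures" % cls.__name__
        # #8463: work on a copy to avoid mutating a base class's dict.
        try:
            gestures = getattr(cls, gesturesDictName).copy()
        except AttributeError:
            gestures = {}
        for attrName, script in clsDict.items():
            if not attrName.startswith('script_'):
                continue
            scriptName = attrName[len("script_"):]
            for gesture in getattr(script, 'gestures', ()):
                gestures[gesture] = scriptName
        setattr(cls, gesturesDictName, gestures)
        return cls


# Tiny standalone check (Python 3 syntax); gesture identifier is illustrative only.
class Example(object, metaclass=ScriptableType):
    def script_doNothing(self, gesture):
        pass
    script_doNothing.gestures = ("kb:NVDA+f1",)

assert Example._Example__gestures == {"kb:NVDA+f1": "doNothing"}
```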
@@ -91,3 +91,9 @@ class AnsibleGalaxyInstall: except sh.ErrorReturnCode as e: LOG.error('ERROR: {}'.format(e)) utilities.sysexit(e.exit_code) + + def download(self, config_file): + utilities.print_info('Installing role dependencies ...') + self.add_env_arg('ANSIBLE_CONFIG', config_file) + self.bake() + self.execute()
1
# Copyright (c) 2015-2016 Cisco Systems # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. import os import sh from molecule import utilities LOG = utilities.get_logger(__name__) class AnsibleGalaxyInstall: def __init__(self, requirements_file, _env=None, _out=LOG.info, _err=LOG.error): """ Sets up requirements for ansible-galaxy :param requirements_file: Path to requirements file for ansible-galaxy :param _env: Environment dictionary to use. os.environ.copy() is used by default :param _out: Function passed to sh for STDOUT :param _err: Function passed to sh for STDERR :return: None """ self.env = _env if _env else os.environ.copy() self.out = _out self.err = _err self.requirements_file = requirements_file self.galaxy = None # defaults can be redefined with call to add_env_arg() before baking self.add_env_arg('PYTHONUNBUFFERED', '1') self.add_env_arg('ANSIBLE_FORCE_COLOR', 'true') def bake(self): """ Bake ansible-galaxy command so it's ready to execute. :return: None """ self.galaxy = sh.ansible_galaxy.bake('install', '-f', '-r', self.requirements_file, _env=self.env, _out=self.out, _err=self.err) def add_env_arg(self, name, value): """ Adds argument to environment passed to ansible-galaxy :param name: Name of argument to be added :param value: Value of argument to be added :return: None """ self.env[name] = value def execute(self): """ Executes ansible-galaxy install :return: sh.stdout on success, else None :return: None """ if self.galaxy is None: self.bake() try: return self.galaxy().stdout except sh.ErrorReturnCode as e: LOG.error('ERROR: {}'.format(e)) utilities.sysexit(e.exit_code)
1
6481
Can we write a unit test for this.
ansible-community-molecule
py
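The reviewer on the record above asks for a unit test covering the new `download()` method added by the patch. Below is a minimal pytest-style sketch of what such a test could look like; the module path `molecule.ansible_galaxy_install`, the fixture name, and the test name are assumptions, and `bake()`/`execute()` are mocked out so no `ansible-galaxy` binary is needed.

```python
import pytest

try:
    from unittest import mock  # Python 3
except ImportError:
    import mock  # Python 2 mock backport

from molecule import ansible_galaxy_install  # assumed module path


@pytest.fixture()
def galaxy_install():
    # The requirements file path is only stored by __init__, so a dummy value is fine here.
    return ansible_galaxy_install.AnsibleGalaxyInstall('requirements.yml')


def test_download_sets_ansible_config_and_runs(galaxy_install):
    with mock.patch.object(galaxy_install, 'bake') as mocked_bake, \
            mock.patch.object(galaxy_install, 'execute') as mocked_execute:
        galaxy_install.download('/tmp/ansible.cfg')

    # download() should export ANSIBLE_CONFIG into the environment passed to
    # ansible-galaxy, then bake and execute the command.
    assert galaxy_install.env['ANSIBLE_CONFIG'] == '/tmp/ansible.cfg'
    mocked_bake.assert_called_once_with()
    mocked_execute.assert_called_once_with()
```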
@@ -685,6 +685,8 @@ class AbstractTab(QWidget): @pyqtSlot(bool) def _on_load_finished(self, ok): + sess_manager = objreg.get('session-manager') + sess_manager.session_save('_autosave', quiet=True, force=True) if ok and not self._has_ssl_errors: if self.url().scheme() == 'https': self._set_load_status(usertypes.LoadStatus.success_https)
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2016 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """Base class for a wrapper over QWebView/QWebEngineView.""" import itertools from PyQt5.QtCore import pyqtSignal, pyqtSlot, QUrl, QObject, QSizeF from PyQt5.QtGui import QIcon from PyQt5.QtWidgets import QWidget, QApplication from qutebrowser.keyinput import modeman from qutebrowser.config import config from qutebrowser.utils import (utils, objreg, usertypes, message, log, qtutils, urlutils) from qutebrowser.misc import miscwidgets from qutebrowser.browser import mouse, hints tab_id_gen = itertools.count(0) def create(win_id, parent=None): """Get a QtWebKit/QtWebEngine tab object. Args: win_id: The window ID where the tab will be shown. parent: The Qt parent to set. """ # Importing modules here so we don't depend on QtWebEngine without the # argument and to avoid circular imports. mode_manager = modeman.instance(win_id) if objreg.get('args').backend == 'webengine': from qutebrowser.browser.webengine import webenginetab tab_class = webenginetab.WebEngineTab else: from qutebrowser.browser.webkit import webkittab tab_class = webkittab.WebKitTab return tab_class(win_id=win_id, mode_manager=mode_manager, parent=parent) def init(args): """Initialize backend-specific modules.""" if args.backend == 'webengine': from qutebrowser.browser.webengine import webenginetab webenginetab.init() else: from qutebrowser.browser.webkit import webkittab webkittab.init() class WebTabError(Exception): """Base class for various errors.""" class UnsupportedOperationError(WebTabError): """Raised when an operation is not supported with the given backend.""" class TabData: """A simple namespace with a fixed set of attributes. Attributes: keep_icon: Whether the (e.g. cloned) icon should not be cleared on page load. inspector: The QWebInspector used for this webview. viewing_source: Set if we're currently showing a source view. open_target: How the next clicked link should be opened. override_target: Override for open_target for fake clicks (like hints). """ def __init__(self): self.keep_icon = False self.viewing_source = False self.inspector = None self.open_target = usertypes.ClickTarget.normal self.override_target = None def combined_target(self): if self.override_target is not None: return self.override_target else: return self.open_target class AbstractPrinting: """Attribute of AbstractTab for printing the page.""" def __init__(self): self._widget = None def check_pdf_support(self): raise NotImplementedError def check_printer_support(self): raise NotImplementedError def to_pdf(self, filename): raise NotImplementedError def to_printer(self, printer): raise NotImplementedError class AbstractSearch(QObject): """Attribute of AbstractTab for doing searches. Attributes: text: The last thing this view was searched for. 
_flags: The flags of the last search (needs to be set by subclasses). _widget: The underlying WebView widget. """ def __init__(self, parent=None): super().__init__(parent) self._widget = None self.text = None def search(self, text, *, ignore_case=False, reverse=False, result_cb=None): """Find the given text on the page. Args: text: The text to search for. ignore_case: Search case-insensitively. (True/False/'smart') reverse: Reverse search direction. result_cb: Called with a bool indicating whether a match was found. """ raise NotImplementedError def clear(self): """Clear the current search.""" raise NotImplementedError def prev_result(self, *, result_cb=None): """Go to the previous result of the current search. Args: result_cb: Called with a bool indicating whether a match was found. """ raise NotImplementedError def next_result(self, *, result_cb=None): """Go to the next result of the current search. Args: result_cb: Called with a bool indicating whether a match was found. """ raise NotImplementedError class AbstractZoom(QObject): """Attribute of AbstractTab for controlling zoom. Attributes: _neighborlist: A NeighborList with the zoom levels. _default_zoom_changed: Whether the zoom was changed from the default. """ def __init__(self, win_id, parent=None): super().__init__(parent) self._widget = None self._win_id = win_id self._default_zoom_changed = False self._init_neighborlist() objreg.get('config').changed.connect(self._on_config_changed) # # FIXME:qtwebengine is this needed? # # For some reason, this signal doesn't get disconnected automatically # # when the WebView is destroyed on older PyQt versions. # # See https://github.com/The-Compiler/qutebrowser/issues/390 # self.destroyed.connect(functools.partial( # cfg.changed.disconnect, self.init_neighborlist)) @pyqtSlot(str, str) def _on_config_changed(self, section, option): if section == 'ui' and option in ['zoom-levels', 'default-zoom']: if not self._default_zoom_changed: factor = float(config.get('ui', 'default-zoom')) / 100 self._set_factor_internal(factor) self._default_zoom_changed = False self._init_neighborlist() def _init_neighborlist(self): """Initialize self._neighborlist.""" levels = config.get('ui', 'zoom-levels') self._neighborlist = usertypes.NeighborList( levels, mode=usertypes.NeighborList.Modes.edge) self._neighborlist.fuzzyval = config.get('ui', 'default-zoom') def offset(self, offset): """Increase/Decrease the zoom level by the given offset. Args: offset: The offset in the zoom level list. Return: The new zoom percentage. """ level = self._neighborlist.getitem(offset) self.set_factor(float(level) / 100, fuzzyval=False) return level def set_factor(self, factor, *, fuzzyval=True): """Zoom to a given zoom factor. Args: factor: The zoom factor as float. fuzzyval: Whether to set the NeighborLists fuzzyval. 
""" if fuzzyval: self._neighborlist.fuzzyval = int(factor * 100) if factor < 0: raise ValueError("Can't zoom to factor {}!".format(factor)) self._default_zoom_changed = True self._set_factor_internal(factor) def factor(self): raise NotImplementedError def set_default(self): default_zoom = config.get('ui', 'default-zoom') self._set_factor_internal(float(default_zoom) / 100) class AbstractCaret(QObject): """Attribute of AbstractTab for caret browsing.""" def __init__(self, win_id, tab, mode_manager, parent=None): super().__init__(parent) self._tab = tab self._win_id = win_id self._widget = None self.selection_enabled = False mode_manager.entered.connect(self._on_mode_entered) mode_manager.left.connect(self._on_mode_left) def _on_mode_entered(self, mode): raise NotImplementedError def _on_mode_left(self): raise NotImplementedError def move_to_next_line(self, count=1): raise NotImplementedError def move_to_prev_line(self, count=1): raise NotImplementedError def move_to_next_char(self, count=1): raise NotImplementedError def move_to_prev_char(self, count=1): raise NotImplementedError def move_to_end_of_word(self, count=1): raise NotImplementedError def move_to_next_word(self, count=1): raise NotImplementedError def move_to_prev_word(self, count=1): raise NotImplementedError def move_to_start_of_line(self): raise NotImplementedError def move_to_end_of_line(self): raise NotImplementedError def move_to_start_of_next_block(self, count=1): raise NotImplementedError def move_to_start_of_prev_block(self, count=1): raise NotImplementedError def move_to_end_of_next_block(self, count=1): raise NotImplementedError def move_to_end_of_prev_block(self, count=1): raise NotImplementedError def move_to_start_of_document(self): raise NotImplementedError def move_to_end_of_document(self): raise NotImplementedError def toggle_selection(self): raise NotImplementedError def drop_selection(self): raise NotImplementedError def has_selection(self): raise NotImplementedError def selection(self, html=False): raise NotImplementedError def follow_selected(self, *, tab=False): raise NotImplementedError class AbstractScroller(QObject): """Attribute of AbstractTab to manage scroll position.""" perc_changed = pyqtSignal(int, int) def __init__(self, tab, parent=None): super().__init__(parent) self._tab = tab self._widget = None self.perc_changed.connect(self._log_scroll_pos_change) @pyqtSlot() def _log_scroll_pos_change(self): log.webview.vdebug("Scroll position changed to {}".format( self.pos_px())) def _init_widget(self, widget): self._widget = widget def pos_px(self): raise NotImplementedError def pos_perc(self): raise NotImplementedError def to_perc(self, x=None, y=None): raise NotImplementedError def to_point(self, point): raise NotImplementedError def delta(self, x=0, y=0): raise NotImplementedError def delta_page(self, x=0, y=0): raise NotImplementedError def up(self, count=1): raise NotImplementedError def down(self, count=1): raise NotImplementedError def left(self, count=1): raise NotImplementedError def right(self, count=1): raise NotImplementedError def top(self): raise NotImplementedError def bottom(self): raise NotImplementedError def page_up(self, count=1): raise NotImplementedError def page_down(self, count=1): raise NotImplementedError def at_top(self): raise NotImplementedError def at_bottom(self): raise NotImplementedError class AbstractHistory: """The history attribute of a AbstractTab.""" def __init__(self, tab): self._tab = tab self._history = None def __len__(self): return len(self._history) def 
__iter__(self): return iter(self._history.items()) def current_idx(self): raise NotImplementedError def back(self): raise NotImplementedError def forward(self): raise NotImplementedError def can_go_back(self): raise NotImplementedError def can_go_forward(self): raise NotImplementedError def serialize(self): """Serialize into an opaque format understood by self.deserialize.""" raise NotImplementedError def deserialize(self, data): """Serialize from a format produced by self.serialize.""" raise NotImplementedError def load_items(self, items): """Deserialize from a list of WebHistoryItems.""" raise NotImplementedError class AbstractElements: """Finding and handling of elements on the page.""" def __init__(self, tab): self._widget = None self._tab = tab def find_css(self, selector, callback, *, only_visible=False): """Find all HTML elements matching a given selector async. Args: callback: The callback to be called when the search finished. selector: The CSS selector to search for. only_visible: Only show elements which are visible on screen. """ raise NotImplementedError def find_id(self, elem_id, callback): """Find the HTML element with the given ID async. Args: callback: The callback to be called when the search finished. elem_id: The ID to search for. """ raise NotImplementedError def find_focused(self, callback): """Find the focused element on the page async. Args: callback: The callback to be called when the search finished. Called with a WebEngineElement or None. """ raise NotImplementedError def find_at_pos(self, pos, callback): """Find the element at the given position async. This is also called "hit test" elsewhere. Args: pos: The QPoint to get the element for. callback: The callback to be called when the search finished. Called with a WebEngineElement or None. """ raise NotImplementedError class AbstractTab(QWidget): """A wrapper over the given widget to hide its API and expose another one. We use this to unify QWebView and QWebEngineView. Class attributes: WIDGET_CLASS: The class of the main widget recieving events. Needs to be overridden by subclasses. Attributes: history: The AbstractHistory for the current tab. registry: The ObjectRegistry associated with this tab. _load_status: loading status of this page Accessible via load_status() method. _has_ssl_errors: Whether SSL errors happened. Needs to be set by subclasses. for properties, see WebView/WebEngineView docs. Signals: See related Qt signals. new_tab_requested: Emitted when a new tab should be opened with the given URL. 
load_status_changed: The loading status changed """ window_close_requested = pyqtSignal() link_hovered = pyqtSignal(str) load_started = pyqtSignal() load_progress = pyqtSignal(int) load_finished = pyqtSignal(bool) icon_changed = pyqtSignal(QIcon) title_changed = pyqtSignal(str) load_status_changed = pyqtSignal(str) new_tab_requested = pyqtSignal(QUrl) url_changed = pyqtSignal(QUrl) shutting_down = pyqtSignal() contents_size_changed = pyqtSignal(QSizeF) add_history_item = pyqtSignal(QUrl, QUrl, str) # url, requested url, title WIDGET_CLASS = None def __init__(self, win_id, mode_manager, parent=None): self.win_id = win_id self.tab_id = next(tab_id_gen) super().__init__(parent) self.registry = objreg.ObjectRegistry() tab_registry = objreg.get('tab-registry', scope='window', window=win_id) tab_registry[self.tab_id] = self objreg.register('tab', self, registry=self.registry) # self.history = AbstractHistory(self) # self.scroller = AbstractScroller(self, parent=self) # self.caret = AbstractCaret(win_id=win_id, tab=self, # mode_manager=mode_manager, parent=self) # self.zoom = AbstractZoom(win_id=win_id) # self.search = AbstractSearch(parent=self) # self.printing = AbstractPrinting() # self.elements = AbstractElements(self) self.data = TabData() self._layout = miscwidgets.WrapperLayout(self) self._widget = None self._progress = 0 self._has_ssl_errors = False self._mode_manager = mode_manager self._load_status = usertypes.LoadStatus.none self._mouse_event_filter = mouse.MouseEventFilter( self, widget_class=self.WIDGET_CLASS, parent=self) self.backend = None # FIXME:qtwebengine Should this be public api via self.hints? # Also, should we get it out of objreg? hintmanager = hints.HintManager(win_id, self.tab_id, parent=self) objreg.register('hintmanager', hintmanager, scope='tab', window=self.win_id, tab=self.tab_id) def _set_widget(self, widget): # pylint: disable=protected-access self._widget = widget self._layout.wrap(self, widget) self.history._history = widget.history() self.scroller._init_widget(widget) self.caret._widget = widget self.zoom._widget = widget self.search._widget = widget self.printing._widget = widget self.elements._widget = widget self._install_event_filter() def _install_event_filter(self): raise NotImplementedError def _set_load_status(self, val): """Setter for load_status.""" if not isinstance(val, usertypes.LoadStatus): raise TypeError("Type {} is no LoadStatus member!".format(val)) log.webview.debug("load status for {}: {}".format(repr(self), val)) self._load_status = val self.load_status_changed.emit(val.name) def _event_target(self): """Return the widget events should be sent to.""" raise NotImplementedError def send_event(self, evt): """Send the given event to the underlying widget. The event will be sent via QApplication.postEvent. Note that a posted event may not be re-used in any way! """ # This only gives us some mild protection against re-using events, but # it's certainly better than a segfault. 
if getattr(evt, 'posted', False): raise AssertionError("Can't re-use an event which was already " "posted!") recipient = self._event_target() evt.posted = True QApplication.postEvent(recipient, evt) @pyqtSlot(QUrl) def _on_link_clicked(self, url): log.webview.debug("link clicked: url {}, override target {}, " "open_target {}".format( url.toDisplayString(), self.data.override_target, self.data.open_target)) if not url.isValid(): msg = urlutils.get_errstring(url, "Invalid link clicked") message.error(msg) self.data.open_target = usertypes.ClickTarget.normal return False target = self.data.combined_target() if target == usertypes.ClickTarget.normal: return elif target == usertypes.ClickTarget.tab: win_id = self.win_id bg_tab = False elif target == usertypes.ClickTarget.tab_bg: win_id = self.win_id bg_tab = True elif target == usertypes.ClickTarget.window: from qutebrowser.mainwindow import mainwindow window = mainwindow.MainWindow() window.show() win_id = window.win_id bg_tab = False else: raise ValueError("Invalid ClickTarget {}".format(target)) tabbed_browser = objreg.get('tabbed-browser', scope='window', window=win_id) tabbed_browser.tabopen(url, background=bg_tab) self.data.open_target = usertypes.ClickTarget.normal @pyqtSlot(QUrl) def _on_url_changed(self, url): """Update title when URL has changed and no title is available.""" if url.isValid() and not self.title(): self.title_changed.emit(url.toDisplayString()) self.url_changed.emit(url) @pyqtSlot() def _on_load_started(self): self._progress = 0 self._has_ssl_errors = False self.data.viewing_source = False self._set_load_status(usertypes.LoadStatus.loading) self.load_started.emit() def _handle_auto_insert_mode(self, ok): """Handle auto-insert-mode after loading finished.""" if not config.get('input', 'auto-insert-mode') or not ok: return cur_mode = self._mode_manager.mode if cur_mode == usertypes.KeyMode.insert: return def _auto_insert_mode_cb(elem): """Called from JS after finding the focused element.""" if elem is None: log.webview.debug("No focused element!") return if elem.is_editable(): modeman.enter(self.win_id, usertypes.KeyMode.insert, 'load finished', only_if_normal=True) self.elements.find_focused(_auto_insert_mode_cb) @pyqtSlot(bool) def _on_load_finished(self, ok): if ok and not self._has_ssl_errors: if self.url().scheme() == 'https': self._set_load_status(usertypes.LoadStatus.success_https) else: self._set_load_status(usertypes.LoadStatus.success) elif ok: self._set_load_status(usertypes.LoadStatus.warn) else: self._set_load_status(usertypes.LoadStatus.error) self.load_finished.emit(ok) if not self.title(): self.title_changed.emit(self.url().toDisplayString()) self._handle_auto_insert_mode(ok) @pyqtSlot() def _on_history_trigger(self): """Emit add_history_item when triggered by backend-specific signal.""" raise NotImplementedError @pyqtSlot(int) def _on_load_progress(self, perc): self._progress = perc self.load_progress.emit(perc) @pyqtSlot() def _on_ssl_errors(self): self._has_ssl_errors = True def url(self, requested=False): raise NotImplementedError def progress(self): return self._progress def load_status(self): return self._load_status def _openurl_prepare(self, url): qtutils.ensure_valid(url) self.title_changed.emit(url.toDisplayString()) def openurl(self, url): raise NotImplementedError def reload(self, *, force=False): raise NotImplementedError def stop(self): raise NotImplementedError def clear_ssl_errors(self): raise NotImplementedError def dump_async(self, callback, *, plain=False): """Dump the current page to a 
file ascync. The given callback will be called with the result when dumping is complete. """ raise NotImplementedError def run_js_async(self, code, callback=None, *, world=None): """Run javascript async. The given callback will be called with the result when running JS is complete. Args: code: The javascript code to run. callback: The callback to call with the result, or None. world: A world ID (int or usertypes.JsWorld member) to run the JS in the main world or in another isolated world. """ raise NotImplementedError def has_js(self): """Check if qutebrowser can run javascript in this tab.""" raise NotImplementedError def shutdown(self): raise NotImplementedError def title(self): raise NotImplementedError def icon(self): raise NotImplementedError def set_html(self, html, base_url): raise NotImplementedError def __repr__(self): try: url = utils.elide(self.url().toDisplayString(QUrl.EncodeUnicode), 100) except AttributeError: url = '<AttributeError>' return utils.get_repr(self, tab_id=self.tab_id, url=url)
1
16957
Same as above, use `.delete('_autosave')`, not the command handler.
qutebrowser-qutebrowser
py
@@ -68,18 +68,7 @@ class BatchDataReader extends BaseDataReader<ColumnarBatch> { // update the current file for Spark's filename() function InputFileBlockHolder.set(file.path().toString(), task.start(), task.length()); - // schema or rows returned by readers - PartitionSpec spec = task.spec(); - Set<Integer> idColumns = spec.identitySourceIds(); - Schema partitionSchema = TypeUtil.select(expectedSchema, idColumns); - boolean projectsIdentityPartitionColumns = !partitionSchema.columns().isEmpty(); - - Map<Integer, ?> idToConstant; - if (projectsIdentityPartitionColumns) { - idToConstant = PartitionUtil.constantsMap(task, BatchDataReader::convertConstant); - } else { - idToConstant = ImmutableMap.of(); - } + Map<Integer, ?> idToConstant = PartitionUtil.constantsMap(task, BatchDataReader::convertConstant); CloseableIterable<ColumnarBatch> iter; InputFile location = getInputFile(task);
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg.spark.source; import java.util.Map; import java.util.Set; import org.apache.arrow.vector.NullCheckingForGet; import org.apache.iceberg.CombinedScanTask; import org.apache.iceberg.DataFile; import org.apache.iceberg.FileFormat; import org.apache.iceberg.FileScanTask; import org.apache.iceberg.PartitionSpec; import org.apache.iceberg.Schema; import org.apache.iceberg.encryption.EncryptionManager; import org.apache.iceberg.io.CloseableIterable; import org.apache.iceberg.io.CloseableIterator; import org.apache.iceberg.io.FileIO; import org.apache.iceberg.io.InputFile; import org.apache.iceberg.mapping.NameMappingParser; import org.apache.iceberg.orc.ORC; import org.apache.iceberg.parquet.Parquet; import org.apache.iceberg.relocated.com.google.common.base.Preconditions; import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap; import org.apache.iceberg.spark.data.vectorized.VectorizedSparkOrcReaders; import org.apache.iceberg.spark.data.vectorized.VectorizedSparkParquetReaders; import org.apache.iceberg.types.TypeUtil; import org.apache.iceberg.util.PartitionUtil; import org.apache.spark.rdd.InputFileBlockHolder; import org.apache.spark.sql.vectorized.ColumnarBatch; class BatchDataReader extends BaseDataReader<ColumnarBatch> { private final Schema expectedSchema; private final String nameMapping; private final boolean caseSensitive; private final int batchSize; BatchDataReader( CombinedScanTask task, Schema expectedSchema, String nameMapping, FileIO fileIo, EncryptionManager encryptionManager, boolean caseSensitive, int size) { super(task, fileIo, encryptionManager); this.expectedSchema = expectedSchema; this.nameMapping = nameMapping; this.caseSensitive = caseSensitive; this.batchSize = size; } @Override CloseableIterator<ColumnarBatch> open(FileScanTask task) { DataFile file = task.file(); // update the current file for Spark's filename() function InputFileBlockHolder.set(file.path().toString(), task.start(), task.length()); // schema or rows returned by readers PartitionSpec spec = task.spec(); Set<Integer> idColumns = spec.identitySourceIds(); Schema partitionSchema = TypeUtil.select(expectedSchema, idColumns); boolean projectsIdentityPartitionColumns = !partitionSchema.columns().isEmpty(); Map<Integer, ?> idToConstant; if (projectsIdentityPartitionColumns) { idToConstant = PartitionUtil.constantsMap(task, BatchDataReader::convertConstant); } else { idToConstant = ImmutableMap.of(); } CloseableIterable<ColumnarBatch> iter; InputFile location = getInputFile(task); Preconditions.checkNotNull(location, "Could not find InputFile associated with FileScanTask"); if (task.file().format() == FileFormat.PARQUET) { Parquet.ReadBuilder builder = Parquet.read(location) 
.project(expectedSchema) .split(task.start(), task.length()) .createBatchedReaderFunc(fileSchema -> VectorizedSparkParquetReaders.buildReader(expectedSchema, fileSchema, /* setArrowValidityVector */ NullCheckingForGet.NULL_CHECKING_ENABLED, idToConstant)) .recordsPerBatch(batchSize) .filter(task.residual()) .caseSensitive(caseSensitive) // Spark eagerly consumes the batches. So the underlying memory allocated could be reused // without worrying about subsequent reads clobbering over each other. This improves // read performance as every batch read doesn't have to pay the cost of allocating memory. .reuseContainers(); if (nameMapping != null) { builder.withNameMapping(NameMappingParser.fromJson(nameMapping)); } iter = builder.build(); } else if (task.file().format() == FileFormat.ORC) { Schema schemaWithoutConstants = TypeUtil.selectNot(expectedSchema, idToConstant.keySet()); ORC.ReadBuilder builder = ORC.read(location) .project(schemaWithoutConstants) .split(task.start(), task.length()) .createBatchedReaderFunc(fileSchema -> VectorizedSparkOrcReaders.buildReader(expectedSchema, fileSchema, idToConstant)) .recordsPerBatch(batchSize) .filter(task.residual()) .caseSensitive(caseSensitive); if (nameMapping != null) { builder.withNameMapping(NameMappingParser.fromJson(nameMapping)); } iter = builder.build(); } else { throw new UnsupportedOperationException( "Format: " + task.file().format() + " not supported for batched reads"); } return iter.iterator(); } }
1
30609
It isn't necessary to check whether there are projected ID columns. The code is shorter if the values are available by default, even if they aren't used. This fixes the problem where there are constants to add (like `_file`) but no identity partition values are projected.
apache-iceberg
java
@@ -368,6 +368,8 @@ class RemoteConnection(object): ('POST', '/session/$sessionId/window/rect'), Command.GET_WINDOW_RECT: ('GET', '/session/$sessionId/window/rect'), + Command.W3C_MINIMIZE_WINDOW: + ('POST', '/session/$sessionId/window/minimize'), Command.MAXIMIZE_WINDOW: ('POST', '/session/$sessionId/window/$windowHandle/maximize'), Command.W3C_MAXIMIZE_WINDOW:
1
# Licensed to the Software Freedom Conservancy (SFC) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The SFC licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import logging import socket import string import base64 try: import http.client as httplib from urllib import request as url_request from urllib import parse except ImportError: # above is available in py3+, below is py2.7 import httplib as httplib import urllib2 as url_request import urlparse as parse from selenium.webdriver.common import utils as common_utils from .command import Command from .errorhandler import ErrorCode from . import utils LOGGER = logging.getLogger(__name__) class Request(url_request.Request): """ Extends the url_request.Request to support all HTTP request types. """ def __init__(self, url, data=None, method=None): """ Initialise a new HTTP request. :Args: - url - String for the URL to send the request to. - data - Data to send with the request. """ if method is None: method = data is not None and 'POST' or 'GET' elif method != 'POST' and method != 'PUT': data = None self._method = method url_request.Request.__init__(self, url, data=data) def get_method(self): """ Returns the HTTP method used by this request. """ return self._method class Response(object): """ Represents an HTTP response. """ def __init__(self, fp, code, headers, url): """ Initialise a new Response. :Args: - fp - The response body file object. - code - The HTTP status code returned by the server. - headers - A dictionary of headers returned by the server. - url - URL of the retrieved resource represented by this Response. """ self.fp = fp self.read = fp.read self.code = code self.headers = headers self.url = url def close(self): """ Close the response body file object. """ self.read = None self.fp = None def info(self): """ Returns the response headers. """ return self.headers def geturl(self): """ Returns the URL for the resource returned in this response. """ return self.url class HttpErrorHandler(url_request.HTTPDefaultErrorHandler): """ A custom HTTP error handler. Used to return Response objects instead of raising an HTTPError exception. """ def http_error_default(self, req, fp, code, msg, headers): """ Default HTTP error handler. :Args: - req - The original Request object. - fp - The response body file object. - code - The HTTP status code returned by the server. - msg - The HTTP status message returned by the server. - headers - The response headers. :Returns: A new Response object. """ return Response(fp, code, headers, req.get_full_url()) class RemoteConnection(object): """A connection with the Remote WebDriver server. 
Communicates with the server using the WebDriver wire protocol: https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol""" _timeout = socket._GLOBAL_DEFAULT_TIMEOUT @classmethod def get_timeout(cls): """ :Returns: Timeout value in seconds for all http requests made to the Remote Connection """ return None if cls._timeout == socket._GLOBAL_DEFAULT_TIMEOUT else cls._timeout @classmethod def set_timeout(cls, timeout): """ Override the default timeout :Args: - timeout - timeout value for http requests in seconds """ cls._timeout = timeout @classmethod def reset_timeout(cls): """ Reset the http request timeout to socket._GLOBAL_DEFAULT_TIMEOUT """ cls._timeout = socket._GLOBAL_DEFAULT_TIMEOUT @classmethod def get_remote_connection_headers(cls, parsed_url, keep_alive=False): """ Get headers for remote request. :Args: - parsed_url - The parsed url - keep_alive (Boolean) - Is this a keep-alive connection (default: False) """ headers = { 'Accept': 'application/json', 'Content-Type': 'application/json;charset=UTF-8', 'User-Agent': 'Python http auth' } if parsed_url.username: base64string = base64.b64encode('{0.username}:{0.password}'.format(parsed_url).encode()) headers.update({ 'Authorization': 'Basic {}'.format(base64string.decode()) }) if keep_alive: headers.update({ 'Connection': 'keep-alive' }) return headers def __init__(self, remote_server_addr, keep_alive=False, resolve_ip=True): # Attempt to resolve the hostname and get an IP address. self.keep_alive = keep_alive parsed_url = parse.urlparse(remote_server_addr) addr = parsed_url.hostname if parsed_url.hostname and resolve_ip: port = parsed_url.port or None if parsed_url.scheme == "https": ip = parsed_url.hostname else: ip = common_utils.find_connectable_ip(parsed_url.hostname, port=port) if ip: netloc = ip addr = netloc if parsed_url.port: netloc = common_utils.join_host_port(netloc, parsed_url.port) if parsed_url.username: auth = parsed_url.username if parsed_url.password: auth += ':%s' % parsed_url.password netloc = '%s@%s' % (auth, netloc) remote_server_addr = parse.urlunparse( (parsed_url.scheme, netloc, parsed_url.path, parsed_url.params, parsed_url.query, parsed_url.fragment)) else: LOGGER.info('Could not get IP address for host: %s' % parsed_url.hostname) self._url = remote_server_addr if keep_alive: self._conn = httplib.HTTPConnection( str(addr), str(parsed_url.port), timeout=self._timeout) self._commands = { Command.STATUS: ('GET', '/status'), Command.NEW_SESSION: ('POST', '/session'), Command.GET_ALL_SESSIONS: ('GET', '/sessions'), Command.QUIT: ('DELETE', '/session/$sessionId'), Command.GET_CURRENT_WINDOW_HANDLE: ('GET', '/session/$sessionId/window_handle'), Command.W3C_GET_CURRENT_WINDOW_HANDLE: ('GET', '/session/$sessionId/window'), Command.GET_WINDOW_HANDLES: ('GET', '/session/$sessionId/window_handles'), Command.W3C_GET_WINDOW_HANDLES: ('GET', '/session/$sessionId/window/handles'), Command.GET: ('POST', '/session/$sessionId/url'), Command.GO_FORWARD: ('POST', '/session/$sessionId/forward'), Command.GO_BACK: ('POST', '/session/$sessionId/back'), Command.REFRESH: ('POST', '/session/$sessionId/refresh'), Command.EXECUTE_SCRIPT: ('POST', '/session/$sessionId/execute'), Command.W3C_EXECUTE_SCRIPT: ('POST', '/session/$sessionId/execute/sync'), Command.W3C_EXECUTE_SCRIPT_ASYNC: ('POST', '/session/$sessionId/execute/async'), Command.GET_CURRENT_URL: ('GET', '/session/$sessionId/url'), Command.GET_TITLE: ('GET', '/session/$sessionId/title'), Command.GET_PAGE_SOURCE: ('GET', '/session/$sessionId/source'), Command.SCREENSHOT: 
('GET', '/session/$sessionId/screenshot'), Command.ELEMENT_SCREENSHOT: ('GET', '/session/$sessionId/element/$id/screenshot'), Command.FIND_ELEMENT: ('POST', '/session/$sessionId/element'), Command.FIND_ELEMENTS: ('POST', '/session/$sessionId/elements'), Command.W3C_GET_ACTIVE_ELEMENT: ('GET', '/session/$sessionId/element/active'), Command.GET_ACTIVE_ELEMENT: ('POST', '/session/$sessionId/element/active'), Command.FIND_CHILD_ELEMENT: ('POST', '/session/$sessionId/element/$id/element'), Command.FIND_CHILD_ELEMENTS: ('POST', '/session/$sessionId/element/$id/elements'), Command.CLICK_ELEMENT: ('POST', '/session/$sessionId/element/$id/click'), Command.CLEAR_ELEMENT: ('POST', '/session/$sessionId/element/$id/clear'), Command.SUBMIT_ELEMENT: ('POST', '/session/$sessionId/element/$id/submit'), Command.GET_ELEMENT_TEXT: ('GET', '/session/$sessionId/element/$id/text'), Command.SEND_KEYS_TO_ELEMENT: ('POST', '/session/$sessionId/element/$id/value'), Command.SEND_KEYS_TO_ACTIVE_ELEMENT: ('POST', '/session/$sessionId/keys'), Command.UPLOAD_FILE: ('POST', "/session/$sessionId/file"), Command.GET_ELEMENT_VALUE: ('GET', '/session/$sessionId/element/$id/value'), Command.GET_ELEMENT_TAG_NAME: ('GET', '/session/$sessionId/element/$id/name'), Command.IS_ELEMENT_SELECTED: ('GET', '/session/$sessionId/element/$id/selected'), Command.SET_ELEMENT_SELECTED: ('POST', '/session/$sessionId/element/$id/selected'), Command.IS_ELEMENT_ENABLED: ('GET', '/session/$sessionId/element/$id/enabled'), Command.IS_ELEMENT_DISPLAYED: ('GET', '/session/$sessionId/element/$id/displayed'), Command.GET_ELEMENT_LOCATION: ('GET', '/session/$sessionId/element/$id/location'), Command.GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW: ('GET', '/session/$sessionId/element/$id/location_in_view'), Command.GET_ELEMENT_SIZE: ('GET', '/session/$sessionId/element/$id/size'), Command.GET_ELEMENT_RECT: ('GET', '/session/$sessionId/element/$id/rect'), Command.GET_ELEMENT_ATTRIBUTE: ('GET', '/session/$sessionId/element/$id/attribute/$name'), Command.GET_ELEMENT_PROPERTY: ('GET', '/session/$sessionId/element/$id/property/$name'), Command.ELEMENT_EQUALS: ('GET', '/session/$sessionId/element/$id/equals/$other'), Command.GET_ALL_COOKIES: ('GET', '/session/$sessionId/cookie'), Command.ADD_COOKIE: ('POST', '/session/$sessionId/cookie'), Command.DELETE_ALL_COOKIES: ('DELETE', '/session/$sessionId/cookie'), Command.DELETE_COOKIE: ('DELETE', '/session/$sessionId/cookie/$name'), Command.SWITCH_TO_FRAME: ('POST', '/session/$sessionId/frame'), Command.SWITCH_TO_PARENT_FRAME: ('POST', '/session/$sessionId/frame/parent'), Command.SWITCH_TO_WINDOW: ('POST', '/session/$sessionId/window'), Command.CLOSE: ('DELETE', '/session/$sessionId/window'), Command.GET_ELEMENT_VALUE_OF_CSS_PROPERTY: ('GET', '/session/$sessionId/element/$id/css/$propertyName'), Command.IMPLICIT_WAIT: ('POST', '/session/$sessionId/timeouts/implicit_wait'), Command.EXECUTE_ASYNC_SCRIPT: ('POST', '/session/$sessionId/execute_async'), Command.SET_SCRIPT_TIMEOUT: ('POST', '/session/$sessionId/timeouts/async_script'), Command.SET_TIMEOUTS: ('POST', '/session/$sessionId/timeouts'), Command.DISMISS_ALERT: ('POST', '/session/$sessionId/dismiss_alert'), Command.W3C_DISMISS_ALERT: ('POST', '/session/$sessionId/alert/dismiss'), Command.ACCEPT_ALERT: ('POST', '/session/$sessionId/accept_alert'), Command.W3C_ACCEPT_ALERT: ('POST', '/session/$sessionId/alert/accept'), Command.SET_ALERT_VALUE: ('POST', '/session/$sessionId/alert_text'), Command.W3C_SET_ALERT_VALUE: ('POST', '/session/$sessionId/alert/text'), 
Command.GET_ALERT_TEXT: ('GET', '/session/$sessionId/alert_text'), Command.W3C_GET_ALERT_TEXT: ('GET', '/session/$sessionId/alert/text'), Command.SET_ALERT_CREDENTIALS: ('POST', '/session/$sessionId/alert/credentials'), Command.CLICK: ('POST', '/session/$sessionId/click'), Command.W3C_ACTIONS: ('POST', '/session/$sessionId/actions'), Command.W3C_CLEAR_ACTIONS: ('DELETE', '/session/$sessionId/actions'), Command.DOUBLE_CLICK: ('POST', '/session/$sessionId/doubleclick'), Command.MOUSE_DOWN: ('POST', '/session/$sessionId/buttondown'), Command.MOUSE_UP: ('POST', '/session/$sessionId/buttonup'), Command.MOVE_TO: ('POST', '/session/$sessionId/moveto'), Command.GET_WINDOW_SIZE: ('GET', '/session/$sessionId/window/$windowHandle/size'), Command.W3C_GET_WINDOW_SIZE: ('GET', '/session/$sessionId/window/size'), Command.SET_WINDOW_SIZE: ('POST', '/session/$sessionId/window/$windowHandle/size'), Command.W3C_SET_WINDOW_SIZE: ('POST', '/session/$sessionId/window/size'), Command.GET_WINDOW_POSITION: ('GET', '/session/$sessionId/window/$windowHandle/position'), Command.SET_WINDOW_POSITION: ('POST', '/session/$sessionId/window/$windowHandle/position'), Command.W3C_GET_WINDOW_POSITION: ('GET', '/session/$sessionId/window/position'), Command.W3C_SET_WINDOW_POSITION: ('POST', '/session/$sessionId/window/position'), Command.SET_WINDOW_RECT: ('POST', '/session/$sessionId/window/rect'), Command.GET_WINDOW_RECT: ('GET', '/session/$sessionId/window/rect'), Command.MAXIMIZE_WINDOW: ('POST', '/session/$sessionId/window/$windowHandle/maximize'), Command.W3C_MAXIMIZE_WINDOW: ('POST', '/session/$sessionId/window/maximize'), Command.SET_SCREEN_ORIENTATION: ('POST', '/session/$sessionId/orientation'), Command.GET_SCREEN_ORIENTATION: ('GET', '/session/$sessionId/orientation'), Command.SINGLE_TAP: ('POST', '/session/$sessionId/touch/click'), Command.TOUCH_DOWN: ('POST', '/session/$sessionId/touch/down'), Command.TOUCH_UP: ('POST', '/session/$sessionId/touch/up'), Command.TOUCH_MOVE: ('POST', '/session/$sessionId/touch/move'), Command.TOUCH_SCROLL: ('POST', '/session/$sessionId/touch/scroll'), Command.DOUBLE_TAP: ('POST', '/session/$sessionId/touch/doubleclick'), Command.LONG_PRESS: ('POST', '/session/$sessionId/touch/longclick'), Command.FLICK: ('POST', '/session/$sessionId/touch/flick'), Command.EXECUTE_SQL: ('POST', '/session/$sessionId/execute_sql'), Command.GET_LOCATION: ('GET', '/session/$sessionId/location'), Command.SET_LOCATION: ('POST', '/session/$sessionId/location'), Command.GET_APP_CACHE: ('GET', '/session/$sessionId/application_cache'), Command.GET_APP_CACHE_STATUS: ('GET', '/session/$sessionId/application_cache/status'), Command.CLEAR_APP_CACHE: ('DELETE', '/session/$sessionId/application_cache/clear'), Command.GET_NETWORK_CONNECTION: ('GET', '/session/$sessionId/network_connection'), Command.SET_NETWORK_CONNECTION: ('POST', '/session/$sessionId/network_connection'), Command.GET_LOCAL_STORAGE_ITEM: ('GET', '/session/$sessionId/local_storage/key/$key'), Command.REMOVE_LOCAL_STORAGE_ITEM: ('DELETE', '/session/$sessionId/local_storage/key/$key'), Command.GET_LOCAL_STORAGE_KEYS: ('GET', '/session/$sessionId/local_storage'), Command.SET_LOCAL_STORAGE_ITEM: ('POST', '/session/$sessionId/local_storage'), Command.CLEAR_LOCAL_STORAGE: ('DELETE', '/session/$sessionId/local_storage'), Command.GET_LOCAL_STORAGE_SIZE: ('GET', '/session/$sessionId/local_storage/size'), Command.GET_SESSION_STORAGE_ITEM: ('GET', '/session/$sessionId/session_storage/key/$key'), Command.REMOVE_SESSION_STORAGE_ITEM: ('DELETE', 
'/session/$sessionId/session_storage/key/$key'), Command.GET_SESSION_STORAGE_KEYS: ('GET', '/session/$sessionId/session_storage'), Command.SET_SESSION_STORAGE_ITEM: ('POST', '/session/$sessionId/session_storage'), Command.CLEAR_SESSION_STORAGE: ('DELETE', '/session/$sessionId/session_storage'), Command.GET_SESSION_STORAGE_SIZE: ('GET', '/session/$sessionId/session_storage/size'), Command.GET_LOG: ('POST', '/session/$sessionId/log'), Command.GET_AVAILABLE_LOG_TYPES: ('GET', '/session/$sessionId/log/types'), Command.CURRENT_CONTEXT_HANDLE: ('GET', '/session/$sessionId/context'), Command.CONTEXT_HANDLES: ('GET', '/session/$sessionId/contexts'), Command.SWITCH_TO_CONTEXT: ('POST', '/session/$sessionId/context'), } def execute(self, command, params): """ Send a command to the remote server. Any path subtitutions required for the URL mapped to the command should be included in the command parameters. :Args: - command - A string specifying the command to execute. - params - A dictionary of named parameters to send with the command as its JSON payload. """ command_info = self._commands[command] assert command_info is not None, 'Unrecognised command %s' % command data = utils.dump_json(params) path = string.Template(command_info[1]).substitute(params) url = '%s%s' % (self._url, path) return self._request(command_info[0], url, body=data) def _request(self, method, url, body=None): """ Send an HTTP request to the remote server. :Args: - method - A string for the HTTP method to send the request with. - url - A string for the URL to send the request to. - body - A string for request body. Ignored unless method is POST or PUT. :Returns: A dictionary with the server's parsed JSON response. """ LOGGER.debug('%s %s %s' % (method, url, body)) parsed_url = parse.urlparse(url) headers = self.get_remote_connection_headers(parsed_url, self.keep_alive) if self.keep_alive: if body and method != 'POST' and method != 'PUT': body = None try: self._conn.request(method, parsed_url.path, body, headers) resp = self._conn.getresponse() except (httplib.HTTPException, socket.error): self._conn.close() raise statuscode = resp.status else: password_manager = None if parsed_url.username: netloc = parsed_url.hostname if parsed_url.port: netloc += ":%s" % parsed_url.port cleaned_url = parse.urlunparse(( parsed_url.scheme, netloc, parsed_url.path, parsed_url.params, parsed_url.query, parsed_url.fragment)) password_manager = url_request.HTTPPasswordMgrWithDefaultRealm() password_manager.add_password(None, "%s://%s" % (parsed_url.scheme, netloc), parsed_url.username, parsed_url.password) request = Request(cleaned_url, data=body.encode('utf-8'), method=method) else: request = Request(url, data=body.encode('utf-8'), method=method) for key, val in headers.items(): request.add_header(key, val) if password_manager: opener = url_request.build_opener(url_request.HTTPRedirectHandler(), HttpErrorHandler(), url_request.HTTPBasicAuthHandler(password_manager)) else: opener = url_request.build_opener(url_request.HTTPRedirectHandler(), HttpErrorHandler()) resp = opener.open(request, timeout=self._timeout) statuscode = resp.code if not hasattr(resp, 'getheader'): if hasattr(resp.headers, 'getheader'): resp.getheader = lambda x: resp.headers.getheader(x) elif hasattr(resp.headers, 'get'): resp.getheader = lambda x: resp.headers.get(x) data = resp.read() try: if 300 <= statuscode < 304: return self._request('GET', resp.getheader('location')) body = data.decode('utf-8').replace('\x00', '').strip() if 399 < statuscode <= 500: return {'status': 
statuscode, 'value': body} content_type = [] if resp.getheader('Content-Type') is not None: content_type = resp.getheader('Content-Type').split(';') if not any([x.startswith('image/png') for x in content_type]): try: data = utils.load_json(body.strip()) except ValueError: if 199 < statuscode < 300: status = ErrorCode.SUCCESS else: status = ErrorCode.UNKNOWN_ERROR return {'status': status, 'value': body.strip()} assert type(data) is dict, ( 'Invalid server response body: %s' % body) # Some of the drivers incorrectly return a response # with no 'value' field when they should return null. if 'value' not in data: data['value'] = None return data else: data = {'status': 0, 'value': body.strip()} return data finally: LOGGER.debug("Finished Request") resp.close()
1
14,688
Update after command rename
SeleniumHQ-selenium
rb
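The oldf in the row above is Selenium's remote_connection module: a table mapping each command to an HTTP method plus a path template, and an execute() that fills $-placeholders such as $sessionId from the call's parameters before issuing the request. As a quick illustration of that mechanism only, here is a minimal sketch in Go (not Selenium's code; the Python source uses string.Template, and all names below are illustrative):

package main

import (
	"fmt"
	"strings"
)

type commandInfo struct {
	Method string // HTTP method for the command
	Path   string // path template with $-placeholders
}

// A tiny subset of a command table like the one in the row above.
var commands = map[string]commandInfo{
	"getAlertText": {"GET", "/session/$sessionId/alert/text"},
	"clickElement": {"POST", "/session/$sessionId/element/$id/click"},
}

// buildRequest resolves a command's path template with the given parameters.
// ReplaceAll is a simplification of Python's string.Template substitution.
func buildRequest(command string, params map[string]string) (method, path string, err error) {
	info, ok := commands[command]
	if !ok {
		return "", "", fmt.Errorf("unrecognised command %s", command)
	}
	path = info.Path
	for key, val := range params {
		path = strings.ReplaceAll(path, "$"+key, val)
	}
	return info.Method, path, nil
}

func main() {
	m, p, _ := buildRequest("clickElement", map[string]string{"sessionId": "abc123", "id": "42"})
	fmt.Println(m, p) // POST /session/abc123/element/42/click
}

Keeping the command table declarative and doing the substitution in one place is what lets a single execute()/_request pair serve every command in the protocol.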
@@ -6,9 +6,9 @@ package net.sourceforge.pmd.lang.java.rule.bestpractices;
 
 import java.util.List;
 
+import net.sourceforge.pmd.lang.java.ast.ASTAnyTypeDeclaration;
 import org.jaxen.JaxenException;
 
-import net.sourceforge.pmd.lang.ast.AbstractNode;
 import net.sourceforge.pmd.lang.ast.Node;
 import net.sourceforge.pmd.lang.java.ast.ASTAllocationExpression;
 import net.sourceforge.pmd.lang.java.ast.ASTArrayInitializer;
1
/** * BSD-style license; for more info see http://pmd.sourceforge.net/license.html */ package net.sourceforge.pmd.lang.java.rule.bestpractices; import java.util.List; import org.jaxen.JaxenException; import net.sourceforge.pmd.lang.ast.AbstractNode; import net.sourceforge.pmd.lang.ast.Node; import net.sourceforge.pmd.lang.java.ast.ASTAllocationExpression; import net.sourceforge.pmd.lang.java.ast.ASTArrayInitializer; import net.sourceforge.pmd.lang.java.ast.ASTClassOrInterfaceDeclaration; import net.sourceforge.pmd.lang.java.ast.ASTExpression; import net.sourceforge.pmd.lang.java.ast.ASTFieldDeclaration; import net.sourceforge.pmd.lang.java.ast.ASTMethodDeclaration; import net.sourceforge.pmd.lang.java.ast.ASTName; import net.sourceforge.pmd.lang.java.ast.ASTPrimaryExpression; import net.sourceforge.pmd.lang.java.ast.ASTPrimaryPrefix; import net.sourceforge.pmd.lang.java.ast.ASTPrimarySuffix; import net.sourceforge.pmd.lang.java.ast.ASTReturnStatement; import net.sourceforge.pmd.lang.java.ast.ASTVariableDeclaratorId; import net.sourceforge.pmd.lang.java.ast.ASTVariableInitializer; /** * Implementation note: this rule currently ignores return types of y.x.z, * currently it handles only local type fields. Created on Jan 17, 2005 * * @author mgriffa */ public class MethodReturnsInternalArrayRule extends AbstractSunSecureRule { @Override public Object visit(ASTClassOrInterfaceDeclaration node, Object data) { if (node.isInterface()) { return data; } return super.visit(node, data); } @Override public Object visit(ASTMethodDeclaration method, Object data) { if (!method.getResultType().returnsArray() || method.isPrivate()) { return data; } List<ASTReturnStatement> returns = method.findDescendantsOfType(ASTReturnStatement.class); ASTClassOrInterfaceDeclaration td = method.getFirstParentOfType(ASTClassOrInterfaceDeclaration.class); for (ASTReturnStatement ret : returns) { final String vn = getReturnedVariableName(ret); if (!isField(vn, td)) { continue; } if (ret.findDescendantsOfType(ASTPrimarySuffix.class).size() > 2) { continue; } if (ret.hasDescendantOfType(ASTAllocationExpression.class)) { continue; } if (hasArraysCopyOf(ret)) { continue; } if (hasClone(ret, vn)) { continue; } if (isEmptyArray(vn, td)) { continue; } if (!isLocalVariable(vn, method)) { addViolation(data, ret, vn); } else { // This is to handle field hiding final ASTPrimaryPrefix pp = ret.getFirstDescendantOfType(ASTPrimaryPrefix.class); if (pp != null && pp.usesThisModifier()) { final ASTPrimarySuffix ps = ret.getFirstDescendantOfType(ASTPrimarySuffix.class); if (ps.hasImageEqualTo(vn)) { addViolation(data, ret, vn); } } } } return data; } private boolean hasClone(ASTReturnStatement ret, String varName) { List<ASTPrimaryExpression> expressions = ret.findDescendantsOfType(ASTPrimaryExpression.class); for (ASTPrimaryExpression e : expressions) { if (e.jjtGetChild(0) instanceof ASTPrimaryPrefix && e.jjtGetNumChildren() == 2 && e.jjtGetChild(1) instanceof ASTPrimarySuffix && ((ASTPrimarySuffix) e.jjtGetChild(1)).isArguments() && ((ASTPrimarySuffix) e.jjtGetChild(1)).getArgumentCount() == 0) { ASTName name = e.getFirstDescendantOfType(ASTName.class); if (name != null && name.hasImageEqualTo(varName + ".clone")) { return true; } } } return false; } private boolean hasArraysCopyOf(ASTReturnStatement ret) { List<ASTPrimaryExpression> expressions = ret.findDescendantsOfType(ASTPrimaryExpression.class); for (ASTPrimaryExpression e : expressions) { if (e.jjtGetNumChildren() == 2 && e.jjtGetChild(0) instanceof ASTPrimaryPrefix && 
e.jjtGetChild(0).jjtGetNumChildren() == 1 && e.jjtGetChild(0).jjtGetChild(0) instanceof ASTName && e.jjtGetChild(0).jjtGetChild(0).getImage().endsWith("Arrays.copyOf")) { return true; } } return false; } private boolean isEmptyArray(String varName, AbstractNode typeDeclaration) { final List<ASTFieldDeclaration> fds = typeDeclaration.findDescendantsOfType(ASTFieldDeclaration.class); if (fds != null) { for (ASTFieldDeclaration fd : fds) { final ASTVariableDeclaratorId vid = fd.getFirstDescendantOfType(ASTVariableDeclaratorId.class); if (vid != null && vid.hasImageEqualTo(varName)) { ASTVariableInitializer initializer = fd.getFirstDescendantOfType(ASTVariableInitializer.class); if (initializer != null && initializer.jjtGetNumChildren() == 1) { Node child = initializer.jjtGetChild(0); if (child instanceof ASTArrayInitializer && child.jjtGetNumChildren() == 0) { return true; } else if (child instanceof ASTExpression) { try { List<? extends Node> arrayAllocation = child.findChildNodesWithXPath( "./PrimaryExpression/PrimaryPrefix/AllocationExpression/ArrayDimsAndInits/Expression/PrimaryExpression/PrimaryPrefix/Literal[@IntLiteral=\"true\"][@Image=\"0\"]"); if (arrayAllocation != null && arrayAllocation.size() == 1) { return true; } } catch (JaxenException e) { return false; } } } } } } return false; } }
1
15,990
@KroArtem This is the Checkstyle violation. This import should be grouped with other `net.sourceforge.pmd` imports below, in alphabetical order
pmd-pmd
java
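The oldf in this row is PMD's MethodReturnsInternalArrayRule, which flags methods that hand out a reference to an internal array and skips returns that go through clone() or Arrays.copyOf. For readers unfamiliar with the defect the rule targets, a minimal sketch in Go (not PMD code; names are illustrative) of the problem and of the defensive-copy pattern the rule wants to see:

package main

import "fmt"

type config struct {
	hosts []string // internal state the type is supposed to own
}

// Hosts returns the internal slice directly, so callers can mutate c.hosts.
func (c *config) Hosts() []string { return c.hosts }

// HostsCopy returns a defensive copy; mutations by callers stay local.
func (c *config) HostsCopy() []string {
	out := make([]string, len(c.hosts))
	copy(out, c.hosts)
	return out
}

func main() {
	c := &config{hosts: []string{"a", "b"}}
	c.Hosts()[0] = "mutated" // internal state changed through the getter
	fmt.Println(c.hosts)     // [mutated b]
	c.HostsCopy()[0] = "x"   // only the copy changes
	fmt.Println(c.hosts)     // [mutated b]
}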
@@ -47,6 +47,7 @@ import (
 	"github.com/ncw/rclone/lib/rest"
 	"github.com/ncw/swift"
 	"github.com/pkg/errors"
+	"github.com/ncw/rclone/fs"
 )
 
 // Register with Fs
1
// Package s3 provides an interface to Amazon S3 oject storage package s3 // FIXME need to prevent anything but ListDir working for s3:// /* Progress of port to aws-sdk * Don't really need o.meta at all? What happens if you CTRL-C a multipart upload * get an incomplete upload * disappears when you delete the bucket */ import ( "encoding/base64" "encoding/hex" "fmt" "io" "net/http" "path" "regexp" "strings" "sync" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/corehandlers" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" "github.com/aws/aws-sdk-go/aws/defaults" "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3/s3manager" "github.com/ncw/rclone/fs" "github.com/ncw/rclone/fs/config/configmap" "github.com/ncw/rclone/fs/config/configstruct" "github.com/ncw/rclone/fs/fserrors" "github.com/ncw/rclone/fs/fshttp" "github.com/ncw/rclone/fs/hash" "github.com/ncw/rclone/fs/walk" "github.com/ncw/rclone/lib/pacer" "github.com/ncw/rclone/lib/rest" "github.com/ncw/swift" "github.com/pkg/errors" ) // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "s3", Description: "Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)", NewFs: NewFs, Options: []fs.Option{{ Name: fs.ConfigProvider, Help: "Choose your S3 provider.", Examples: []fs.OptionExample{{ Value: "AWS", Help: "Amazon Web Services (AWS) S3", }, { Value: "Alibaba", Help: "Alibaba Cloud Object Storage System (OSS) formerly Aliyun", }, { Value: "Ceph", Help: "Ceph Object Storage", }, { Value: "DigitalOcean", Help: "Digital Ocean Spaces", }, { Value: "Dreamhost", Help: "Dreamhost DreamObjects", }, { Value: "IBMCOS", Help: "IBM COS S3", }, { Value: "Minio", Help: "Minio Object Storage", }, { Value: "Netease", Help: "Netease Object Storage (NOS)", }, { Value: "Wasabi", Help: "Wasabi Object Storage", }, { Value: "Other", Help: "Any other S3 compatible provider", }}, }, { Name: "env_auth", Help: "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).\nOnly applies if access_key_id and secret_access_key is blank.", Default: false, Examples: []fs.OptionExample{{ Value: "false", Help: "Enter AWS credentials in the next step", }, { Value: "true", Help: "Get AWS credentials from the environment (env vars or IAM)", }}, }, { Name: "access_key_id", Help: "AWS Access Key ID.\nLeave blank for anonymous access or runtime credentials.", }, { Name: "secret_access_key", Help: "AWS Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.", }, { Name: "region", Help: "Region to connect to.", Provider: "AWS", Examples: []fs.OptionExample{{ Value: "us-east-1", Help: "The default endpoint - a good choice if you are unsure.\nUS Region, Northern Virginia or Pacific Northwest.\nLeave location constraint empty.", }, { Value: "us-east-2", Help: "US East (Ohio) Region\nNeeds location constraint us-east-2.", }, { Value: "us-west-2", Help: "US West (Oregon) Region\nNeeds location constraint us-west-2.", }, { Value: "us-west-1", Help: "US West (Northern California) Region\nNeeds location constraint us-west-1.", }, { Value: "ca-central-1", Help: "Canada (Central) Region\nNeeds location constraint ca-central-1.", }, { Value: "eu-west-1", Help: "EU (Ireland) Region\nNeeds location constraint EU 
or eu-west-1.", }, { Value: "eu-west-2", Help: "EU (London) Region\nNeeds location constraint eu-west-2.", }, { Value: "eu-north-1", Help: "EU (Stockholm) Region\nNeeds location constraint eu-north-1.", }, { Value: "eu-central-1", Help: "EU (Frankfurt) Region\nNeeds location constraint eu-central-1.", }, { Value: "ap-southeast-1", Help: "Asia Pacific (Singapore) Region\nNeeds location constraint ap-southeast-1.", }, { Value: "ap-southeast-2", Help: "Asia Pacific (Sydney) Region\nNeeds location constraint ap-southeast-2.", }, { Value: "ap-northeast-1", Help: "Asia Pacific (Tokyo) Region\nNeeds location constraint ap-northeast-1.", }, { Value: "ap-northeast-2", Help: "Asia Pacific (Seoul)\nNeeds location constraint ap-northeast-2.", }, { Value: "ap-south-1", Help: "Asia Pacific (Mumbai)\nNeeds location constraint ap-south-1.", }, { Value: "sa-east-1", Help: "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1.", }}, }, { Name: "region", Help: "Region to connect to.\nLeave blank if you are using an S3 clone and you don't have a region.", Provider: "!AWS,Alibaba", Examples: []fs.OptionExample{{ Value: "", Help: "Use this if unsure. Will use v4 signatures and an empty region.", }, { Value: "other-v2-signature", Help: "Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.", }}, }, { Name: "endpoint", Help: "Endpoint for S3 API.\nLeave blank if using AWS to use the default endpoint for the region.", Provider: "AWS", }, { Name: "endpoint", Help: "Endpoint for IBM COS S3 API.\nSpecify if using an IBM COS On Premise.", Provider: "IBMCOS", Examples: []fs.OptionExample{{ Value: "s3-api.us-geo.objectstorage.softlayer.net", Help: "US Cross Region Endpoint", }, { Value: "s3-api.dal.us-geo.objectstorage.softlayer.net", Help: "US Cross Region Dallas Endpoint", }, { Value: "s3-api.wdc-us-geo.objectstorage.softlayer.net", Help: "US Cross Region Washington DC Endpoint", }, { Value: "s3-api.sjc-us-geo.objectstorage.softlayer.net", Help: "US Cross Region San Jose Endpoint", }, { Value: "s3-api.us-geo.objectstorage.service.networklayer.com", Help: "US Cross Region Private Endpoint", }, { Value: "s3-api.dal-us-geo.objectstorage.service.networklayer.com", Help: "US Cross Region Dallas Private Endpoint", }, { Value: "s3-api.wdc-us-geo.objectstorage.service.networklayer.com", Help: "US Cross Region Washington DC Private Endpoint", }, { Value: "s3-api.sjc-us-geo.objectstorage.service.networklayer.com", Help: "US Cross Region San Jose Private Endpoint", }, { Value: "s3.us-east.objectstorage.softlayer.net", Help: "US Region East Endpoint", }, { Value: "s3.us-east.objectstorage.service.networklayer.com", Help: "US Region East Private Endpoint", }, { Value: "s3.us-south.objectstorage.softlayer.net", Help: "US Region South Endpoint", }, { Value: "s3.us-south.objectstorage.service.networklayer.com", Help: "US Region South Private Endpoint", }, { Value: "s3.eu-geo.objectstorage.softlayer.net", Help: "EU Cross Region Endpoint", }, { Value: "s3.fra-eu-geo.objectstorage.softlayer.net", Help: "EU Cross Region Frankfurt Endpoint", }, { Value: "s3.mil-eu-geo.objectstorage.softlayer.net", Help: "EU Cross Region Milan Endpoint", }, { Value: "s3.ams-eu-geo.objectstorage.softlayer.net", Help: "EU Cross Region Amsterdam Endpoint", }, { Value: "s3.eu-geo.objectstorage.service.networklayer.com", Help: "EU Cross Region Private Endpoint", }, { Value: "s3.fra-eu-geo.objectstorage.service.networklayer.com", Help: "EU Cross Region Frankfurt Private Endpoint", }, { Value: 
"s3.mil-eu-geo.objectstorage.service.networklayer.com", Help: "EU Cross Region Milan Private Endpoint", }, { Value: "s3.ams-eu-geo.objectstorage.service.networklayer.com", Help: "EU Cross Region Amsterdam Private Endpoint", }, { Value: "s3.eu-gb.objectstorage.softlayer.net", Help: "Great Britain Endpoint", }, { Value: "s3.eu-gb.objectstorage.service.networklayer.com", Help: "Great Britain Private Endpoint", }, { Value: "s3.ap-geo.objectstorage.softlayer.net", Help: "APAC Cross Regional Endpoint", }, { Value: "s3.tok-ap-geo.objectstorage.softlayer.net", Help: "APAC Cross Regional Tokyo Endpoint", }, { Value: "s3.hkg-ap-geo.objectstorage.softlayer.net", Help: "APAC Cross Regional HongKong Endpoint", }, { Value: "s3.seo-ap-geo.objectstorage.softlayer.net", Help: "APAC Cross Regional Seoul Endpoint", }, { Value: "s3.ap-geo.objectstorage.service.networklayer.com", Help: "APAC Cross Regional Private Endpoint", }, { Value: "s3.tok-ap-geo.objectstorage.service.networklayer.com", Help: "APAC Cross Regional Tokyo Private Endpoint", }, { Value: "s3.hkg-ap-geo.objectstorage.service.networklayer.com", Help: "APAC Cross Regional HongKong Private Endpoint", }, { Value: "s3.seo-ap-geo.objectstorage.service.networklayer.com", Help: "APAC Cross Regional Seoul Private Endpoint", }, { Value: "s3.mel01.objectstorage.softlayer.net", Help: "Melbourne Single Site Endpoint", }, { Value: "s3.mel01.objectstorage.service.networklayer.com", Help: "Melbourne Single Site Private Endpoint", }, { Value: "s3.tor01.objectstorage.softlayer.net", Help: "Toronto Single Site Endpoint", }, { Value: "s3.tor01.objectstorage.service.networklayer.com", Help: "Toronto Single Site Private Endpoint", }}, }, { // oss endpoints: https://help.aliyun.com/document_detail/31837.html Name: "endpoint", Help: "Endpoint for OSS API.", Provider: "Alibaba", Examples: []fs.OptionExample{{ Value: "oss-cn-hangzhou.aliyuncs.com", Help: "East China 1 (Hangzhou)", }, { Value: "oss-cn-shanghai.aliyuncs.com", Help: "East China 2 (Shanghai)", }, { Value: "oss-cn-qingdao.aliyuncs.com", Help: "North China 1 (Qingdao)", }, { Value: "oss-cn-beijing.aliyuncs.com", Help: "North China 2 (Beijing)", }, { Value: "oss-cn-zhangjiakou.aliyuncs.com", Help: "North China 3 (Zhangjiakou)", }, { Value: "oss-cn-huhehaote.aliyuncs.com", Help: "North China 5 (Huhehaote)", }, { Value: "oss-cn-shenzhen.aliyuncs.com", Help: "South China 1 (Shenzhen)", }, { Value: "oss-cn-hongkong.aliyuncs.com", Help: "Hong Kong (Hong Kong)", }, { Value: "oss-us-west-1.aliyuncs.com", Help: "US West 1 (Silicon Valley)", }, { Value: "oss-us-east-1.aliyuncs.com", Help: "US East 1 (Virginia)", }, { Value: "oss-ap-southeast-1.aliyuncs.com", Help: "Southeast Asia Southeast 1 (Singapore)", }, { Value: "oss-ap-southeast-2.aliyuncs.com", Help: "Asia Pacific Southeast 2 (Sydney)", }, { Value: "oss-ap-southeast-3.aliyuncs.com", Help: "Southeast Asia Southeast 3 (Kuala Lumpur)", }, { Value: "oss-ap-southeast-5.aliyuncs.com", Help: "Asia Pacific Southeast 5 (Jakarta)", }, { Value: "oss-ap-northeast-1.aliyuncs.com", Help: "Asia Pacific Northeast 1 (Japan)", }, { Value: "oss-ap-south-1.aliyuncs.com", Help: "Asia Pacific South 1 (Mumbai)", }, { Value: "oss-eu-central-1.aliyuncs.com", Help: "Central Europe 1 (Frankfurt)", }, { Value: "oss-eu-west-1.aliyuncs.com", Help: "West Europe (London)", }, { Value: "oss-me-east-1.aliyuncs.com", Help: "Middle East 1 (Dubai)", }}, }, { Name: "endpoint", Help: "Endpoint for S3 API.\nRequired when using an S3 clone.", Provider: "!AWS,IBMCOS,Alibaba", Examples: 
[]fs.OptionExample{{ Value: "objects-us-east-1.dream.io", Help: "Dream Objects endpoint", Provider: "Dreamhost", }, { Value: "nyc3.digitaloceanspaces.com", Help: "Digital Ocean Spaces New York 3", Provider: "DigitalOcean", }, { Value: "ams3.digitaloceanspaces.com", Help: "Digital Ocean Spaces Amsterdam 3", Provider: "DigitalOcean", }, { Value: "sgp1.digitaloceanspaces.com", Help: "Digital Ocean Spaces Singapore 1", Provider: "DigitalOcean", }, { Value: "s3.wasabisys.com", Help: "Wasabi US East endpoint", Provider: "Wasabi", }, { Value: "s3.us-west-1.wasabisys.com", Help: "Wasabi US West endpoint", Provider: "Wasabi", }, { Value: "s3.eu-central-1.wasabisys.com", Help: "Wasabi EU Central endpoint", Provider: "Wasabi", }}, }, { Name: "location_constraint", Help: "Location constraint - must be set to match the Region.\nUsed when creating buckets only.", Provider: "AWS", Examples: []fs.OptionExample{{ Value: "", Help: "Empty for US Region, Northern Virginia or Pacific Northwest.", }, { Value: "us-east-2", Help: "US East (Ohio) Region.", }, { Value: "us-west-2", Help: "US West (Oregon) Region.", }, { Value: "us-west-1", Help: "US West (Northern California) Region.", }, { Value: "ca-central-1", Help: "Canada (Central) Region.", }, { Value: "eu-west-1", Help: "EU (Ireland) Region.", }, { Value: "eu-west-2", Help: "EU (London) Region.", }, { Value: "eu-north-1", Help: "EU (Stockholm) Region.", }, { Value: "EU", Help: "EU Region.", }, { Value: "ap-southeast-1", Help: "Asia Pacific (Singapore) Region.", }, { Value: "ap-southeast-2", Help: "Asia Pacific (Sydney) Region.", }, { Value: "ap-northeast-1", Help: "Asia Pacific (Tokyo) Region.", }, { Value: "ap-northeast-2", Help: "Asia Pacific (Seoul)", }, { Value: "ap-south-1", Help: "Asia Pacific (Mumbai)", }, { Value: "sa-east-1", Help: "South America (Sao Paulo) Region.", }}, }, { Name: "location_constraint", Help: "Location constraint - must match endpoint when using IBM Cloud Public.\nFor on-prem COS, do not make a selection from this list, hit enter", Provider: "IBMCOS", Examples: []fs.OptionExample{{ Value: "us-standard", Help: "US Cross Region Standard", }, { Value: "us-vault", Help: "US Cross Region Vault", }, { Value: "us-cold", Help: "US Cross Region Cold", }, { Value: "us-flex", Help: "US Cross Region Flex", }, { Value: "us-east-standard", Help: "US East Region Standard", }, { Value: "us-east-vault", Help: "US East Region Vault", }, { Value: "us-east-cold", Help: "US East Region Cold", }, { Value: "us-east-flex", Help: "US East Region Flex", }, { Value: "us-south-standard", Help: "US South Region Standard", }, { Value: "us-south-vault", Help: "US South Region Vault", }, { Value: "us-south-cold", Help: "US South Region Cold", }, { Value: "us-south-flex", Help: "US South Region Flex", }, { Value: "eu-standard", Help: "EU Cross Region Standard", }, { Value: "eu-vault", Help: "EU Cross Region Vault", }, { Value: "eu-cold", Help: "EU Cross Region Cold", }, { Value: "eu-flex", Help: "EU Cross Region Flex", }, { Value: "eu-gb-standard", Help: "Great Britain Standard", }, { Value: "eu-gb-vault", Help: "Great Britain Vault", }, { Value: "eu-gb-cold", Help: "Great Britain Cold", }, { Value: "eu-gb-flex", Help: "Great Britain Flex", }, { Value: "ap-standard", Help: "APAC Standard", }, { Value: "ap-vault", Help: "APAC Vault", }, { Value: "ap-cold", Help: "APAC Cold", }, { Value: "ap-flex", Help: "APAC Flex", }, { Value: "mel01-standard", Help: "Melbourne Standard", }, { Value: "mel01-vault", Help: "Melbourne Vault", }, { Value: "mel01-cold", Help: 
"Melbourne Cold", }, { Value: "mel01-flex", Help: "Melbourne Flex", }, { Value: "tor01-standard", Help: "Toronto Standard", }, { Value: "tor01-vault", Help: "Toronto Vault", }, { Value: "tor01-cold", Help: "Toronto Cold", }, { Value: "tor01-flex", Help: "Toronto Flex", }}, }, { Name: "location_constraint", Help: "Location constraint - must be set to match the Region.\nLeave blank if not sure. Used when creating buckets only.", Provider: "!AWS,IBMCOS,Alibaba", }, { Name: "acl", Help: `Canned ACL used when creating buckets and storing or copying objects. This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too. For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl Note that this ACL is applied when server side copying objects as S3 doesn't copy the ACL from the source but rather writes a fresh one.`, Examples: []fs.OptionExample{{ Value: "private", Help: "Owner gets FULL_CONTROL. No one else has access rights (default).", Provider: "!IBMCOS", }, { Value: "public-read", Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ access.", Provider: "!IBMCOS", }, { Value: "public-read-write", Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.", Provider: "!IBMCOS", }, { Value: "authenticated-read", Help: "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access.", Provider: "!IBMCOS", }, { Value: "bucket-owner-read", Help: "Object owner gets FULL_CONTROL. Bucket owner gets READ access.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.", Provider: "!IBMCOS", }, { Value: "bucket-owner-full-control", Help: "Both the object owner and the bucket owner get FULL_CONTROL over the object.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.", Provider: "!IBMCOS", }, { Value: "private", Help: "Owner gets FULL_CONTROL. No one else has access rights (default). This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise COS", Provider: "IBMCOS", }, { Value: "public-read", Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ access. This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise IBM COS", Provider: "IBMCOS", }, { Value: "public-read-write", Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access. This acl is available on IBM Cloud (Infra), On-Premise IBM COS", Provider: "IBMCOS", }, { Value: "authenticated-read", Help: "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access. Not supported on Buckets. This acl is available on IBM Cloud (Infra) and On-Premise IBM COS", Provider: "IBMCOS", }}, }, { Name: "bucket_acl", Help: `Canned ACL used when creating buckets. For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl Note that this ACL is applied when only when creating buckets. If it isn't set then "acl" is used instead.`, Advanced: true, Examples: []fs.OptionExample{{ Value: "private", Help: "Owner gets FULL_CONTROL. No one else has access rights (default).", }, { Value: "public-read", Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ access.", }, { Value: "public-read-write", Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.", }, { Value: "authenticated-read", Help: "Owner gets FULL_CONTROL. 
The AuthenticatedUsers group gets READ access.", }}, }, { Name: "server_side_encryption", Help: "The server-side encryption algorithm used when storing this object in S3.", Provider: "AWS", Examples: []fs.OptionExample{{ Value: "", Help: "None", }, { Value: "AES256", Help: "AES256", }, { Value: "aws:kms", Help: "aws:kms", }}, }, { Name: "sse_kms_key_id", Help: "If using KMS ID you must provide the ARN of Key.", Provider: "AWS", Examples: []fs.OptionExample{{ Value: "", Help: "None", }, { Value: "arn:aws:kms:us-east-1:*", Help: "arn:aws:kms:*", }}, }, { Name: "storage_class", Help: "The storage class to use when storing new objects in S3.", Provider: "AWS", Examples: []fs.OptionExample{{ Value: "", Help: "Default", }, { Value: "STANDARD", Help: "Standard storage class", }, { Value: "REDUCED_REDUNDANCY", Help: "Reduced redundancy storage class", }, { Value: "STANDARD_IA", Help: "Standard Infrequent Access storage class", }, { Value: "ONEZONE_IA", Help: "One Zone Infrequent Access storage class", }, { Value: "GLACIER", Help: "Glacier storage class", }, { Value: "DEEP_ARCHIVE", Help: "Glacier Deep Archive storage class", }}, }, { // Mapping from here: https://www.alibabacloud.com/help/doc-detail/64919.htm Name: "storage_class", Help: "The storage class to use when storing new objects in OSS.", Provider: "Alibaba", Examples: []fs.OptionExample{{ Value: "", Help: "Default", }, { Value: "STANDARD", Help: "Standard storage class", }, { Value: "GLACIER", Help: "Archive storage mode.", }, { Value: "STANDARD_IA", Help: "Infrequent access storage mode.", }}, }, { Name: "upload_cutoff", Help: `Cutoff for switching to chunked upload Any files larger than this will be uploaded in chunks of chunk_size. The minimum is 0 and the maximum is 5GB.`, Default: defaultUploadCutoff, Advanced: true, }, { Name: "chunk_size", Help: `Chunk size to use for uploading. When uploading files larger than upload_cutoff they will be uploaded as multipart uploads using this chunk size. Note that "--s3-upload-concurrency" chunks of this size are buffered in memory per transfer. If you are transferring large files over high speed links and you have enough memory, then increasing this will speed up the transfers.`, Default: minChunkSize, Advanced: true, }, { Name: "disable_checksum", Help: "Don't store MD5 checksum with object metadata", Default: false, Advanced: true, }, { Name: "session_token", Help: "An AWS session token", Advanced: true, }, { Name: "upload_concurrency", Help: `Concurrency for multipart uploads. This is the number of chunks of the same file that are uploaded concurrently. If you are uploading small numbers of large file over high speed link and these uploads do not fully utilize your bandwidth, then increasing this may help to speed up the transfers.`, Default: 4, Advanced: true, }, { Name: "force_path_style", Help: `If true use path style access if false use virtual hosted style. If this is true (the default) then rclone will use path style access, if false then rclone will use virtual path style. See [the AWS S3 docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) for more info. Some providers (eg Aliyun OSS or Netease COS) require this set to false.`, Default: true, Advanced: true, }, { Name: "v2_auth", Help: `If true use v2 authentication. If this is false (the default) then rclone will use v4 authentication. If it is set then rclone will use v2 authentication. 
Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.`, Default: false, Advanced: true, }, { Name: "use_accelerate_endpoint", Provider: "AWS", Help: `If true use the AWS S3 accelerated endpoint. See: [AWS S3 Transfer acceleration](https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration-examples.html)`, Default: false, Advanced: true, }}, }) } // Constants const ( metaMtime = "Mtime" // the meta key to store mtime in - eg X-Amz-Meta-Mtime metaMD5Hash = "Md5chksum" // the meta key to store md5hash in listChunkSize = 1000 // number of items to read at once maxRetries = 10 // number of retries to make of operations maxSizeForCopy = 5 * 1024 * 1024 * 1024 // The maximum size of object we can COPY maxFileSize = 5 * 1024 * 1024 * 1024 * 1024 // largest possible upload file size minChunkSize = fs.SizeSuffix(s3manager.MinUploadPartSize) defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024) maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024) minSleep = 10 * time.Millisecond // In case of error, start at 10ms sleep. ) // Options defines the configuration for this backend type Options struct { Provider string `config:"provider"` EnvAuth bool `config:"env_auth"` AccessKeyID string `config:"access_key_id"` SecretAccessKey string `config:"secret_access_key"` Region string `config:"region"` Endpoint string `config:"endpoint"` LocationConstraint string `config:"location_constraint"` ACL string `config:"acl"` BucketACL string `config:"bucket_acl"` ServerSideEncryption string `config:"server_side_encryption"` SSEKMSKeyID string `config:"sse_kms_key_id"` StorageClass string `config:"storage_class"` UploadCutoff fs.SizeSuffix `config:"upload_cutoff"` ChunkSize fs.SizeSuffix `config:"chunk_size"` DisableChecksum bool `config:"disable_checksum"` SessionToken string `config:"session_token"` UploadConcurrency int `config:"upload_concurrency"` ForcePathStyle bool `config:"force_path_style"` V2Auth bool `config:"v2_auth"` UseAccelerateEndpoint bool `config:"use_accelerate_endpoint"` } // Fs represents a remote s3 server type Fs struct { name string // the name of the remote root string // root of the bucket - ignore all objects above this opt Options // parsed options features *fs.Features // optional features c *s3.S3 // the connection to the s3 server ses *session.Session // the s3 session bucket string // the bucket we are working on bucketOKMu sync.Mutex // mutex to protect bucket OK bucketOK bool // true if we have created the bucket bucketDeleted bool // true if we have deleted the bucket pacer *fs.Pacer // To pace the API calls srv *http.Client // a plain http client } // Object describes a s3 object type Object struct { // Will definitely have everything but meta which may be nil // // List will read everything but meta & mimeType - to fill // that in you need to call readMetaData fs *Fs // what this object is part of remote string // The remote path etag string // md5sum of the object bytes int64 // size of the object lastModified time.Time // Last modified meta map[string]*string // The object metadata if known - may be nil mimeType string // MimeType of object - may be "" } // ------------------------------------------------------------ // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { if f.root == "" { return f.bucket } return f.bucket + "/" + f.root } // String converts this Fs to a string func (f *Fs) String() string { if f.root == "" { return fmt.Sprintf("S3 
bucket %s", f.bucket) } return fmt.Sprintf("S3 bucket %s path %s", f.bucket, f.root) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // retryErrorCodes is a slice of error codes that we will retry // See: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html var retryErrorCodes = []int{ // 409, // Conflict - various states that could be resolved on a retry 503, // Service Unavailable/Slow Down - "Reduce your request rate" } //S3 is pretty resilient, and the built in retry handling is probably sufficient // as it should notice closed connections and timeouts which are the most likely // sort of failure modes func (f *Fs) shouldRetry(err error) (bool, error) { // If this is an awserr object, try and extract more useful information to determine if we should retry if awsError, ok := err.(awserr.Error); ok { // Simple case, check the original embedded error in case it's generically retryable if fserrors.ShouldRetry(awsError.OrigErr()) { return true, err } // Failing that, if it's a RequestFailure it's probably got an http status code we can check if reqErr, ok := err.(awserr.RequestFailure); ok { // 301 if wrong region for bucket if reqErr.StatusCode() == http.StatusMovedPermanently { urfbErr := f.updateRegionForBucket() if urfbErr != nil { fs.Errorf(f, "Failed to update region for bucket: %v", urfbErr) return false, err } return true, err } for _, e := range retryErrorCodes { if reqErr.StatusCode() == e { return true, err } } } } // Ok, not an awserr, check for generic failure conditions return fserrors.ShouldRetry(err), err } // Pattern to match a s3 path var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`) // parseParse parses a s3 'url' func s3ParsePath(path string) (bucket, directory string, err error) { parts := matcher.FindStringSubmatch(path) if parts == nil { err = errors.Errorf("couldn't parse bucket out of s3 path %q", path) } else { bucket, directory = parts[1], parts[2] directory = strings.Trim(directory, "/") } return } // s3Connection makes a connection to s3 func s3Connection(opt *Options) (*s3.S3, *session.Session, error) { // Make the auth v := credentials.Value{ AccessKeyID: opt.AccessKeyID, SecretAccessKey: opt.SecretAccessKey, SessionToken: opt.SessionToken, } lowTimeoutClient := &http.Client{Timeout: 1 * time.Second} // low timeout to ec2 metadata service def := defaults.Get() def.Config.HTTPClient = lowTimeoutClient // first provider to supply a credential set "wins" providers := []credentials.Provider{ // use static credentials if they're present (checked by provider) &credentials.StaticProvider{Value: v}, // * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY // * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY &credentials.EnvProvider{}, // A SharedCredentialsProvider retrieves credentials // from the current user's home directory. It checks // AWS_SHARED_CREDENTIALS_FILE and AWS_PROFILE too. 
&credentials.SharedCredentialsProvider{}, // Pick up IAM role if we're in an ECS task defaults.RemoteCredProvider(*def.Config, def.Handlers), // Pick up IAM role in case we're on EC2 &ec2rolecreds.EC2RoleProvider{ Client: ec2metadata.New(session.New(), &aws.Config{ HTTPClient: lowTimeoutClient, }), ExpiryWindow: 3, }, } cred := credentials.NewChainCredentials(providers) switch { case opt.EnvAuth: // No need for empty checks if "env_auth" is true case v.AccessKeyID == "" && v.SecretAccessKey == "": // if no access key/secret and iam is explicitly disabled then fall back to anon interaction cred = credentials.AnonymousCredentials case v.AccessKeyID == "": return nil, nil, errors.New("access_key_id not found") case v.SecretAccessKey == "": return nil, nil, errors.New("secret_access_key not found") } if opt.Region == "" && opt.Endpoint == "" { opt.Endpoint = "https://s3.amazonaws.com/" } if opt.Region == "" { opt.Region = "us-east-1" } if opt.Provider == "Alibaba" || opt.Provider == "Netease" || opt.UseAccelerateEndpoint { opt.ForcePathStyle = false } awsConfig := aws.NewConfig(). WithMaxRetries(maxRetries). WithCredentials(cred). WithHTTPClient(fshttp.NewClient(fs.Config)). WithS3ForcePathStyle(opt.ForcePathStyle). WithS3UseAccelerate(opt.UseAccelerateEndpoint) if opt.Region != "" { awsConfig.WithRegion(opt.Region) } if opt.Endpoint != "" { awsConfig.WithEndpoint(opt.Endpoint) } // awsConfig.WithLogLevel(aws.LogDebugWithSigning) awsSessionOpts := session.Options{ Config: *awsConfig, } if opt.EnvAuth && opt.AccessKeyID == "" && opt.SecretAccessKey == "" { // Enable loading config options from ~/.aws/config (selected by AWS_PROFILE env) awsSessionOpts.SharedConfigState = session.SharedConfigEnable // The session constructor (aws/session/mergeConfigSrcs) will only use the user's preferred credential source // (from the shared config file) if the passed-in Options.Config.Credentials is nil. 
awsSessionOpts.Config.Credentials = nil } ses, err := session.NewSessionWithOptions(awsSessionOpts) if err != nil { return nil, nil, err } c := s3.New(ses) if opt.V2Auth || opt.Region == "other-v2-signature" { fs.Debugf(nil, "Using v2 auth") signer := func(req *request.Request) { // Ignore AnonymousCredentials object if req.Config.Credentials == credentials.AnonymousCredentials { return } sign(v.AccessKeyID, v.SecretAccessKey, req.HTTPRequest) } c.Handlers.Sign.Clear() c.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) c.Handlers.Sign.PushBack(signer) } return c, ses, nil } func checkUploadChunkSize(cs fs.SizeSuffix) error { if cs < minChunkSize { return errors.Errorf("%s is less than %s", cs, minChunkSize) } return nil } func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) { err = checkUploadChunkSize(cs) if err == nil { old, f.opt.ChunkSize = f.opt.ChunkSize, cs } return } func checkUploadCutoff(cs fs.SizeSuffix) error { if cs > maxUploadCutoff { return errors.Errorf("%s is greater than %s", cs, maxUploadCutoff) } return nil } func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) { err = checkUploadCutoff(cs) if err == nil { old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs } return } // NewFs constructs an Fs from the path, bucket:path func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } err = checkUploadChunkSize(opt.ChunkSize) if err != nil { return nil, errors.Wrap(err, "s3: chunk size") } err = checkUploadCutoff(opt.UploadCutoff) if err != nil { return nil, errors.Wrap(err, "s3: upload cutoff") } bucket, directory, err := s3ParsePath(root) if err != nil { return nil, err } if opt.ACL == "" { opt.ACL = "private" } if opt.BucketACL == "" { opt.BucketACL = opt.ACL } c, ses, err := s3Connection(opt) if err != nil { return nil, err } f := &Fs{ name: name, root: directory, opt: *opt, c: c, bucket: bucket, ses: ses, pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))), srv: fshttp.NewClient(fs.Config), } f.features = (&fs.Features{ ReadMimeType: true, WriteMimeType: true, BucketBased: true, }).Fill(f) if f.root != "" { f.root += "/" // Check to see if the object exists req := s3.HeadObjectInput{ Bucket: &f.bucket, Key: &directory, } err = f.pacer.Call(func() (bool, error) { _, err = f.c.HeadObject(&req) return f.shouldRetry(err) }) if err == nil { f.root = path.Dir(directory) if f.root == "." { f.root = "" } else { f.root += "/" } // return an error with an fs which points to the parent return f, fs.ErrorIsFile } } // f.listMultipartUploads() return f, nil } // Return an Object from a path // //If it can't be found it returns the error ErrorObjectNotFound. func (f *Fs) newObjectWithInfo(remote string, info *s3.Object) (fs.Object, error) { o := &Object{ fs: f, remote: remote, } if info != nil { // Set info but not meta if info.LastModified == nil { fs.Logf(o, "Failed to read last modified") o.lastModified = time.Now() } else { o.lastModified = *info.LastModified } o.etag = aws.StringValue(info.ETag) o.bytes = aws.Int64Value(info.Size) } else { err := o.readMetaData() // reads info and meta, returning an error if err != nil { return nil, err } } return o, nil } // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. 
func (f *Fs) NewObject(remote string) (fs.Object, error) { return f.newObjectWithInfo(remote, nil) } // Gets the bucket location func (f *Fs) getBucketLocation() (string, error) { req := s3.GetBucketLocationInput{ Bucket: &f.bucket, } var resp *s3.GetBucketLocationOutput var err error err = f.pacer.Call(func() (bool, error) { resp, err = f.c.GetBucketLocation(&req) return f.shouldRetry(err) }) if err != nil { return "", err } return s3.NormalizeBucketLocation(aws.StringValue(resp.LocationConstraint)), nil } // Updates the region for the bucket by reading the region from the // bucket then updating the session. func (f *Fs) updateRegionForBucket() error { region, err := f.getBucketLocation() if err != nil { return errors.Wrap(err, "reading bucket location failed") } if aws.StringValue(f.c.Config.Endpoint) != "" { return errors.Errorf("can't set region to %q as endpoint is set", region) } if aws.StringValue(f.c.Config.Region) == region { return errors.Errorf("region is already %q - not updating", region) } // Make a new session with the new region oldRegion := f.opt.Region f.opt.Region = region c, ses, err := s3Connection(&f.opt) if err != nil { return errors.Wrap(err, "creating new session failed") } f.c = c f.ses = ses fs.Logf(f, "Switched region to %q from %q", region, oldRegion) return nil } // listFn is called from list to handle an object. type listFn func(remote string, object *s3.Object, isDirectory bool) error // list the objects into the function supplied // // dir is the starting directory, "" for root // // Set recurse to read sub directories func (f *Fs) list(dir string, recurse bool, fn listFn) error { root := f.root if dir != "" { root += dir + "/" } maxKeys := int64(listChunkSize) delimiter := "" if !recurse { delimiter = "/" } var marker *string for { // FIXME need to implement ALL loop req := s3.ListObjectsInput{ Bucket: &f.bucket, Delimiter: &delimiter, Prefix: &root, MaxKeys: &maxKeys, Marker: marker, } var resp *s3.ListObjectsOutput var err error err = f.pacer.Call(func() (bool, error) { resp, err = f.c.ListObjects(&req) return f.shouldRetry(err) }) if err != nil { if awsErr, ok := err.(awserr.RequestFailure); ok { if awsErr.StatusCode() == http.StatusNotFound { err = fs.ErrorDirNotFound } } return err } rootLength := len(f.root) if !recurse { for _, commonPrefix := range resp.CommonPrefixes { if commonPrefix.Prefix == nil { fs.Logf(f, "Nil common prefix received") continue } remote := *commonPrefix.Prefix if !strings.HasPrefix(remote, f.root) { fs.Logf(f, "Odd name received %q", remote) continue } remote = remote[rootLength:] if strings.HasSuffix(remote, "/") { remote = remote[:len(remote)-1] } err = fn(remote, &s3.Object{Key: &remote}, true) if err != nil { return err } } } for _, object := range resp.Contents { key := aws.StringValue(object.Key) if !strings.HasPrefix(key, f.root) { fs.Logf(f, "Odd name received %q", key) continue } remote := key[rootLength:] // is this a directory marker? 
if (strings.HasSuffix(remote, "/") || remote == "") && *object.Size == 0 { if recurse && remote != "" { // add a directory in if --fast-list since will have no prefixes remote = remote[:len(remote)-1] err = fn(remote, &s3.Object{Key: &remote}, true) if err != nil { return err } } continue // skip directory marker } err = fn(remote, object, false) if err != nil { return err } } if !aws.BoolValue(resp.IsTruncated) { break } // Use NextMarker if set, otherwise use last Key if resp.NextMarker == nil || *resp.NextMarker == "" { if len(resp.Contents) == 0 { return errors.New("s3 protocol error: received listing with IsTruncated set, no NextMarker and no Contents") } marker = resp.Contents[len(resp.Contents)-1].Key } else { marker = resp.NextMarker } } return nil } // Convert a list item into a DirEntry func (f *Fs) itemToDirEntry(remote string, object *s3.Object, isDirectory bool) (fs.DirEntry, error) { if isDirectory { size := int64(0) if object.Size != nil { size = *object.Size } d := fs.NewDir(remote, time.Time{}).SetSize(size) return d, nil } o, err := f.newObjectWithInfo(remote, object) if err != nil { return nil, err } return o, nil } // mark the bucket as being OK func (f *Fs) markBucketOK() { if f.bucket != "" { f.bucketOKMu.Lock() f.bucketOK = true f.bucketDeleted = false f.bucketOKMu.Unlock() } } // listDir lists files and directories to out func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) { // List the objects and directories err = f.list(dir, false, func(remote string, object *s3.Object, isDirectory bool) error { entry, err := f.itemToDirEntry(remote, object, isDirectory) if err != nil { return err } if entry != nil { entries = append(entries, entry) } return nil }) if err != nil { return nil, err } // bucket must be present if listing succeeded f.markBucketOK() return entries, nil } // listBuckets lists the buckets to out func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) { if dir != "" { return nil, fs.ErrorListBucketRequired } req := s3.ListBucketsInput{} var resp *s3.ListBucketsOutput err = f.pacer.Call(func() (bool, error) { resp, err = f.c.ListBuckets(&req) return f.shouldRetry(err) }) if err != nil { return nil, err } for _, bucket := range resp.Buckets { d := fs.NewDir(aws.StringValue(bucket.Name), aws.TimeValue(bucket.CreationDate)) entries = append(entries, d) } return entries, nil } // List the objects and directories in dir into entries. The // entries can be returned in any order but should be for a // complete directory. // // dir should be "" to list the root, and should not have // trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { if f.bucket == "" { return f.listBuckets(dir) } return f.listDir(dir) } // ListR lists the objects and directories of the Fs starting // from dir recursively into out. // // dir should be "" to start from the root, and should not // have trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. // // It should call callback for each tranche of entries read. // These need not be returned in any particular order. If // callback returns an error then the listing will stop // immediately. // // Don't implement this unless you have a more efficient way // of listing recursively that doing a directory traversal. 
func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) { if f.bucket == "" { return fs.ErrorListBucketRequired } list := walk.NewListRHelper(callback) err = f.list(dir, true, func(remote string, object *s3.Object, isDirectory bool) error { entry, err := f.itemToDirEntry(remote, object, isDirectory) if err != nil { return err } return list.Add(entry) }) if err != nil { return err } // bucket must be present if listing succeeded f.markBucketOK() return list.Flush() } // Put the Object into the bucket func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { // Temporary Object under construction fs := &Object{ fs: f, remote: src.Remote(), } return fs, fs.Update(in, src, options...) } // PutStream uploads to the remote path with the modTime given of indeterminate size func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { return f.Put(in, src, options...) } // Check if the bucket exists // // NB this can return incorrect results if called immediately after bucket deletion func (f *Fs) dirExists() (bool, error) { req := s3.HeadBucketInput{ Bucket: &f.bucket, } err := f.pacer.Call(func() (bool, error) { _, err := f.c.HeadBucket(&req) return f.shouldRetry(err) }) if err == nil { return true, nil } if err, ok := err.(awserr.RequestFailure); ok { if err.StatusCode() == http.StatusNotFound { return false, nil } } return false, err } // Mkdir creates the bucket if it doesn't exist func (f *Fs) Mkdir(dir string) error { f.bucketOKMu.Lock() defer f.bucketOKMu.Unlock() if f.bucketOK { return nil } if !f.bucketDeleted { exists, err := f.dirExists() if err == nil { f.bucketOK = exists } if err != nil || exists { return err } } req := s3.CreateBucketInput{ Bucket: &f.bucket, ACL: &f.opt.BucketACL, } if f.opt.LocationConstraint != "" { req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{ LocationConstraint: &f.opt.LocationConstraint, } } err := f.pacer.Call(func() (bool, error) { _, err := f.c.CreateBucket(&req) return f.shouldRetry(err) }) if err, ok := err.(awserr.Error); ok { if err.Code() == "BucketAlreadyOwnedByYou" { err = nil } } if err == nil { f.bucketOK = true f.bucketDeleted = false fs.Infof(f, "Bucket created with ACL %q", *req.ACL) } return err } // Rmdir deletes the bucket if the fs is at the root // // Returns an error if it isn't empty func (f *Fs) Rmdir(dir string) error { f.bucketOKMu.Lock() defer f.bucketOKMu.Unlock() if f.root != "" || dir != "" { return nil } req := s3.DeleteBucketInput{ Bucket: &f.bucket, } err := f.pacer.Call(func() (bool, error) { _, err := f.c.DeleteBucket(&req) return f.shouldRetry(err) }) if err == nil { f.bucketOK = false f.bucketDeleted = true fs.Infof(f, "Bucket deleted") } return err } // Precision of the remote func (f *Fs) Precision() time.Duration { return time.Nanosecond } // pathEscape escapes s as for a URL path. It uses rest.URLPathEscape // but also escapes '+' for S3 and Digital Ocean spaces compatibility func pathEscape(s string) string { return strings.Replace(rest.URLPathEscape(s), "+", "%2B", -1) } // Copy src to this remote using server side copy operations. 
// // This is stored with the remote path given // // It returns the destination Object and a possible error // // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { err := f.Mkdir("") if err != nil { return nil, err } srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't copy - not same remote type") return nil, fs.ErrorCantCopy } srcFs := srcObj.fs key := f.root + remote source := pathEscape(srcFs.bucket + "/" + srcFs.root + srcObj.remote) req := s3.CopyObjectInput{ Bucket: &f.bucket, ACL: &f.opt.ACL, Key: &key, CopySource: &source, MetadataDirective: aws.String(s3.MetadataDirectiveCopy), } if f.opt.ServerSideEncryption != "" { req.ServerSideEncryption = &f.opt.ServerSideEncryption } if f.opt.SSEKMSKeyID != "" { req.SSEKMSKeyId = &f.opt.SSEKMSKeyID } if f.opt.StorageClass != "" { req.StorageClass = &f.opt.StorageClass } err = f.pacer.Call(func() (bool, error) { _, err = f.c.CopyObject(&req) return f.shouldRetry(err) }) if err != nil { return nil, err } return f.NewObject(remote) } // Hashes returns the supported hash sets. func (f *Fs) Hashes() hash.Set { return hash.Set(hash.MD5) } // ------------------------------------------------------------ // Fs returns the parent Fs func (o *Object) Fs() fs.Info { return o.fs } // Return a string version func (o *Object) String() string { if o == nil { return "<nil>" } return o.remote } // Remote returns the remote path func (o *Object) Remote() string { return o.remote } var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`) // Hash returns the Md5sum of an object returning a lowercase hex string func (o *Object) Hash(t hash.Type) (string, error) { if t != hash.MD5 { return "", hash.ErrUnsupported } hash := strings.Trim(strings.ToLower(o.etag), `"`) // Check the etag is a valid md5sum if !matchMd5.MatchString(hash) { err := o.readMetaData() if err != nil { return "", err } if md5sum, ok := o.meta[metaMD5Hash]; ok { md5sumBytes, err := base64.StdEncoding.DecodeString(*md5sum) if err != nil { return "", err } hash = hex.EncodeToString(md5sumBytes) } else { hash = "" } } return hash, nil } // Size returns the size of an object in bytes func (o *Object) Size() int64 { return o.bytes } // readMetaData gets the metadata if it hasn't already been fetched // // it also sets the info func (o *Object) readMetaData() (err error) { if o.meta != nil { return nil } key := o.fs.root + o.remote req := s3.HeadObjectInput{ Bucket: &o.fs.bucket, Key: &key, } var resp *s3.HeadObjectOutput err = o.fs.pacer.Call(func() (bool, error) { var err error resp, err = o.fs.c.HeadObject(&req) return o.fs.shouldRetry(err) }) if err != nil { if awsErr, ok := err.(awserr.RequestFailure); ok { if awsErr.StatusCode() == http.StatusNotFound { return fs.ErrorObjectNotFound } } return err } var size int64 // Ignore missing Content-Length assuming it is 0 // Some versions of ceph do this due their apache proxies if resp.ContentLength != nil { size = *resp.ContentLength } o.etag = aws.StringValue(resp.ETag) o.bytes = size o.meta = resp.Metadata if resp.LastModified == nil { fs.Logf(o, "Failed to read last modified from HEAD: %v", err) o.lastModified = time.Now() } else { o.lastModified = *resp.LastModified } o.mimeType = aws.StringValue(resp.ContentType) return nil } // ModTime returns the modification time of the object // // It attempts to read the objects mtime and if that isn't present the // LastModified returned in the http headers func (o *Object) 
ModTime() time.Time { if fs.Config.UseServerModTime { return o.lastModified } err := o.readMetaData() if err != nil { fs.Logf(o, "Failed to read metadata: %v", err) return time.Now() } // read mtime out of metadata if available d, ok := o.meta[metaMtime] if !ok || d == nil { // fs.Debugf(o, "No metadata") return o.lastModified } modTime, err := swift.FloatStringToTime(*d) if err != nil { fs.Logf(o, "Failed to read mtime from object: %v", err) return o.lastModified } return modTime } // SetModTime sets the modification time of the local fs object func (o *Object) SetModTime(modTime time.Time) error { err := o.readMetaData() if err != nil { return err } o.meta[metaMtime] = aws.String(swift.TimeToFloatString(modTime)) if o.bytes >= maxSizeForCopy { fs.Debugf(o, "SetModTime is unsupported for objects bigger than %v bytes", fs.SizeSuffix(maxSizeForCopy)) return nil } // Guess the content type mimeType := fs.MimeType(o) // Copy the object to itself to update the metadata key := o.fs.root + o.remote sourceKey := o.fs.bucket + "/" + key directive := s3.MetadataDirectiveReplace // replace metadata with that passed in req := s3.CopyObjectInput{ Bucket: &o.fs.bucket, ACL: &o.fs.opt.ACL, Key: &key, ContentType: &mimeType, CopySource: aws.String(pathEscape(sourceKey)), Metadata: o.meta, MetadataDirective: &directive, } if o.fs.opt.ServerSideEncryption != "" { req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption } if o.fs.opt.SSEKMSKeyID != "" { req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID } if o.fs.opt.StorageClass != "" { req.StorageClass = &o.fs.opt.StorageClass } err = o.fs.pacer.Call(func() (bool, error) { _, err := o.fs.c.CopyObject(&req) return o.fs.shouldRetry(err) }) return err } // Storable raturns a boolean indicating if this object is storable func (o *Object) Storable() bool { return true } // Open an object for read func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { key := o.fs.root + o.remote req := s3.GetObjectInput{ Bucket: &o.fs.bucket, Key: &key, } for _, option := range options { switch option.(type) { case *fs.RangeOption, *fs.SeekOption: _, value := option.Header() req.Range = &value default: if option.Mandatory() { fs.Logf(o, "Unsupported mandatory option: %v", option) } } } var resp *s3.GetObjectOutput err = o.fs.pacer.Call(func() (bool, error) { var err error resp, err = o.fs.c.GetObject(&req) return o.fs.shouldRetry(err) }) if err, ok := err.(awserr.RequestFailure); ok { if err.Code() == "InvalidObjectState" { return nil, errors.Errorf("Object in GLACIER, restore first: %v", key) } } if err != nil { return nil, err } return resp.Body, nil } // Update the Object from in with modTime and size func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { err := o.fs.Mkdir("") if err != nil { return err } modTime := src.ModTime() size := src.Size() multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff) var uploader *s3manager.Uploader if multipart { uploader = s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) { u.Concurrency = o.fs.opt.UploadConcurrency u.LeavePartsOnError = false u.S3 = o.fs.c u.PartSize = int64(o.fs.opt.ChunkSize) if size == -1 { // Make parts as small as possible while still being able to upload to the // S3 file size limit. Rounded up to nearest MB. u.PartSize = (((maxFileSize / s3manager.MaxUploadParts) >> 20) + 1) << 20 return } // Adjust PartSize until the number of parts is small enough. 
if size/u.PartSize >= s3manager.MaxUploadParts { // Calculate partition size rounded up to the nearest MB u.PartSize = (((size / s3manager.MaxUploadParts) >> 20) + 1) << 20 } }) } // Set the mtime in the meta data metadata := map[string]*string{ metaMtime: aws.String(swift.TimeToFloatString(modTime)), } // read the md5sum if available for non multpart and if // disable checksum isn't present. var md5sum string if !multipart || !o.fs.opt.DisableChecksum { hash, err := src.Hash(hash.MD5) if err == nil && matchMd5.MatchString(hash) { hashBytes, err := hex.DecodeString(hash) if err == nil { md5sum = base64.StdEncoding.EncodeToString(hashBytes) if multipart { metadata[metaMD5Hash] = &md5sum } } } } // Guess the content type mimeType := fs.MimeType(src) key := o.fs.root + o.remote if multipart { req := s3manager.UploadInput{ Bucket: &o.fs.bucket, ACL: &o.fs.opt.ACL, Key: &key, Body: in, ContentType: &mimeType, Metadata: metadata, //ContentLength: &size, } if o.fs.opt.ServerSideEncryption != "" { req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption } if o.fs.opt.SSEKMSKeyID != "" { req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID } if o.fs.opt.StorageClass != "" { req.StorageClass = &o.fs.opt.StorageClass } err = o.fs.pacer.CallNoRetry(func() (bool, error) { _, err = uploader.Upload(&req) return o.fs.shouldRetry(err) }) if err != nil { return err } } else { req := s3.PutObjectInput{ Bucket: &o.fs.bucket, ACL: &o.fs.opt.ACL, Key: &key, ContentType: &mimeType, Metadata: metadata, } if md5sum != "" { req.ContentMD5 = &md5sum } if o.fs.opt.ServerSideEncryption != "" { req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption } if o.fs.opt.SSEKMSKeyID != "" { req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID } if o.fs.opt.StorageClass != "" { req.StorageClass = &o.fs.opt.StorageClass } // Create the request putObj, _ := o.fs.c.PutObjectRequest(&req) // Sign it so we can upload using a presigned request. // // Note the SDK doesn't currently support streaming to // PutObject so we'll use this work-around. 
url, headers, err := putObj.PresignRequest(15 * time.Minute) if err != nil { return errors.Wrap(err, "s3 upload: sign request") } // Set request to nil if empty so as not to make chunked encoding if size == 0 { in = nil } // create the vanilla http request httpReq, err := http.NewRequest("PUT", url, in) if err != nil { return errors.Wrap(err, "s3 upload: new request") } // set the headers we signed and the length httpReq.Header = headers httpReq.ContentLength = size err = o.fs.pacer.CallNoRetry(func() (bool, error) { resp, err := o.fs.srv.Do(httpReq) if err != nil { return o.fs.shouldRetry(err) } body, err := rest.ReadBody(resp) if err != nil { return o.fs.shouldRetry(err) } if resp.StatusCode >= 200 && resp.StatusCode < 299 { return false, nil } err = errors.Errorf("s3 upload: %s: %s", resp.Status, body) return fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err }) if err != nil { return err } } // Read the metadata from the newly created object o.meta = nil // wipe old metadata err = o.readMetaData() return err } // Remove an object func (o *Object) Remove() error { key := o.fs.root + o.remote req := s3.DeleteObjectInput{ Bucket: &o.fs.bucket, Key: &key, } err := o.fs.pacer.Call(func() (bool, error) { _, err := o.fs.c.DeleteObject(&req) return o.fs.shouldRetry(err) }) return err } // MimeType of an Object if known, "" otherwise func (o *Object) MimeType() string { err := o.readMetaData() if err != nil { fs.Logf(o, "Failed to read metadata: %v", err) return "" } return o.mimeType } // Check the interfaces are satisfied var ( _ fs.Fs = &Fs{} _ fs.Copier = &Fs{} _ fs.PutStreamer = &Fs{} _ fs.ListRer = &Fs{} _ fs.Object = &Object{} _ fs.MimeTyper = &Object{} )
1
8,682
File is not `goimports`-ed (from `goimports`)
rclone-rclone
go
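The `goimports` message in the row above is purely a formatting finding. As a hedged illustration only, the sketch below (a made-up, minimal file header, not the project's actual code) shows the layout `goimports` enforces: standard-library imports and third-party imports in separate, sorted groups. Running `goimports -w` over a file rewrites it into this form, and `goimports -l` lists files that still differ.

// Minimal sketch of a goimports-clean file header (illustrative only).
// Standard-library imports form the first group, third-party imports the
// second; goimports maintains the grouping and sorting automatically.
package s3

import (
	"fmt"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// example exists only so every import above is used and the file compiles.
func example() {
	_ = fmt.Sprintf("%d %v", http.StatusOK, time.Second)
	_ = aws.String("bucket")
	_ = s3.CopyObjectInput{}
}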
@@ -126,6 +126,10 @@ public class MessageTopView extends LinearLayout implements ShowPicturesControll
         mHeaderContainer.setOnFlagListener(listener);
     }
 
+    public void setOnToggleFlagLongClickListener(OnLongClickListener listener){
+        mHeaderContainer.setOnFlagListener(listener);
+    }
+
     public void showAllHeaders() {
         mHeaderContainer.onShowAdditionalHeaders();
     }
1
package com.fsck.k9.ui.messageview; import java.util.ArrayList; import java.util.List; import android.content.Context; import android.util.AttributeSet; import android.util.Log; import android.view.LayoutInflater; import android.view.View; import android.widget.Button; import android.widget.LinearLayout; import com.fsck.k9.Account; import com.fsck.k9.Account.ShowPictures; import com.fsck.k9.K9; import com.fsck.k9.R; import com.fsck.k9.helper.Contacts; import com.fsck.k9.mail.Address; import com.fsck.k9.mail.Flag; import com.fsck.k9.mail.Message; import com.fsck.k9.mail.MessagingException; import com.fsck.k9.mailstore.MessageViewInfo; import com.fsck.k9.mailstore.MessageViewInfo.MessageViewContainer; import com.fsck.k9.view.MessageHeader; public class MessageTopView extends LinearLayout implements ShowPicturesController { private MessageHeader mHeaderContainer; private LayoutInflater mInflater; private LinearLayout containerViews; private Button mDownloadRemainder; private AttachmentViewCallback attachmentCallback; private OpenPgpHeaderViewCallback openPgpHeaderViewCallback; private Button showPicturesButton; private List<MessageContainerView> messageContainerViewsWithPictures = new ArrayList<MessageContainerView>(); public MessageTopView(Context context, AttributeSet attrs) { super(context, attrs); } @Override public void onFinishInflate() { super.onFinishInflate(); mHeaderContainer = (MessageHeader) findViewById(R.id.header_container); // mHeaderContainer.setOnLayoutChangedListener(this); mInflater = LayoutInflater.from(getContext()); mDownloadRemainder = (Button) findViewById(R.id.download_remainder); mDownloadRemainder.setVisibility(View.GONE); showPicturesButton = (Button) findViewById(R.id.show_pictures); setShowPicturesButtonListener(); containerViews = (LinearLayout) findViewById(R.id.message_containers); hideHeaderView(); } private void setShowPicturesButtonListener() { showPicturesButton.setOnClickListener(new OnClickListener() { @Override public void onClick(View v) { showPicturesInAllContainerViews(); } }); } private void showPicturesInAllContainerViews() { for (MessageContainerView containerView : messageContainerViewsWithPictures) { containerView.showPictures(); } hideShowPicturesButton(); } public void resetView() { mDownloadRemainder.setVisibility(View.GONE); containerViews.removeAllViews(); } public void setMessage(Account account, MessageViewInfo messageViewInfo) throws MessagingException { resetView(); ShowPictures showPicturesSetting = account.getShowPictures(); boolean automaticallyLoadPictures = shouldAutomaticallyLoadPictures(showPicturesSetting, messageViewInfo.message); for (MessageViewContainer container : messageViewInfo.containers) { MessageContainerView view = (MessageContainerView) mInflater.inflate(R.layout.message_container, containerViews, false); boolean displayPgpHeader = account.isOpenPgpProviderConfigured(); view.displayMessageViewContainer(container, automaticallyLoadPictures, this, attachmentCallback, openPgpHeaderViewCallback, displayPgpHeader); containerViews.addView(view); } } /** * Fetch the message header view. This is not the same as the message headers; this is the View shown at the top * of messages. * @return MessageHeader View. 
*/ public MessageHeader getMessageHeaderView() { return mHeaderContainer; } public void setHeaders(final Message message, Account account) { try { mHeaderContainer.populate(message, account); mHeaderContainer.setVisibility(View.VISIBLE); } catch (Exception me) { Log.e(K9.LOG_TAG, "setHeaders - error", me); } } public void setOnToggleFlagClickListener(OnClickListener listener) { mHeaderContainer.setOnFlagListener(listener); } public void showAllHeaders() { mHeaderContainer.onShowAdditionalHeaders(); } public boolean additionalHeadersVisible() { return mHeaderContainer.additionalHeadersVisible(); } private void hideHeaderView() { mHeaderContainer.setVisibility(View.GONE); } public void setOnDownloadButtonClickListener(OnClickListener listener) { mDownloadRemainder.setOnClickListener(listener); } public void setAttachmentCallback(AttachmentViewCallback callback) { attachmentCallback = callback; } public void setOpenPgpHeaderViewCallback(OpenPgpHeaderViewCallback callback) { openPgpHeaderViewCallback = callback; } public void enableDownloadButton() { mDownloadRemainder.setEnabled(true); } public void disableDownloadButton() { mDownloadRemainder.setEnabled(false); } public void setShowDownloadButton(Message message) { if (message.isSet(Flag.X_DOWNLOADED_FULL)) { mDownloadRemainder.setVisibility(View.GONE); } else { mDownloadRemainder.setEnabled(true); mDownloadRemainder.setVisibility(View.VISIBLE); } } private void showShowPicturesButton() { showPicturesButton.setVisibility(View.VISIBLE); } private void hideShowPicturesButton() { showPicturesButton.setVisibility(View.GONE); } @Override public void notifyMessageContainerContainsPictures(MessageContainerView messageContainerView) { messageContainerViewsWithPictures.add(messageContainerView); showShowPicturesButton(); } private boolean shouldAutomaticallyLoadPictures(ShowPictures showPicturesSetting, Message message) { return showPicturesSetting == ShowPictures.ALWAYS || shouldShowPicturesFromSender(showPicturesSetting, message); } private boolean shouldShowPicturesFromSender(ShowPictures showPicturesSetting, Message message) { if (showPicturesSetting != ShowPictures.ONLY_FROM_CONTACTS) { return false; } String senderEmailAddress = getSenderEmailAddress(message); if (senderEmailAddress == null) { return false; } Contacts contacts = Contacts.getInstance(getContext()); return contacts.isInContacts(senderEmailAddress); } private String getSenderEmailAddress(Message message) { Address[] from = message.getFrom(); if (from == null || from.length == 0) { return null; } return from[0].getAddress(); } }
1
13,435
This change seems unrelated to copying email addresses to the clipboard. Please limit the scope of pull requests as much as possible.
k9mail-k-9
java
@@ -370,6 +370,13 @@ static void seat_client_send_keymap(struct wlr_seat_client *client,
 			continue;
 		}
 
+		if (keyboard->keymap == NULL) {
+			wl_keyboard_send_keymap(resource,
+				WL_KEYBOARD_KEYMAP_FORMAT_NO_KEYMAP, keymap_fd, 0);
+			close(keymap_fd);
+			continue;
+		}
+
 		void *ptr = mmap(NULL, keyboard->keymap_size, PROT_READ | PROT_WRITE,
 			MAP_SHARED, keymap_fd, 0);
 		if (ptr == MAP_FAILED) {
1
#define _POSIX_C_SOURCE 200809L #include <assert.h> #include <stdlib.h> #include <string.h> #include <sys/mman.h> #include <time.h> #include <unistd.h> #include <wayland-server-core.h> #include <wlr/types/wlr_data_device.h> #include <wlr/types/wlr_input_device.h> #include <wlr/util/log.h> #include "types/wlr_data_device.h" #include "types/wlr_seat.h" #include "util/shm.h" #include "util/signal.h" static void default_keyboard_enter(struct wlr_seat_keyboard_grab *grab, struct wlr_surface *surface, uint32_t keycodes[], size_t num_keycodes, struct wlr_keyboard_modifiers *modifiers) { wlr_seat_keyboard_enter(grab->seat, surface, keycodes, num_keycodes, modifiers); } static void default_keyboard_clear_focus(struct wlr_seat_keyboard_grab *grab) { wlr_seat_keyboard_clear_focus(grab->seat); } static void default_keyboard_key(struct wlr_seat_keyboard_grab *grab, uint32_t time, uint32_t key, uint32_t state) { wlr_seat_keyboard_send_key(grab->seat, time, key, state); } static void default_keyboard_modifiers(struct wlr_seat_keyboard_grab *grab, struct wlr_keyboard_modifiers *modifiers) { wlr_seat_keyboard_send_modifiers(grab->seat, modifiers); } static void default_keyboard_cancel(struct wlr_seat_keyboard_grab *grab) { // cannot be cancelled } const struct wlr_keyboard_grab_interface default_keyboard_grab_impl = { .enter = default_keyboard_enter, .clear_focus = default_keyboard_clear_focus, .key = default_keyboard_key, .modifiers = default_keyboard_modifiers, .cancel = default_keyboard_cancel, }; static void keyboard_release(struct wl_client *client, struct wl_resource *resource) { wl_resource_destroy(resource); } static const struct wl_keyboard_interface keyboard_impl = { .release = keyboard_release, }; static struct wlr_seat_client *seat_client_from_keyboard_resource( struct wl_resource *resource) { assert(wl_resource_instance_of(resource, &wl_keyboard_interface, &keyboard_impl)); return wl_resource_get_user_data(resource); } static void keyboard_handle_resource_destroy(struct wl_resource *resource) { wl_list_remove(wl_resource_get_link(resource)); seat_client_destroy_keyboard(resource); } void wlr_seat_keyboard_send_key(struct wlr_seat *wlr_seat, uint32_t time, uint32_t key, uint32_t state) { struct wlr_seat_client *client = wlr_seat->keyboard_state.focused_client; if (!client) { return; } uint32_t serial = wlr_seat_client_next_serial(client); struct wl_resource *resource; wl_resource_for_each(resource, &client->keyboards) { if (seat_client_from_keyboard_resource(resource) == NULL) { continue; } wl_keyboard_send_key(resource, serial, time, key, state); } } static void seat_client_send_keymap(struct wlr_seat_client *client, struct wlr_keyboard *keyboard); static void handle_keyboard_keymap(struct wl_listener *listener, void *data) { struct wlr_seat_keyboard_state *state = wl_container_of(listener, state, keyboard_keymap); struct wlr_seat_client *client; struct wlr_keyboard *keyboard = data; if (keyboard == state->keyboard) { wl_list_for_each(client, &state->seat->clients, link) { seat_client_send_keymap(client, state->keyboard); } } } static void seat_client_send_repeat_info(struct wlr_seat_client *client, struct wlr_keyboard *keyboard); static void handle_keyboard_repeat_info(struct wl_listener *listener, void *data) { struct wlr_seat_keyboard_state *state = wl_container_of(listener, state, keyboard_repeat_info); struct wlr_seat_client *client; wl_list_for_each(client, &state->seat->clients, link) { seat_client_send_repeat_info(client, state->keyboard); } } static void handle_keyboard_destroy(struct 
wl_listener *listener, void *data) { struct wlr_seat_keyboard_state *state = wl_container_of(listener, state, keyboard_destroy); wlr_seat_set_keyboard(state->seat, NULL); } void wlr_seat_set_keyboard(struct wlr_seat *seat, struct wlr_input_device *device) { // TODO call this on device key event before the event reaches the // compositor and set a pending keyboard and then send the new keyboard // state on the next keyboard notify event. struct wlr_keyboard *keyboard = (device ? device->keyboard : NULL); if (seat->keyboard_state.keyboard == keyboard) { return; } if (seat->keyboard_state.keyboard) { wl_list_remove(&seat->keyboard_state.keyboard_destroy.link); wl_list_remove(&seat->keyboard_state.keyboard_keymap.link); wl_list_remove(&seat->keyboard_state.keyboard_repeat_info.link); seat->keyboard_state.keyboard = NULL; } if (keyboard) { assert(device->type == WLR_INPUT_DEVICE_KEYBOARD); seat->keyboard_state.keyboard = keyboard; wl_signal_add(&device->events.destroy, &seat->keyboard_state.keyboard_destroy); seat->keyboard_state.keyboard_destroy.notify = handle_keyboard_destroy; wl_signal_add(&device->keyboard->events.keymap, &seat->keyboard_state.keyboard_keymap); seat->keyboard_state.keyboard_keymap.notify = handle_keyboard_keymap; wl_signal_add(&device->keyboard->events.repeat_info, &seat->keyboard_state.keyboard_repeat_info); seat->keyboard_state.keyboard_repeat_info.notify = handle_keyboard_repeat_info; struct wlr_seat_client *client; wl_list_for_each(client, &seat->clients, link) { seat_client_send_keymap(client, keyboard); seat_client_send_repeat_info(client, keyboard); } wlr_seat_keyboard_send_modifiers(seat, &keyboard->modifiers); } else { seat->keyboard_state.keyboard = NULL; } } struct wlr_keyboard *wlr_seat_get_keyboard(struct wlr_seat *seat) { return seat->keyboard_state.keyboard; } void wlr_seat_keyboard_start_grab(struct wlr_seat *wlr_seat, struct wlr_seat_keyboard_grab *grab) { grab->seat = wlr_seat; wlr_seat->keyboard_state.grab = grab; wlr_signal_emit_safe(&wlr_seat->events.keyboard_grab_begin, grab); } void wlr_seat_keyboard_end_grab(struct wlr_seat *wlr_seat) { struct wlr_seat_keyboard_grab *grab = wlr_seat->keyboard_state.grab; if (grab != wlr_seat->keyboard_state.default_grab) { wlr_seat->keyboard_state.grab = wlr_seat->keyboard_state.default_grab; wlr_signal_emit_safe(&wlr_seat->events.keyboard_grab_end, grab); if (grab->interface->cancel) { grab->interface->cancel(grab); } } } static void seat_keyboard_handle_surface_destroy(struct wl_listener *listener, void *data) { struct wlr_seat_keyboard_state *state = wl_container_of( listener, state, surface_destroy); wl_list_remove(&state->surface_destroy.link); wl_list_init(&state->surface_destroy.link); wlr_seat_keyboard_clear_focus(state->seat); } void wlr_seat_keyboard_send_modifiers(struct wlr_seat *seat, struct wlr_keyboard_modifiers *modifiers) { struct wlr_seat_client *client = seat->keyboard_state.focused_client; if (client == NULL) { return; } uint32_t serial = wlr_seat_client_next_serial(client); struct wl_resource *resource; wl_resource_for_each(resource, &client->keyboards) { if (seat_client_from_keyboard_resource(resource) == NULL) { continue; } if (modifiers == NULL) { wl_keyboard_send_modifiers(resource, serial, 0, 0, 0, 0); } else { wl_keyboard_send_modifiers(resource, serial, modifiers->depressed, modifiers->latched, modifiers->locked, modifiers->group); } } } void seat_client_send_keyboard_leave_raw(struct wlr_seat_client *seat_client, struct wlr_surface *surface) { uint32_t serial = 
wlr_seat_client_next_serial(seat_client); struct wl_resource *resource; wl_resource_for_each(resource, &seat_client->keyboards) { if (seat_client_from_keyboard_resource(resource) == NULL) { continue; } wl_keyboard_send_leave(resource, serial, surface->resource); } } void wlr_seat_keyboard_enter(struct wlr_seat *seat, struct wlr_surface *surface, uint32_t keycodes[], size_t num_keycodes, struct wlr_keyboard_modifiers *modifiers) { if (seat->keyboard_state.focused_surface == surface) { // this surface already got an enter notify return; } struct wlr_seat_client *client = NULL; if (surface) { struct wl_client *wl_client = wl_resource_get_client(surface->resource); client = wlr_seat_client_for_wl_client(seat, wl_client); } struct wlr_seat_client *focused_client = seat->keyboard_state.focused_client; struct wlr_surface *focused_surface = seat->keyboard_state.focused_surface; // leave the previously entered surface if (focused_client != NULL && focused_surface != NULL) { seat_client_send_keyboard_leave_raw(focused_client, focused_surface); } // enter the current surface if (client != NULL) { struct wl_array keys; wl_array_init(&keys); for (size_t i = 0; i < num_keycodes; ++i) { uint32_t *p = wl_array_add(&keys, sizeof(uint32_t)); if (!p) { wlr_log(WLR_ERROR, "Cannot allocate memory, skipping keycode: %" PRIu32 "\n", keycodes[i]); continue; } *p = keycodes[i]; } uint32_t serial = wlr_seat_client_next_serial(client); struct wl_resource *resource; wl_resource_for_each(resource, &client->keyboards) { if (seat_client_from_keyboard_resource(resource) == NULL) { continue; } wl_keyboard_send_enter(resource, serial, surface->resource, &keys); } wl_array_release(&keys); } // reinitialize the focus destroy events wl_list_remove(&seat->keyboard_state.surface_destroy.link); wl_list_init(&seat->keyboard_state.surface_destroy.link); if (surface) { wl_signal_add(&surface->events.destroy, &seat->keyboard_state.surface_destroy); seat->keyboard_state.surface_destroy.notify = seat_keyboard_handle_surface_destroy; } seat->keyboard_state.focused_client = client; seat->keyboard_state.focused_surface = surface; if (client != NULL) { // tell new client about any modifier change last, // as it targets seat->keyboard_state.focused_client wlr_seat_keyboard_send_modifiers(seat, modifiers); seat_client_send_selection(client); } struct wlr_seat_keyboard_focus_change_event event = { .seat = seat, .old_surface = focused_surface, .new_surface = surface, }; wlr_signal_emit_safe(&seat->keyboard_state.events.focus_change, &event); } void wlr_seat_keyboard_notify_enter(struct wlr_seat *seat, struct wlr_surface *surface, uint32_t keycodes[], size_t num_keycodes, struct wlr_keyboard_modifiers *modifiers) { // NULL surfaces are prohibited in the grab-compatible API. Use // wlr_seat_keyboard_notify_clear_focus() instead. 
assert(surface); struct wlr_seat_keyboard_grab *grab = seat->keyboard_state.grab; grab->interface->enter(grab, surface, keycodes, num_keycodes, modifiers); } void wlr_seat_keyboard_clear_focus(struct wlr_seat *seat) { wlr_seat_keyboard_enter(seat, NULL, NULL, 0, NULL); } void wlr_seat_keyboard_notify_clear_focus(struct wlr_seat *seat) { struct wlr_seat_keyboard_grab *grab = seat->keyboard_state.grab; grab->interface->clear_focus(grab); } bool wlr_seat_keyboard_has_grab(struct wlr_seat *seat) { return seat->keyboard_state.grab->interface != &default_keyboard_grab_impl; } void wlr_seat_keyboard_notify_modifiers(struct wlr_seat *seat, struct wlr_keyboard_modifiers *modifiers) { clock_gettime(CLOCK_MONOTONIC, &seat->last_event); struct wlr_seat_keyboard_grab *grab = seat->keyboard_state.grab; grab->interface->modifiers(grab, modifiers); } void wlr_seat_keyboard_notify_key(struct wlr_seat *seat, uint32_t time, uint32_t key, uint32_t state) { clock_gettime(CLOCK_MONOTONIC, &seat->last_event); struct wlr_seat_keyboard_grab *grab = seat->keyboard_state.grab; grab->interface->key(grab, time, key, state); } static void seat_client_send_keymap(struct wlr_seat_client *client, struct wlr_keyboard *keyboard) { if (!keyboard) { return; } // TODO: We should probably lift all of the keys set by the other // keyboard struct wl_resource *resource; wl_resource_for_each(resource, &client->keyboards) { if (seat_client_from_keyboard_resource(resource) == NULL) { continue; } int keymap_fd = allocate_shm_file(keyboard->keymap_size); if (keymap_fd < 0) { wlr_log(WLR_ERROR, "creating a keymap file for %zu bytes failed", keyboard->keymap_size); continue; } void *ptr = mmap(NULL, keyboard->keymap_size, PROT_READ | PROT_WRITE, MAP_SHARED, keymap_fd, 0); if (ptr == MAP_FAILED) { wlr_log(WLR_ERROR, "failed to mmap() %zu bytes", keyboard->keymap_size); close(keymap_fd); continue; } strcpy(ptr, keyboard->keymap_string); munmap(ptr, keyboard->keymap_size); wl_keyboard_send_keymap(resource, WL_KEYBOARD_KEYMAP_FORMAT_XKB_V1, keymap_fd, keyboard->keymap_size); close(keymap_fd); } } static void seat_client_send_repeat_info(struct wlr_seat_client *client, struct wlr_keyboard *keyboard) { if (!keyboard) { return; } struct wl_resource *resource; wl_resource_for_each(resource, &client->keyboards) { if (seat_client_from_keyboard_resource(resource) == NULL) { continue; } if (wl_resource_get_version(resource) >= WL_KEYBOARD_REPEAT_INFO_SINCE_VERSION) { wl_keyboard_send_repeat_info(resource, keyboard->repeat_info.rate, keyboard->repeat_info.delay); } } } void seat_client_create_keyboard(struct wlr_seat_client *seat_client, uint32_t version, uint32_t id) { struct wl_resource *resource = wl_resource_create(seat_client->client, &wl_keyboard_interface, version, id); if (resource == NULL) { wl_client_post_no_memory(seat_client->client); return; } wl_resource_set_implementation(resource, &keyboard_impl, seat_client, keyboard_handle_resource_destroy); wl_list_insert(&seat_client->keyboards, wl_resource_get_link(resource)); if ((seat_client->seat->capabilities & WL_SEAT_CAPABILITY_KEYBOARD) == 0) { wl_resource_set_user_data(resource, NULL); return; } struct wlr_keyboard *keyboard = seat_client->seat->keyboard_state.keyboard; if (keyboard == NULL) { return; } seat_client_send_keymap(seat_client, keyboard); seat_client_send_repeat_info(seat_client, keyboard); struct wlr_seat_client *focused_client = seat_client->seat->keyboard_state.focused_client; struct wlr_surface *focused_surface = seat_client->seat->keyboard_state.focused_surface; // Send 
an enter event if there is a focused client/surface stored if (focused_client != NULL && focused_surface != NULL) { uint32_t *keycodes = keyboard->keycodes; size_t num_keycodes = keyboard->num_keycodes; struct wl_array keys; wl_array_init(&keys); for (size_t i = 0; i < num_keycodes; ++i) { uint32_t *p = wl_array_add(&keys, sizeof(uint32_t)); if (!p) { wlr_log(WLR_ERROR, "Cannot allocate memory, skipping keycode: %" PRIu32 "\n", keycodes[i]); continue; } *p = keycodes[i]; } uint32_t serial = wlr_seat_client_next_serial(focused_client); struct wl_resource *resource; wl_resource_for_each(resource, &focused_client->keyboards) { if (wl_resource_get_id(resource) == id) { if (seat_client_from_keyboard_resource(resource) == NULL) { continue; } wl_keyboard_send_enter(resource, serial, focused_surface->resource, &keys); } } wl_array_release(&keys); wlr_seat_keyboard_send_modifiers(seat_client->seat, &keyboard->modifiers); } } void seat_client_destroy_keyboard(struct wl_resource *resource) { struct wlr_seat_client *seat_client = seat_client_from_keyboard_resource(resource); if (seat_client == NULL) { return; } wl_resource_set_user_data(resource, NULL); }
1
18,320
I'm assuming that there isn't a way for us to skip sending an fd here (-1?), and that we're allocating this fd just to send a legal but useless value across?
swaywm-wlroots
c
@@ -66,7 +66,11 @@ func TransportSpec(opts ...Option) yarpcconfig.TransportSpec {
 // All parameters of TransportConfig are optional. This section
 // may be omitted in the transports section.
 type TransportConfig struct {
-	Backoff yarpcconfig.Backoff `config:"backoff"`
+	ServerMaxRecvMsgSize int                 `config:"serverMaxRecvMsgSize"`
+	ServerMaxSendMsgSize int                 `config:"serverMaxSendMsgSize"`
+	ClientMaxRecvMsgSize int                 `config:"clientMaxRecvMsgSize"`
+	ClientMaxSendMsgSize int                 `config:"clientMaxSendMsgSize"`
+	Backoff              yarpcconfig.Backoff `config:"backoff"`
 }
 
 // InboundConfig configures a gRPC Inbound.
1
// Copyright (c) 2017 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package grpc import ( "fmt" "net" "go.uber.org/yarpc/api/transport" "go.uber.org/yarpc/peer/hostport" "go.uber.org/yarpc/yarpcconfig" ) const transportName = "grpc" // TransportSpec returns a TransportSpec for the gRPC transport. // // See TransportConfig, InboundConfig, and OutboundConfig for details on the // different configuration parameters supported by this Transport. // // Any TransportOption, InboundOption, or OutboundOption may be passed to this function. // These options will be applied BEFORE configuration parameters are // interpreted. This allows configuration parameters to override Options // provided to TransportSpec. func TransportSpec(opts ...Option) yarpcconfig.TransportSpec { transportSpec, err := newTransportSpec(opts...) if err != nil { panic(err.Error()) } return yarpcconfig.TransportSpec{ Name: transportName, BuildTransport: transportSpec.buildTransport, BuildInbound: transportSpec.buildInbound, BuildUnaryOutbound: transportSpec.buildUnaryOutbound, } } // TransportConfig configures a gRPC Transport. This is shared // between all gRPC inbounds and outbounds of a Dispatcher. // // transports: // grpc: // backoff: // exponential: // first: 10ms // max: 30s // // All parameters of TransportConfig are optional. This section // may be omitted in the transports section. type TransportConfig struct { Backoff yarpcconfig.Backoff `config:"backoff"` } // InboundConfig configures a gRPC Inbound. // // inbounds: // grpc: // address: ":80" type InboundConfig struct { // Address to listen on. This field is required. Address string `config:"address,interpolate"` } // OutboundConfig configures a gRPC Outbound. // // outbounds: // myservice: // grpc: // address: ":80" // // A gRPC outbound can also configure a peer list. // // outbounds: // myservice: // grpc: // round-robin: // peers: // - 127.0.0.1:8080 // - 127.0.0.1:8081 type OutboundConfig struct { yarpcconfig.PeerChooser // Address to connect to if no peer options set. 
Address string `config:"address,interpolate"` } type transportSpec struct { TransportOptions []TransportOption InboundOptions []InboundOption OutboundOptions []OutboundOption } func newTransportSpec(opts ...Option) (*transportSpec, error) { transportSpec := &transportSpec{} for _, o := range opts { switch opt := o.(type) { case TransportOption: transportSpec.TransportOptions = append(transportSpec.TransportOptions, opt) case InboundOption: transportSpec.InboundOptions = append(transportSpec.InboundOptions, opt) case OutboundOption: transportSpec.OutboundOptions = append(transportSpec.OutboundOptions, opt) default: return nil, fmt.Errorf("unknown option of type %T: %v", o, o) } } return transportSpec, nil } func (t *transportSpec) buildTransport(transportConfig *TransportConfig, _ *yarpcconfig.Kit) (transport.Transport, error) { transportOptions := newTransportOptions(t.TransportOptions) backoffStrategy, err := transportConfig.Backoff.Strategy() if err != nil { return nil, err } transportOptions.backoffStrategy = backoffStrategy return newTransport(transportOptions), nil } func (t *transportSpec) buildInbound(inboundConfig *InboundConfig, tr transport.Transport, _ *yarpcconfig.Kit) (transport.Inbound, error) { trans, ok := tr.(*Transport) if !ok { return nil, newTransportCastError(tr) } if inboundConfig.Address == "" { return nil, newRequiredFieldMissingError("address") } listener, err := net.Listen("tcp", inboundConfig.Address) if err != nil { return nil, err } return trans.NewInbound(listener, t.InboundOptions...), nil } func (t *transportSpec) buildUnaryOutbound(outboundConfig *OutboundConfig, tr transport.Transport, kit *yarpcconfig.Kit) (transport.UnaryOutbound, error) { trans, ok := tr.(*Transport) if !ok { return nil, newTransportCastError(tr) } if outboundConfig.Empty() { if outboundConfig.Address == "" { return nil, newRequiredFieldMissingError("address") } return trans.NewSingleOutbound(outboundConfig.Address, t.OutboundOptions...), nil } chooser, err := outboundConfig.BuildPeerChooser(trans, hostport.Identify, kit) if err != nil { return nil, err } return trans.NewOutbound(chooser, t.OutboundOptions...), nil } func newTransportCastError(tr transport.Transport) error { return fmt.Errorf("could not cast %T to a *grpc.Transport", tr) } func newRequiredFieldMissingError(field string) error { return fmt.Errorf("required field missing: %s", field) }
1
15,344
might be good for the units (bytes?) to be in the variable name.
yarpc-yarpc-go
go
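The review comment in the row above asks for the unit to be carried in the configuration field names. Below is a minimal sketch of that suggestion, assuming the sizes are in bytes; the identifiers and config tags are hypothetical and are not necessarily the names the project adopted.

package grpc

// TransportConfig variant with the unit (bytes) encoded in the field names
// and config keys, as the reviewer suggests. Hypothetical names, shown only
// to illustrate the suggestion.
type TransportConfig struct {
	ServerMaxRecvMsgSizeBytes int `config:"serverMaxRecvMsgSizeBytes"`
	ServerMaxSendMsgSizeBytes int `config:"serverMaxSendMsgSizeBytes"`
	ClientMaxRecvMsgSizeBytes int `config:"clientMaxRecvMsgSizeBytes"`
	ClientMaxSendMsgSizeBytes int `config:"clientMaxSendMsgSizeBytes"`
}

The alternative is to keep the shorter names from the patch and state the unit in field comments and user-facing docs; the review comment is flagging exactly that trade-off.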
@@ -1,6 +1,6 @@
 # NOTE: There are several rewrite rules defined in
 # config/initializers/rack_rewrite.rb which run before these routes.
-Upcase::Application.routes.draw do
+Rails.application.routes.draw do
   scope "upcase" do
     root to: "marketing#show"
 
1
# NOTE: There are several rewrite rules defined in # config/initializers/rack_rewrite.rb which run before these routes. Upcase::Application.routes.draw do scope "upcase" do root to: "marketing#show" use_doorkeeper scope module: "admin" do resources :users, only: [] do resource :masquerade, only: :create end resource :masquerade, only: :destroy end constraints Clearance::Constraints::SignedIn.new(&:admin?) do namespace :admin do resources :decks, only: [:new, :create, :show, :index] do resources :flashcards, only: [:new, :create, :edit, :update] resource :flashcard_preview, only: [:create] patch :flashcard_preview, to: "flashcard_previews#create" end end end mount RailsAdmin::Engine => "/admin", as: :rails_admin namespace :api do namespace :v1 do resources :exercises, only: [:update] post( "exercises/:exercise_uuid/status" => "statuses#create", as: :exercise_status, ) post( "videos/:video_wistia_id/status" => "statuses#create", as: :video_status, ) end end get "/api/v1/me.json" => "api/v1/users#show", as: :resource_owner resources( :passwords, controller: "clearance/passwords", only: [:create, :new], ) resource :session, controller: "sessions", only: [:create] resources :users, controller: "clearance/users", only: [] do resource( :password, controller: "clearance/passwords", only: [:create, :edit, :update], ) end get "/unsubscribes/:token" => "unsubscribes#show", as: :unsubscribe namespace "webhooks" do post "intercom-unsubscribes", to: "intercom_unsubscribes#create" end get "/join" => "checkouts#new", plan: "professional", as: :sign_up get "/sign_in" => "sessions#new", as: "sign_in" delete "/sign_out" => "sessions#destroy", as: "sign_out" resources :clips, only: [] do resource :download, only: [:show] end resources :decks, only: [:show, :index] do resources :flashcards, only: [:show] resource :results, only: [:show] end resources :flashcards, only: [] do resources :attempts, only: [:create, :update] end resources :exercises, only: [] do resource :trail, controller: "exercise_trails", only: [:show] end get "/pages/:id", to: "high_voltage/pages#show", as: :page, format: false get "/privacy", to: "pages#show", as: :privacy, id: "privacy" get "/purchases/:lookup", to: "pages#show", id: "purchase-show" get "/terms", to: "pages#show", as: :terms, id: "terms" get "/pages/welcome", to: "pages#show", as: "welcome" get "/pre-sales/python", to: "pages#show", id: "pre_sale_python" scope ":plan" do resource :authenticated_on_checkout, only: [:show] resources :checkouts, only: [:new, :create] end resources :repositories, only: [:index] do resource :collaboration, only: [:create] end get( ":id" => "repositories#show", as: :repository, constraints: SlugConstraint.new(Repository), ) resource :search, only: [:show, :create] get( ":id" => "shows#show", as: :show, constraints: SlugConstraint.new(Show), ) mount StripeEvent::Engine, at: "stripe-webhook" namespace :subscriber do resources :invoices, only: [:index, :show] resource :cancellation, only: [:new, :create] resource :paused_subscription, only: [:create] resource :discount, only: :create resource :reactivation, only: [:create] resource :resubscription, only: [:create] end namespace :beta do resources :offers, only: [] do resource :reply, only: :create end end get "/teams", to: "teams#new" resource :team, only: :edit resources :invitations, only: [:create, :destroy] do resources :acceptances, only: [:new, :create] end resources :memberships, only: [:destroy] get "/trails/completed" => "completed_trails#index", as: :completed_trails get( ":id" => 
"trails#show", as: :trail, constraints: SlugConstraint.new(Trail), ) get "/sign_up" => "users#new", as: "sign_up_app" get "/my_account" => "users#edit", as: "my_account" patch "/my_account" => "users#update", as: "edit_my_account" resources :users, controller: "users" do resources :notes, only: [:create, :edit, :update] resource( :password, controller: "passwords", only: [:create, :edit, :update], ) end resources :passwords, controller: "passwords", only: [:create, :new] get "/vanity" => "vanity#index" get "/vanity/participant/:id" => "vanity#participant" post "/vanity/complete" post "/vanity/chooses" post "/vanity/reset" post "/vanity/add_participant" get "/vanity/image" resources :videos, only: [:show] do resource :auth_to_access, only: [:show] resource :twitter_player_card, only: [:show] resources :completions, only: [:create], controller: "video_completions" end resource :annual_billing, only: :new resource :credit_card, only: [:update] resource :forum_sessions, only: :new resources :payments, only: [:new] resources :signups, only: [:create] resource :subscription, only: [:edit, :update] resources :coupons, only: :show resources :topics, only: :index, constraints: { format: "css" } resources :onboardings, only: :create get "forum", to: redirect("https://forum.upcase.com"), as: "forum" resources( :design_for_developers_resources, path: "design-for-developers-resources", only: [:index, :show], ) resources( :test_driven_rails_resources, path: "test-driven-rails-resources", only: [:index], ) resources :tapas_payments, only: [:create] get "/practice" => "practice#show", as: :practice get "sitemap.xml" => "sitemaps#show", as: :sitemap, format: "xml" get ":id" => "topics#show", as: :topic get "/auth/:provider/callback", to: "auth_callbacks#create" end end
1
18,613
Metrics/BlockLength: Block has too many lines. [166/25]
thoughtbot-upcase
rb
@@ -23,9 +23,9 @@
 CSV file based implementation of a record stream
 
 FileRecordStream is class that can read and write .csv files that contain
-records. The file has 3 header lines that contain for each field the name, type
-and a special indicator for the fields that serve as the reset field,
-sequence id field and timestamp field. The other fields
+records. The file has 3 header lines that contain, for each field, the name
+(line 1), type (line 2), and a special indicator (line 3) for those fields that
+serve as the reset field, sequence id field, and timestamp field.
 
 The header lines look like:
 
1
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- """ CSV file based implementation of a record stream FileRecordStream is class that can read and write .csv files that contain records. The file has 3 header lines that contain for each field the name, type and a special indicator for the fields that serve as the reset field, sequence id field and timestamp field. The other fields The header lines look like: f1,f2,f3,....fN int,string,datetime,bool,... R,S,T,,,,.... The data lines are just comma separated values that match the types in the second header line. The supported types are: int, float, string, bool, datetime The format for datetime fields is yyyy-mm-dd hh:mm:ss.us The 'us' component is microseconds. When reading a file the FileRecordStream will automatically read the header line and will figure out the type of each field and what are the timestamp, reset and sequenceId fields (if any). The FileRecordStream class supports the context manager ('with' statement ) protocol. That means you con do: with FileRecordStream(filename) as f: ... ... When the control exits the 'with' block the file will be closed automatically. You may still call the .close() method at any point (even multiple times). The FileRecordStream also supports the iteration protocol so you may read its contents using a for loop: for r in f: print r """ import os import csv import copy import json from nupic.data.fieldmeta import FieldMetaInfo from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA from nupic.data.record_stream import RecordStreamIface from nupic.data.utils import ( intOrNone, floatOrNone, parseBool, parseTimestamp, serializeTimestamp, serializeTimestampNoMS, escape, unescape, parseSdr, serializeSdr) ############################################################################### class FileRecordStream(RecordStreamIface): """ CSV file based RecordStream implementation """ # Private: number of header rows (field names, types, special) _NUM_HEADER_ROWS = 3 # Private: file mode for opening file for writing _FILE_WRITE_MODE = 'w' # Private: file mode for opening file for reading _FILE_READ_MODE = 'rU' ############################################################################# def __init__(self, streamID, write=False, fields=None, missingValues=None, bookmark=None, includeMS=True, firstRecord=None): """ Constructor streamID: CSV file name, input or output write: True or False, open for writing if True fields: a list of nupic.data.fieldmeta.FieldMetaInfo field descriptors, only applicable when write==True missingValues: what missing values should be replaced with? 
bookmark: a reference to the previous reader, if passed in, the records will be returned starting from the point where bookmark was requested. Either bookmark or firstRecord can be specified, not both. If bookmark is used, then firstRecord MUST be None. includeMS: If false, the microseconds portion is not included in the generated output file timestamp fields. This makes it compatible with reading in from Excel. firstRecord: 0-based index of the first record to start reading from. Either bookmark or firstRecord can be specified, not both. If bookmark is used, then firstRecord MUST be None. Each field is a 3-tuple (name, type, special or '') The name is the name of the field. The type is one of: 'string', 'datetime', 'int', 'float', 'bool' The special is either empty or one of S, R, T, C that designate their field as the sequenceId, reset, timestamp, or category. There can be at most one of each. There may be multiple fields of type datetime, but no more than one of them may be the timestamp field (T). The sequence id field must be either a string or an int. The reset field must be an int (and must contain 0 or 1). The category field must be an int. The FileRecordStream iterates over the field names, types and specials and stores the information. """ # Call superclass constructor super(FileRecordStream, self).__init__() # Only bookmark or firstRow can be specified, not both if bookmark is not None and firstRecord is not None: raise RuntimeError("Only bookmark or firstRecord can be specified, not both") if fields is None: fields = [] if missingValues is None: missingValues = [''] # We'll be operating on csvs with arbitrarily long fields size = 2**27 csv.field_size_limit(size) self._filename = streamID # We can't guarantee what system files are coming from, use universal # newlines self._write = write self._mode = self._FILE_WRITE_MODE if write else self._FILE_READ_MODE self._file = open(self._filename, self._mode) self._sequences = set() self.rewindAtEOF = False if write: assert fields is not None assert isinstance(fields, (tuple, list)) # Verify all fields are 3-tuple assert all(isinstance(f, (tuple, FieldMetaInfo)) and len(f) == 3 for f in fields) names, types, specials = zip(*fields) self._writer = csv.writer(self._file) else: os.linesep = '\n' # make sure readline() works on windows too. # Read header lines self._reader = csv.reader(self._file, dialect='excel', quoting=csv.QUOTE_NONE) try: names = [n.strip() for n in self._reader.next()] except: raise Exception('The header line of the file %s contained a NULL byte' \ % self._filename) types = [t.strip() for t in self._reader.next()] specials = [s.strip() for s in self._reader.next()] # If there are no specials, this means there was a blank line if len(specials) == 0: specials=[""] if not(len(names) == len(types) == len(specials)): raise Exception('Invalid file format: different number of fields ' 'in the header rows of file %s (%d, %d, %d)' % (streamID, len(names), len(types), len(specials))) # Verify standard file format allowedTypes = ('string', 'datetime', 'int', 'float', 'bool', 'sdr') for i, t in enumerate(types): # This is a temporary hack for the Precog milestone, which passes in a # type 'address' for address fields. Here we simply map the type "address" # to "string". 
if t == 'address': types[i] = 'string' t = 'string' if t not in allowedTypes: raise Exception('Invalid file format for "%s" - field type "%s" ' 'not one of %s ' % (self._filename, t, allowedTypes)) for s in specials: if s not in ('', 'T', 'R', 'S', 'C', 'L'): raise Exception('Invalid file format. \'%s\' is not a valid special ' 'flag' % s) self._fields = [FieldMetaInfo(*attrs) for attrs in zip(names, types, specials)] self._fieldCount = len(self._fields) # Keep track on how many records have been read/written self._recordCount = 0 self._timeStampIdx = specials.index('T') if 'T' in specials else None self._resetIdx = specials.index('R') if 'R' in specials else None self._sequenceIdIdx = specials.index('S') if 'S' in specials else None self._categoryIdx = specials.index('C') if 'C' in specials else None self._learningIdx = specials.index('L') if 'L' in specials else None # keep track of the current sequence self._currSequence = None self._currTime = None if self._timeStampIdx: assert types[self._timeStampIdx] == 'datetime' if self._sequenceIdIdx: assert types[self._sequenceIdIdx] in ('string', 'int') if self._resetIdx: assert types[self._resetIdx] == 'int' if self._categoryIdx: assert types[self._categoryIdx] == 'int' if self._learningIdx: assert types[self._learningIdx] == 'int' # Convert the types to the actual types in order to convert the strings if self._mode == self._FILE_READ_MODE: m = dict(int=intOrNone, float=floatOrNone, bool=parseBool, string=unescape, datetime=parseTimestamp, sdr=parseSdr) else: if includeMS: datetimeFunc = serializeTimestamp else: datetimeFunc = serializeTimestampNoMS m = dict(int=str, float=str, string=escape, bool=str, datetime=datetimeFunc, sdr=serializeSdr) self._adapters = [m[t] for t in types] self._missingValues = missingValues # # If the bookmark is set, we need to skip over first N records # if bookmark is not None: rowsToSkip = self._getStartRow(bookmark) elif firstRecord is not None: rowsToSkip = firstRecord else: rowsToSkip = 0 while rowsToSkip > 0: self.next() rowsToSkip -= 1 # Dictionary to store record statistics (min and max of scalars for now) self._stats = None ############################################################################# def __getstate__(self): d = dict() d.update(self.__dict__) del d['_reader'] del d['_file'] return d ############################################################################# def __setstate__(self, state): self.__dict__ = state self._file = None self._reader = None self.rewind() ############################################################################# def close(self): if self._file is not None: self._file.close() self._file = None ############################################################################## def rewind(self): """Put us back at the beginning of the file again) """ # Superclass rewind super(FileRecordStream, self).rewind() self.close() self._file = open(self._filename, self._mode) self._reader = csv.reader(self._file, dialect='excel', quoting=csv.QUOTE_NONE) # Skip header rows row = self._reader.next() row = self._reader.next() row = self._reader.next() # Reset record count, etc. self._recordCount = 0 ############################################################################# def getNextRecord(self, useCache=True): """ Returns next available data record from the file. retval: a data row (a list or tuple) if available; None, if no more records in the table (End of Stream - EOS); empty sequence (list or tuple) when timing out while waiting for the next record. 
""" assert self._file is not None assert self._mode == self._FILE_READ_MODE # Read the line try: line = self._reader.next() except StopIteration: if self.rewindAtEOF: if self._recordCount == 0: raise Exception("The source configured to reset at EOF but " "'%s' appears to be empty" % self._filename) self.rewind() line = self._reader.next() else: return None # Keep score of how many records were read self._recordCount += 1 # Split the line to text fields and convert each text field to a Python # object if value is missing (empty string) encode appropriately for # upstream consumers in the case of numeric types, this means replacing # missing data with a sentinel value for string type, we can leave the empty # string in place record = [] for i, f in enumerate(line): #print "DEBUG: Evaluating field @ index %s: %r" % (i, f) #sys.stdout.flush() if f in self._missingValues: record.append(SENTINEL_VALUE_FOR_MISSING_DATA) else: # either there is valid data, or the field is string type, # in which case the adapter does the right thing by default record.append(self._adapters[i](f)) return record ############################################################################# def getRecordsRange(self, bookmark=None, range=None): """ Returns a range of records, starting from the bookmark. If 'bookmark' is None, then records read from the first available. If 'range' is None, all available records will be returned (caution: this could be a lot of records and require a lot of memory). """ raise Exception('getRecordsRange() is not supported for the file storage') ############################################################################# def getLastRecords(self, numRecords): """ Returns a tuple (successCode, recordsArray), where successCode - if the stream had enough records to return, True/False recordsArray - an array of last numRecords records available when the call was made. Records appended while in the getLastRecords will be not returned until the next call to either getNextRecord() or getLastRecords() """ raise Exception('getLastRecords() is not supported for the file storage') ############################################################################# def removeOldData(self): raise Exception('removeOldData is not supported in this class.') ############################################################################# def appendRecord(self, record, inputBookmark=None): """ Saves the record in the underlying csv file. record: a list of Python objects that will be string-ified Returns: nothing """ # input bookmark is not applicable in case of a file storage inputBookmark = inputBookmark assert self._file is not None assert self._mode == self._FILE_WRITE_MODE assert isinstance(record, (list, tuple)), \ "unexpected record type: " + repr(type(record)) assert len(record) == self._fieldCount, \ "len(record): %s, fieldCount: %s" % (len(record), self._fieldCount) # Write header if needed if self._recordCount == 0: # Write the header names, types, specials = zip(*self.getFields()) for line in names, types, specials: self._writer.writerow(line) # Keep track of sequences, make sure time flows forward self._updateSequenceInfo(record) line = [self._adapters[i](f) for i, f in enumerate(record)] self._writer.writerow(line) self._recordCount += 1 ############################################################################# def appendRecords(self, records, inputRef=None, progressCB=None): """ Saves multiple records in the underlying storage. 
Params: records - array of records as in 'appendRecord' inputRef - reference to the corresponding input (not applicable in case of a file storage) progressCB - callback to report progress Returns: nothing """ # input ref is not applicable in case of a file storage inputRef = inputRef for record in records: self.appendRecord(record, None) if progressCB is not None: progressCB() ############################################################################# def getBookmark(self): """ Returns an anchor to the current position in the data. Passing this anchor to a constructor makes the current position to be the first returned record. """ if self._write and self._recordCount==0: return None rowDict = dict(filepath=os.path.realpath(self._filename), currentRow=self._recordCount) return json.dumps(rowDict) ############################################################################# def recordsExistAfter(self, bookmark): """Returns True iff there are records left after the bookmark.""" return (self.getDataRowCount() - self.getNextRecordIdx()) > 0 ############################################################################# def seekFromEnd(self, numRecords): """Seeks to numRecords from the end and returns a bookmark to the new position. """ self._file.seek(self._getTotalLineCount() - numRecords) return self.getBookmark() ############################################################################## def setAutoRewind(self, autoRewind): """ Controls whether getNext() should automatically rewind the source when EOF is reached. autoRewind: True = getNext() will automatically rewind the source on EOF; False = getNext() will not automatically rewind the source on EOF """ self.rewindAtEOF = autoRewind ############################################################################# def getStats(self): """ Parse the file using dedicated reader and collect fields stats. Never called if user of FileRecordStream does not invoke getStats method. Returns: a dictionary of stats. In the current implementation, min and max fields are supported. Example of the return dictionary is: { 'min' : [f1_min, f2_min, None, None, fn_min], 'max' : [f1_max, f2_max, None, None, fn_max] } (where fx_min/fx_max are set for scalar fields, or None if not) """ # Collect stats only once per File object, use fresh csv iterator # to keep the next() method returning sequential records no matter when # caller asks for stats if self._stats == None: # Stats are only available when reading csv file assert (self._mode == self._FILE_READ_MODE) inFile = open(self._filename, self._FILE_READ_MODE) os.linesep = '\n' # make sure readline() works on windows too. 
# Create a new reader; read names, types, specials reader = csv.reader(inFile, dialect='excel', quoting=csv.QUOTE_NONE) names = [n.strip() for n in reader.next()] types = [t.strip() for t in reader.next()] # Skip over specials reader.next() # Initialize stats to all None self._stats = dict() self._stats['min'] = [] self._stats['max'] = [] for i in xrange(len(names)): self._stats['min'].append(None) self._stats['max'].append(None) # Read the file, collect stats while True: try: line = reader.next() for i, f in enumerate(line): if len(types) > i and types[i] in ['int', 'float'] and f not in self._missingValues: value = self._adapters[i](f) if self._stats['max'][i] == None or \ self._stats['max'][i] < value: self._stats['max'][i] = value if self._stats['min'][i] == None or \ self._stats['min'][i] > value: self._stats['min'][i] = value except StopIteration: break return self._stats ############################################################################# def clearStats(self): """ Resets stats collected so far. """ self._stats = None ############################################################################# def getError(self): """ Returns errors saved in the stream. """ # CSV file version does not provide storage for the error information return None ############################################################################# def setError(self, error): """ Saves specified error in the stream. """ # CSV file version does not provide storage for the error information return ############################################################################# def isCompleted(self): """ Returns True if all records are already in the stream or False if more records is expected. """ # CSV file is always considered completed return True ############################################################################# def setCompleted(self, completed=True): """ Marks the stream completed (True or False) """ # CSV file is always considered completed, nothing to do return ############################################################################# def getFieldNames(self): """ Returns an array of field names associated with the data. """ return [f[0] for f in self._fields] ############################################################################# def getFields(self): """ Returns a sequence of nupic.data.fieldmeta.FieldMetaInfo name/type/special tuples for each field in the stream. """ if self._fields == None: return None else: return copy.copy(self._fields) ############################################################################# ############################################################################# def _updateSequenceInfo(self, r): """Keep track of sequence and make sure time goes forward Check if the current record is the beginning of a new sequence A new sequence starts in 2 cases: 1. The sequence id changed (if there is a sequence id field) 2. The reset field is 1 (if there is a reset field) Note that if there is no sequenceId field or resetId field then the entire dataset is technically one big sequence. The function will not return True for the first record in this case. This is Ok because it is important to detect new sequences only when there are multiple sequences in the file. 
""" # Get current sequence id (if any) newSequence = False sequenceId = r[self._sequenceIdIdx] if self._sequenceIdIdx is not None else None if sequenceId != self._currSequence: # verify that the new sequence didn't show up before if sequenceId in self._sequences: raise Exception('Broken sequence: %s, record: %s' % \ (sequenceId, r)) # add the finished sequence to the set of sequence self._sequences.add(self._currSequence) self._currSequence = sequenceId # Verify that the reset is consistent (if there is one) if self._resetIdx: assert r[self._resetIdx] == 1 newSequence = True else: # Check the reset reset = False if self._resetIdx: reset = r[self._resetIdx] if reset == 1: newSequence = True # If it's still the same old sequence make sure the time flows forward if not newSequence: if self._timeStampIdx and self._currTime is not None: t = r[self._timeStampIdx] if t < self._currTime: raise Exception('No time travel. Early timestamp for record: %s' % r) if self._timeStampIdx: self._currTime = r[self._timeStampIdx] ############################################################################# def _getStartRow(self, bookmark): """ Extracts start row from the bookmark information """ bookMarkDict = json.loads(bookmark) realpath = os.path.realpath(self._filename) bookMarkFile = bookMarkDict.get('filepath', None) if bookMarkFile != realpath: print ("Ignoring bookmark due to mismatch between File's " "filename realpath vs. bookmark; realpath: %r; bookmark: %r") % ( realpath, bookMarkDict) return 0 else: return bookMarkDict['currentRow'] ############################################################################# def _getTotalLineCount(self): """ Returns: count of ALL lines in dataset, including header lines """ # Flush the file before we open it again to count lines if self._mode == self._FILE_WRITE_MODE: self._file.flush() return sum(1 for line in open(self._filename, self._FILE_READ_MODE)) ############################################################################# def getNextRecordIdx(self): """Returns the index of the record that will be read next from getNextRecord() """ return self._recordCount ############################################################################# def getDataRowCount(self): """ Returns: count of data rows in dataset (excluding header lines) """ numLines = self._getTotalLineCount() if numLines == 0: # this may be the case in a file opened for write before the # header rows are written out assert self._mode == self._FILE_WRITE_MODE and self._recordCount == 0 numDataRows = 0 else: numDataRows = numLines - self._NUM_HEADER_ROWS assert numDataRows >= 0 return numDataRows ############################################################################# def setTimeout(self, timeout): """ Set the read timeout """ pass ############################################################################# def flush(self): if self._file is not None: self._file.flush() ############################################################################# ############################################################################# def __enter__(self): """Context guard - enter Just return the object """ return self ############################################################################# def __exit__(self, yupe, value, traceback): """Context guard - exit Ensures that the file is always closed at the end of the 'with' block. Lets exceptions propagate. """ self.close() ############################################################################# def __iter__(self): """Support for the iterator protocol. 
Return itself""" return self ############################################################################# def next(self): """Implement the iterator protocol """ record = self.getNextRecord() if record is None: raise StopIteration return record
1
17,174
I'd end the sentence after `special indicator (line 3)`. And then start a new sentence: `The special field can indicate that the field specifies a reset, is a sequence ID, or is a timestamp for the record.`
numenta-nupic
py
@@ -105,11 +105,9 @@ def bbox_mapping_back(bboxes, img_shape, scale_factor, flip): def bbox2roi(bbox_list): """Convert a list of bboxes to roi format. - Args: bbox_list (list[Tensor]): a list of bboxes corresponding to a batch of images. - Returns: Tensor: shape (n, 5), [batch_ind, x1, y1, x2, y2] """
1
import mmcv import numpy as np import torch def bbox2delta(proposals, gt, means=[0, 0, 0, 0], stds=[1, 1, 1, 1]): assert proposals.size() == gt.size() proposals = proposals.float() gt = gt.float() px = (proposals[..., 0] + proposals[..., 2]) * 0.5 py = (proposals[..., 1] + proposals[..., 3]) * 0.5 pw = proposals[..., 2] - proposals[..., 0] + 1.0 ph = proposals[..., 3] - proposals[..., 1] + 1.0 gx = (gt[..., 0] + gt[..., 2]) * 0.5 gy = (gt[..., 1] + gt[..., 3]) * 0.5 gw = gt[..., 2] - gt[..., 0] + 1.0 gh = gt[..., 3] - gt[..., 1] + 1.0 dx = (gx - px) / pw dy = (gy - py) / ph dw = torch.log(gw / pw) dh = torch.log(gh / ph) deltas = torch.stack([dx, dy, dw, dh], dim=-1) means = deltas.new_tensor(means).unsqueeze(0) stds = deltas.new_tensor(stds).unsqueeze(0) deltas = deltas.sub_(means).div_(stds) return deltas def delta2bbox(rois, deltas, means=[0, 0, 0, 0], stds=[1, 1, 1, 1], max_shape=None, wh_ratio_clip=16 / 1000): means = deltas.new_tensor(means).repeat(1, deltas.size(1) // 4) stds = deltas.new_tensor(stds).repeat(1, deltas.size(1) // 4) denorm_deltas = deltas * stds + means dx = denorm_deltas[:, 0::4] dy = denorm_deltas[:, 1::4] dw = denorm_deltas[:, 2::4] dh = denorm_deltas[:, 3::4] max_ratio = np.abs(np.log(wh_ratio_clip)) dw = dw.clamp(min=-max_ratio, max=max_ratio) dh = dh.clamp(min=-max_ratio, max=max_ratio) px = ((rois[:, 0] + rois[:, 2]) * 0.5).unsqueeze(1).expand_as(dx) py = ((rois[:, 1] + rois[:, 3]) * 0.5).unsqueeze(1).expand_as(dy) pw = (rois[:, 2] - rois[:, 0] + 1.0).unsqueeze(1).expand_as(dw) ph = (rois[:, 3] - rois[:, 1] + 1.0).unsqueeze(1).expand_as(dh) gw = pw * dw.exp() gh = ph * dh.exp() gx = torch.addcmul(px, 1, pw, dx) # gx = px + pw * dx gy = torch.addcmul(py, 1, ph, dy) # gy = py + ph * dy x1 = gx - gw * 0.5 + 0.5 y1 = gy - gh * 0.5 + 0.5 x2 = gx + gw * 0.5 - 0.5 y2 = gy + gh * 0.5 - 0.5 if max_shape is not None: x1 = x1.clamp(min=0, max=max_shape[1] - 1) y1 = y1.clamp(min=0, max=max_shape[0] - 1) x2 = x2.clamp(min=0, max=max_shape[1] - 1) y2 = y2.clamp(min=0, max=max_shape[0] - 1) bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view_as(deltas) return bboxes def bbox_flip(bboxes, img_shape): """Flip bboxes horizontally. Args: bboxes(Tensor or ndarray): Shape (..., 4*k) img_shape(tuple): Image shape. Returns: Same type as `bboxes`: Flipped bboxes. """ if isinstance(bboxes, torch.Tensor): assert bboxes.shape[-1] % 4 == 0 flipped = bboxes.clone() flipped[:, 0::4] = img_shape[1] - bboxes[:, 2::4] - 1 flipped[:, 2::4] = img_shape[1] - bboxes[:, 0::4] - 1 return flipped elif isinstance(bboxes, np.ndarray): return mmcv.bbox_flip(bboxes, img_shape) def bbox_mapping(bboxes, img_shape, scale_factor, flip): """Map bboxes from the original image scale to testing scale""" new_bboxes = bboxes * scale_factor if flip: new_bboxes = bbox_flip(new_bboxes, img_shape) return new_bboxes def bbox_mapping_back(bboxes, img_shape, scale_factor, flip): """Map bboxes from testing scale to original image scale""" new_bboxes = bbox_flip(bboxes, img_shape) if flip else bboxes new_bboxes = new_bboxes / scale_factor return new_bboxes def bbox2roi(bbox_list): """Convert a list of bboxes to roi format. Args: bbox_list (list[Tensor]): a list of bboxes corresponding to a batch of images. 
Returns: Tensor: shape (n, 5), [batch_ind, x1, y1, x2, y2] """ rois_list = [] for img_id, bboxes in enumerate(bbox_list): if bboxes.size(0) > 0: img_inds = bboxes.new_full((bboxes.size(0), 1), img_id) rois = torch.cat([img_inds, bboxes[:, :4]], dim=-1) else: rois = bboxes.new_zeros((0, 5)) rois_list.append(rois) rois = torch.cat(rois_list, 0) return rois def roi2bbox(rois): bbox_list = [] img_ids = torch.unique(rois[:, 0].cpu(), sorted=True) for img_id in img_ids: inds = (rois[:, 0] == img_id.item()) bbox = rois[inds, 1:] bbox_list.append(bbox) return bbox_list def bbox2result(bboxes, labels, num_classes): """Convert detection results to a list of numpy arrays. Args: bboxes (Tensor): shape (n, 5) labels (Tensor): shape (n, ) num_classes (int): class number, including background class Returns: list(ndarray): bbox results of each class """ if bboxes.shape[0] == 0: return [ np.zeros((0, 5), dtype=np.float32) for i in range(num_classes - 1) ] else: bboxes = bboxes.cpu().numpy() labels = labels.cpu().numpy() return [bboxes[labels == i, :] for i in range(num_classes - 1)]
1
16,979
These empty lines can be kept.
open-mmlab-mmdetection
py
@@ -176,14 +176,14 @@ public class VelocityResponseWriterTest extends SolrTestCaseJ4 { // The test init properties file turns off being able to use $foreach.index (the implicit loop counter) // The foreach.vm template uses $!foreach.index, with ! suppressing the literal "$foreach.index" output - assertEquals("01", h.query(req("q","*:*", "wt","velocity",VelocityResponseWriter.TEMPLATE,"foreach"))); - assertEquals("", h.query(req("q","*:*", "wt","velocityWithInitProps",VelocityResponseWriter.TEMPLATE,"foreach"))); + assertEquals("01", h.query(req("q","*:*", "wt","velocity",VelocityResponseWriter.TEMPLATE,"foreach")).trim()); + assertEquals("", h.query(req("q","*:*", "wt","velocityWithInitProps",VelocityResponseWriter.TEMPLATE,"foreach")).trim()); // Turn off trusted configset, which disables the init properties h.getCoreContainer().getCoreDescriptor(h.coreName).setConfigSetTrusted(false); assertFalse(h.getCoreContainer().getCoreDescriptor(coreName).isConfigSetTrusted()); - assertEquals("01", h.query(req("q","*:*", "wt","velocityWithInitProps",VelocityResponseWriter.TEMPLATE,"foreach"))); + assertEquals("01", h.query(req("q","*:*", "wt","velocityWithInitProps",VelocityResponseWriter.TEMPLATE,"foreach")).trim()); // set the harness back to the default of trusted h.getCoreContainer().getCoreDescriptor(h.coreName).setConfigSetTrusted(true);
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.solr.velocity; import java.io.IOException; import java.io.StringReader; import java.io.StringWriter; import java.security.AccessControlException; import java.util.Properties; import org.apache.solr.SolrTestCaseJ4; import org.apache.solr.common.SolrException; import org.apache.solr.common.util.NamedList; import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.response.QueryResponseWriter; import org.apache.solr.response.SolrQueryResponse; import org.apache.solr.response.VelocityResponseWriter; import org.apache.velocity.exception.MethodInvocationException; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Ignore; import org.junit.Test; public class VelocityResponseWriterTest extends SolrTestCaseJ4 { @BeforeClass public static void beforeClass() throws Exception { initCore("solrconfig.xml", "schema.xml", getFile("velocity/solr").getAbsolutePath()); } @AfterClass public static void afterClass() throws Exception { } @Override public void setUp() throws Exception { // This test case toggles the configset used from trusted to untrusted - return to default of trusted for each test h.getCoreContainer().getCoreDescriptor(h.coreName).setConfigSetTrusted(true); super.setUp(); } @Test public void testVelocityResponseWriterRegistered() { QueryResponseWriter writer = h.getCore().getQueryResponseWriter("velocity"); assertTrue("VrW registered check", writer instanceof VelocityResponseWriter); } @Test public void testSecureUberspector() throws Exception { VelocityResponseWriter vrw = new VelocityResponseWriter(); NamedList<String> nl = new NamedList<>(); nl.add("template.base.dir", getFile("velocity").getAbsolutePath()); vrw.init(nl); SolrQueryRequest req = req(VelocityResponseWriter.TEMPLATE,"outside_the_box"); SolrQueryResponse rsp = new SolrQueryResponse(); StringWriter buf = new StringWriter(); vrw.write(buf, req, rsp); assertEquals("$ex",buf.toString()); // $ex rendered literally because it is null, and thus did not succeed to break outside the box } @Test @Ignore("SOLR-14025: Velocity's SecureUberspector addresses this") public void testTemplateSandbox() throws Exception { assumeTrue("This test only works with security manager", System.getSecurityManager() != null); VelocityResponseWriter vrw = new VelocityResponseWriter(); NamedList<String> nl = new NamedList<>(); nl.add("template.base.dir", getFile("velocity").getAbsolutePath()); vrw.init(nl); SolrQueryRequest req = req(VelocityResponseWriter.TEMPLATE,"outside_the_box"); SolrQueryResponse rsp = new SolrQueryResponse(); StringWriter buf = new StringWriter(); try { vrw.write(buf, req, rsp); fail("template broke outside the box, retrieved: " + buf); } catch (MethodInvocationException e) { assertNotNull(e.getCause()); 
assertEquals(AccessControlException.class, e.getCause().getClass()); // expected failure, can't get outside the box } } @Test @Ignore("SOLR-14025: Velocity's SecureUberspector addresses this") public void testSandboxIntersection() throws Exception { assumeTrue("This test only works with security manager", System.getSecurityManager() != null); VelocityResponseWriter vrw = new VelocityResponseWriter(); NamedList<String> nl = new NamedList<>(); nl.add("template.base.dir", getFile("velocity").getAbsolutePath()); vrw.init(nl); SolrQueryRequest req = req(VelocityResponseWriter.TEMPLATE,"sandbox_intersection"); SolrQueryResponse rsp = new SolrQueryResponse(); StringWriter buf = new StringWriter(); try { vrw.write(buf, req, rsp); fail("template broke outside the box, retrieved: " + buf); } catch (MethodInvocationException e) { assertNotNull(e.getCause()); assertEquals(AccessControlException.class, e.getCause().getClass()); // expected failure, can't get outside the box } } @Test public void testFileResourceLoader() throws Exception { VelocityResponseWriter vrw = new VelocityResponseWriter(); NamedList<String> nl = new NamedList<>(); nl.add("template.base.dir", getFile("velocity").getAbsolutePath()); vrw.init(nl); SolrQueryRequest req = req(VelocityResponseWriter.TEMPLATE,"file"); SolrQueryResponse rsp = new SolrQueryResponse(); StringWriter buf = new StringWriter(); vrw.write(buf, req, rsp); assertEquals("testing", buf.toString()); } @Test public void testTemplateTrust() throws Exception { // Try on trusted configset.... assertEquals("0", h.query(req("q","*:*", "wt","velocity",VelocityResponseWriter.TEMPLATE,"numFound"))); // Turn off trusted configset, which disables the Solr resource loader h.getCoreContainer().getCoreDescriptor(h.coreName).setConfigSetTrusted(false); assertFalse(h.getCoreContainer().getCoreDescriptor(coreName).isConfigSetTrusted()); try { assertEquals("0", h.query(req("q","*:*", "wt","velocity",VelocityResponseWriter.TEMPLATE,"numFound"))); fail("template rendering should have failed, from an untrusted configset"); } catch (IOException e) { // expected exception assertEquals(IOException.class, e.getClass()); } // set the harness back to the default of trusted h.getCoreContainer().getCoreDescriptor(h.coreName).setConfigSetTrusted(true); } @Test public void testSolrResourceLoaderTemplate() throws Exception { assertEquals("0", h.query(req("q","*:*", "wt","velocity",VelocityResponseWriter.TEMPLATE,"numFound"))); } @Test public void testEncoding() throws Exception { assertEquals("éñçø∂îñg", h.query(req("q","*:*", "wt","velocity",VelocityResponseWriter.TEMPLATE,"encoding"))); } @Test public void testMacros() throws Exception { // tests that a macro in a custom macros.vm is visible assertEquals("test_macro_SUCCESS", h.query(req("q","*:*", "wt","velocity",VelocityResponseWriter.TEMPLATE,"test_macro_visible"))); // tests that a builtin (_macros.vm) macro, #url_root in this case, can be overridden in a custom macros.vm // the macro is also defined in VM_global_library.vm, which should also be overridden by macros.vm assertEquals("Loaded from: macros.vm", h.query(req("q","*:*", "wt","velocity",VelocityResponseWriter.TEMPLATE,"test_macro_overridden"))); // tests that macros defined in VM_global_library.vm are visible. 
This file was where macros in pre-5.0 versions were defined assertEquals("legacy_macro_SUCCESS", h.query(req("q","*:*", "wt","velocity",VelocityResponseWriter.TEMPLATE,"test_macro_legacy_support"))); } @Test public void testInitProps() throws Exception { // The test init properties file turns off being able to use $foreach.index (the implicit loop counter) // The foreach.vm template uses $!foreach.index, with ! suppressing the literal "$foreach.index" output assertEquals("01", h.query(req("q","*:*", "wt","velocity",VelocityResponseWriter.TEMPLATE,"foreach"))); assertEquals("", h.query(req("q","*:*", "wt","velocityWithInitProps",VelocityResponseWriter.TEMPLATE,"foreach"))); // Turn off trusted configset, which disables the init properties h.getCoreContainer().getCoreDescriptor(h.coreName).setConfigSetTrusted(false); assertFalse(h.getCoreContainer().getCoreDescriptor(coreName).isConfigSetTrusted()); assertEquals("01", h.query(req("q","*:*", "wt","velocityWithInitProps",VelocityResponseWriter.TEMPLATE,"foreach"))); // set the harness back to the default of trusted h.getCoreContainer().getCoreDescriptor(h.coreName).setConfigSetTrusted(true); } @Test public void testCustomTools() throws Exception { // Render this template once without a custom tool defined, and once with it defined. The tool has a `.star` method. // The tool added as `mytool`, `log`, and `response`. `log` is designed to be overridable, but not `response` // mytool.star=$!mytool.star("LATERALUS") // mytool.locale=$!mytool.locale // log.star=$!log.star("log overridden") // response.star=$!response.star("response overridden??") // First without the tool defined, with `$!` turning null object/method references into empty string Properties rendered_props = new Properties(); String rsp = h.query(req("q","*:*", "wt","velocity",VelocityResponseWriter.TEMPLATE,"custom_tool")); rendered_props.load(new StringReader(rsp)); // ignore mytool.locale here, as it will be the random test one assertEquals("",rendered_props.getProperty("mytool.star")); assertEquals("",rendered_props.getProperty("log.star")); assertEquals("",rendered_props.getProperty("response.star")); // Now with custom tools defined: rsp = h.query(req("q","*:*", "wt","velocityWithCustomTools",VelocityResponseWriter.TEMPLATE,"custom_tool",VelocityResponseWriter.LOCALE, "de_DE")); rendered_props.clear(); rendered_props.load(new StringReader(rsp)); assertEquals("** LATERALUS **",rendered_props.getProperty("mytool.star")); assertEquals("** log overridden **",rendered_props.getProperty("log.star")); assertEquals("",rendered_props.getProperty("response.star")); assertEquals("de_DE",rendered_props.getProperty("mytool.locale")); // Turn off trusted configset, which disables the custom tool injection h.getCoreContainer().getCoreDescriptor(h.coreName).setConfigSetTrusted(false); assertFalse(h.getCoreContainer().getCoreDescriptor(coreName).isConfigSetTrusted()); rsp = h.query(req("q","*:*", "wt","velocityWithCustomTools",VelocityResponseWriter.TEMPLATE,"custom_tool",VelocityResponseWriter.LOCALE, "de_DE")); rendered_props.clear(); rendered_props.load(new StringReader(rsp)); assertEquals("",rendered_props.getProperty("mytool.star")); assertEquals("",rendered_props.getProperty("log.star")); assertEquals("",rendered_props.getProperty("response.star")); assertEquals("",rendered_props.getProperty("mytool.locale")); // set the harness back to the default of trusted h.getCoreContainer().getCoreDescriptor(h.coreName).setConfigSetTrusted(true); // Custom tools can also have a SolrCore-arg 
constructor because they are instantiated with SolrCore.createInstance // TODO: do we really need to support this? no great loss, as a custom tool could take a SolrCore object as a parameter to // TODO: any method, so one could do $mytool.my_method($request.core) // I'm currently inclined to make this feature undocumented/unsupported, as we may want to instantiate classes // in a different manner that only supports no-arg constructors, commented (passing) test case out // assertEquals("collection1", h.query(req("q","*:*", "wt","velocityWithCustomTools",VelocityResponseWriter.TEMPLATE,"t", // SolrParamResourceLoader.TEMPLATE_PARAM_PREFIX+"t", "$mytool.core.name"))) // - NOTE: example uses removed inline param; convert to external template as needed } @Test public void testLocaleFeature() throws Exception { assertEquals("Color", h.query(req("q", "*:*", "wt", "velocity", VelocityResponseWriter.TEMPLATE, "locale", VelocityResponseWriter.LOCALE,"en_US"))); assertEquals("Colour", h.query(req("q", "*:*", "wt", "velocity", VelocityResponseWriter.TEMPLATE, "locale", VelocityResponseWriter.LOCALE,"en_UK"))); // Test that $resource.get(key,baseName,locale) works with specified locale assertEquals("Colour", h.query(req("q","*:*", "wt","velocity",VelocityResponseWriter.TEMPLATE,"resource_get"))); // Test that $number tool uses the specified locale assertEquals("2,112", h.query(req("q","*:*", "wt","velocity",VelocityResponseWriter.TEMPLATE,"locale_number", VelocityResponseWriter.LOCALE, "en_US"))); assertEquals("2.112", h.query(req("q","*:*", "wt","velocity",VelocityResponseWriter.TEMPLATE,"locale_number", VelocityResponseWriter.LOCALE, "de_DE"))); } @Test public void testLayoutFeature() throws Exception { assertEquals("{{{0}}}", h.query(req("q","*:*", "wt","velocity", VelocityResponseWriter.TEMPLATE,"numFound", VelocityResponseWriter.LAYOUT,"layout"))); // even with v.layout specified, layout can be disabled explicitly assertEquals("0", h.query(req("q","*:*", "wt","velocity", VelocityResponseWriter.TEMPLATE,"numFound", VelocityResponseWriter.LAYOUT,"layout", VelocityResponseWriter.LAYOUT_ENABLED,"false"))); } @Test public void testJSONWrapper() throws Exception { assertEquals("foo({\"result\":\"0\"})", h.query(req("q", "*:*", "wt", "velocity", VelocityResponseWriter.TEMPLATE, "numFound", VelocityResponseWriter.JSON,"foo"))); // Now with layout, for good measure assertEquals("foo({\"result\":\"{{{0}}}\"})", h.query(req("q", "*:*", "wt", "velocity", VelocityResponseWriter.TEMPLATE, "numFound", VelocityResponseWriter.JSON,"foo", VelocityResponseWriter.LAYOUT,"layout"))); assertQEx("Bad function name should throw exception", req("q", "*:*", "wt", "velocity", VelocityResponseWriter.TEMPLATE, "numFound", VelocityResponseWriter.JSON,"<foo>"), SolrException.ErrorCode.BAD_REQUEST ); } @Test public void testContentType() { VelocityResponseWriter vrw = new VelocityResponseWriter(); NamedList<String> nl = new NamedList<>(); vrw.init(nl); SolrQueryResponse rsp = new SolrQueryResponse(); // with v.json=wrf, content type should default to application/json assertEquals("application/json;charset=UTF-8", vrw.getContentType(req(VelocityResponseWriter.TEMPLATE, "numFound", VelocityResponseWriter.JSON, "wrf"), rsp)); // with no v.json specified, the default text/html should be returned assertEquals("text/html;charset=UTF-8", vrw.getContentType(req(VelocityResponseWriter.TEMPLATE, "numFound"), rsp)); // if v.contentType is specified, that should be used, even if v.json is specified assertEquals("text/plain", 
vrw.getContentType(req(VelocityResponseWriter.TEMPLATE, "numFound", VelocityResponseWriter.CONTENT_TYPE,"text/plain"), rsp)); assertEquals("text/plain", vrw.getContentType(req(VelocityResponseWriter.TEMPLATE, "numFound", VelocityResponseWriter.JSON,"wrf", VelocityResponseWriter.CONTENT_TYPE,"text/plain"), rsp)); } }
1
41,030
For some reason these tests started failing due to an extra `NEWLINE` returned from `h.query()`. I have not checked whether this may be a problem for real templates; I just fixed the failing tests. Anyone with an opinion here?
apache-lucene-solr
java
@@ -42,6 +42,9 @@ type TraceContext struct{} var _ propagation.TextFormat = TraceContext{} var traceCtxRegExp = regexp.MustCompile("^[0-9a-f]{2}-[a-f0-9]{32}-[a-f0-9]{16}-[a-f0-9]{2}-?") +// DefaultPropagator is the default trace propagator. +var DefaultPropagator propagation.TextFormat = TraceContext{} + func (hp TraceContext) Inject(ctx context.Context, supplier propagation.Supplier) { sc := SpanFromContext(ctx).SpanContext() if sc.IsValid() {
1
// Copyright 2019, OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace import ( "context" "encoding/hex" "fmt" "net/url" "regexp" "strings" "go.opentelemetry.io/otel/api/core" "go.opentelemetry.io/otel/api/correlation" "go.opentelemetry.io/otel/api/key" "go.opentelemetry.io/otel/api/propagation" ) const ( supportedVersion = 0 maxVersion = 254 TraceparentHeader = "Traceparent" CorrelationContextHeader = "Correlation-Context" ) // TraceContext propagates SpanContext in W3C TraceContext format. //nolint:golint type TraceContext struct{} var _ propagation.TextFormat = TraceContext{} var traceCtxRegExp = regexp.MustCompile("^[0-9a-f]{2}-[a-f0-9]{32}-[a-f0-9]{16}-[a-f0-9]{2}-?") func (hp TraceContext) Inject(ctx context.Context, supplier propagation.Supplier) { sc := SpanFromContext(ctx).SpanContext() if sc.IsValid() { h := fmt.Sprintf("%.2x-%s-%.16x-%.2x", supportedVersion, sc.TraceIDString(), sc.SpanID, sc.TraceFlags&core.TraceFlagsSampled) supplier.Set(TraceparentHeader, h) } correlationCtx := correlation.FromContext(ctx) firstIter := true var headerValueBuilder strings.Builder correlationCtx.Foreach(func(kv core.KeyValue) bool { if !firstIter { headerValueBuilder.WriteRune(',') } firstIter = false headerValueBuilder.WriteString(url.QueryEscape(strings.TrimSpace((string)(kv.Key)))) headerValueBuilder.WriteRune('=') headerValueBuilder.WriteString(url.QueryEscape(strings.TrimSpace(kv.Value.Emit()))) return true }) if headerValueBuilder.Len() > 0 { headerString := headerValueBuilder.String() supplier.Set(CorrelationContextHeader, headerString) } } func (hp TraceContext) Extract( ctx context.Context, supplier propagation.Supplier, ) (core.SpanContext, correlation.Map) { return hp.extractSpanContext(ctx, supplier), hp.extractCorrelationCtx(ctx, supplier) } func (hp TraceContext) extractSpanContext( ctx context.Context, supplier propagation.Supplier, ) core.SpanContext { h := supplier.Get(TraceparentHeader) if h == "" { return core.EmptySpanContext() } h = strings.Trim(h, "-") if !traceCtxRegExp.MatchString(h) { return core.EmptySpanContext() } sections := strings.Split(h, "-") if len(sections) < 4 { return core.EmptySpanContext() } if len(sections[0]) != 2 { return core.EmptySpanContext() } ver, err := hex.DecodeString(sections[0]) if err != nil { return core.EmptySpanContext() } version := int(ver[0]) if version > maxVersion { return core.EmptySpanContext() } if version == 0 && len(sections) != 4 { return core.EmptySpanContext() } if len(sections[1]) != 32 { return core.EmptySpanContext() } var sc core.SpanContext sc.TraceID, err = core.TraceIDFromHex(sections[1][:32]) if err != nil { return core.EmptySpanContext() } if len(sections[2]) != 16 { return core.EmptySpanContext() } sc.SpanID, err = core.SpanIDFromHex(sections[2]) if err != nil { return core.EmptySpanContext() } if len(sections[3]) != 2 { return core.EmptySpanContext() } opts, err := hex.DecodeString(sections[3]) if err != nil || len(opts) < 1 || (version == 0 && opts[0] > 2) { return core.EmptySpanContext() 
} sc.TraceFlags = opts[0] &^ core.TraceFlagsUnused if !sc.IsValid() { return core.EmptySpanContext() } return sc } func (hp TraceContext) extractCorrelationCtx(ctx context.Context, supplier propagation.Supplier) correlation.Map { correlationContext := supplier.Get(CorrelationContextHeader) if correlationContext == "" { return correlation.NewEmptyMap() } contextValues := strings.Split(correlationContext, ",") keyValues := make([]core.KeyValue, 0, len(contextValues)) for _, contextValue := range contextValues { valueAndProps := strings.Split(contextValue, ";") if len(valueAndProps) < 1 { continue } nameValue := strings.Split(valueAndProps[0], "=") if len(nameValue) < 2 { continue } name, err := url.QueryUnescape(nameValue[0]) if err != nil { continue } trimmedName := strings.TrimSpace(name) value, err := url.QueryUnescape(nameValue[1]) if err != nil { continue } trimmedValue := strings.TrimSpace(value) // TODO (skaris): properties defiend https://w3c.github.io/correlation-context/, are currently // just put as part of the value. var trimmedValueWithProps strings.Builder trimmedValueWithProps.WriteString(trimmedValue) for _, prop := range valueAndProps[1:] { trimmedValueWithProps.WriteRune(';') trimmedValueWithProps.WriteString(prop) } keyValues = append(keyValues, key.New(trimmedName).String(trimmedValueWithProps.String())) } return correlation.NewMap(correlation.MapUpdate{ MultiKV: keyValues, }) } func (hp TraceContext) GetAllKeys() []string { return []string{TraceparentHeader, CorrelationContextHeader} }
1
11,026
It is better to have a method than a var.
open-telemetry-opentelemetry-go
go
@@ -58,9 +58,11 @@ func (s scannerCtxExecMgrFactory) NewExecutionManager(shardID int32) (persistenc const ( scannerContextKey = contextKey(0) - maxConcurrentActivityExecutionSize = 10 - maxConcurrentWorkflowTaskExecutionSize = 10 - infiniteDuration = 20 * 365 * 24 * time.Hour + maxConcurrentActivityExecutionSize = 10 + maxConcurrentWorkflowExecutionSize = 10 + maxConcurrentActivityTaskPollers = 16 + maxConcurrentWorkflowTaskPollers = 16 + infiniteDuration = 20 * 365 * 24 * time.Hour tqScannerWFID = "temporal-sys-tq-scanner" tqScannerWFTypeName = "temporal-sys-tq-scanner-workflow"
1
// The MIT License // // Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. // // Copyright (c) 2020 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package scanner import ( "context" "time" enumspb "go.temporal.io/api/enums/v1" "go.temporal.io/sdk/activity" "go.temporal.io/sdk/client" "go.temporal.io/sdk/temporal" "go.temporal.io/sdk/workflow" "go.temporal.io/server/common/log/tag" "go.temporal.io/server/common/persistence" "go.temporal.io/server/service/worker/scanner/executions" "go.temporal.io/server/service/worker/scanner/history" "go.temporal.io/server/service/worker/scanner/taskqueue" ) type ( contextKey int scannerCtxExecMgrFactory struct { ctx scannerContext } ) func (s scannerCtxExecMgrFactory) Close() {} func (s scannerCtxExecMgrFactory) NewExecutionManager(shardID int32) (persistence.ExecutionManager, error) { return s.ctx.GetExecutionManager(shardID) } const ( scannerContextKey = contextKey(0) maxConcurrentActivityExecutionSize = 10 maxConcurrentWorkflowTaskExecutionSize = 10 infiniteDuration = 20 * 365 * 24 * time.Hour tqScannerWFID = "temporal-sys-tq-scanner" tqScannerWFTypeName = "temporal-sys-tq-scanner-workflow" tqScannerTaskQueueName = "temporal-sys-tq-scanner-taskqueue-0" taskQueueScavengerActivityName = "temporal-sys-tq-scanner-scvg-activity" historyScannerWFID = "temporal-sys-history-scanner" historyScannerWFTypeName = "temporal-sys-history-scanner-workflow" historyScannerTaskQueueName = "temporal-sys-history-scanner-taskqueue-0" historyScavengerActivityName = "temporal-sys-history-scanner-scvg-activity" executionsScannerWFID = "temporal-sys-executions-scanner" executionsScannerWFTypeName = "temporal-sys-executions-scanner-workflow" executionsScannerTaskQueueName = "temporal-sys-executions-scanner-taskqueue-0" executionsScavengerActivityName = "temporal-sys-executions-scanner-scvg-activity" ) var ( tlScavengerHBInterval = 10 * time.Second executionsScavengerHBInterval = 10 * time.Second activityRetryPolicy = temporal.RetryPolicy{ InitialInterval: 10 * time.Second, BackoffCoefficient: 1.7, MaximumInterval: 5 * time.Minute, } activityOptions = workflow.ActivityOptions{ ScheduleToStartTimeout: 5 * time.Minute, StartToCloseTimeout: infiniteDuration, HeartbeatTimeout: 5 * time.Minute, RetryPolicy: &activityRetryPolicy, } tlScannerWFStartOptions = client.StartWorkflowOptions{ ID: tqScannerWFID, TaskQueue: tqScannerTaskQueueName, WorkflowRunTimeout: 5 * 24 * time.Hour, WorkflowIDReusePolicy: 
enumspb.WORKFLOW_ID_REUSE_POLICY_ALLOW_DUPLICATE, CronSchedule: "0 */12 * * *", } historyScannerWFStartOptions = client.StartWorkflowOptions{ ID: historyScannerWFID, TaskQueue: historyScannerTaskQueueName, WorkflowIDReusePolicy: enumspb.WORKFLOW_ID_REUSE_POLICY_ALLOW_DUPLICATE, CronSchedule: "0 */12 * * *", } executionsScannerWFStartOptions = client.StartWorkflowOptions{ ID: executionsScannerWFID, TaskQueue: executionsScannerTaskQueueName, WorkflowIDReusePolicy: enumspb.WORKFLOW_ID_REUSE_POLICY_ALLOW_DUPLICATE, CronSchedule: "0 */12 * * *", } ) // TaskQueueScannerWorkflow is the workflow that runs the task queue scanner background daemon func TaskQueueScannerWorkflow( ctx workflow.Context, ) error { future := workflow.ExecuteActivity(workflow.WithActivityOptions(ctx, activityOptions), taskQueueScavengerActivityName) return future.Get(ctx, nil) } // HistoryScannerWorkflow is the workflow that runs the history scanner background daemon func HistoryScannerWorkflow( ctx workflow.Context, ) error { future := workflow.ExecuteActivity( workflow.WithActivityOptions(ctx, activityOptions), historyScavengerActivityName, ) return future.Get(ctx, nil) } // ExecutionsScannerWorkflow is the workflow that runs the executions scanner background daemon func ExecutionsScannerWorkflow( ctx workflow.Context, ) error { future := workflow.ExecuteActivity(workflow.WithActivityOptions(ctx, activityOptions), executionsScavengerActivityName) return future.Get(ctx, nil) } // HistoryScavengerActivity is the activity that runs history scavenger func HistoryScavengerActivity( activityCtx context.Context, ) (history.ScavengerHeartbeatDetails, error) { ctx := activityCtx.Value(scannerContextKey).(scannerContext) rps := ctx.cfg.PersistenceMaxQPS() numShards := ctx.cfg.Persistence.NumHistoryShards hbd := history.ScavengerHeartbeatDetails{} if activity.HasHeartbeatDetails(activityCtx) { if err := activity.GetHeartbeatDetails(activityCtx, &hbd); err != nil { ctx.GetLogger().Error("Failed to recover from last heartbeat, start over from beginning", tag.Error(err)) } } scavenger := history.NewScavenger( numShards, ctx.GetHistoryManager(), rps, ctx.GetHistoryClient(), hbd, ctx.GetMetricsClient(), ctx.GetLogger(), ) return scavenger.Run(activityCtx) } // TaskQueueScavengerActivity is the activity that runs task queue scavenger func TaskQueueScavengerActivity( activityCtx context.Context, ) error { ctx := activityCtx.Value(scannerContextKey).(scannerContext) scavenger := taskqueue.NewScavenger(ctx.GetTaskManager(), ctx.GetMetricsClient(), ctx.GetLogger()) ctx.GetLogger().Info("Starting task queue scavenger") scavenger.Start() for scavenger.Alive() { activity.RecordHeartbeat(activityCtx) if activityCtx.Err() != nil { ctx.GetLogger().Info("activity context error, stopping scavenger", tag.Error(activityCtx.Err())) scavenger.Stop() return activityCtx.Err() } time.Sleep(tlScavengerHBInterval) } return nil } // ExecutionsScavengerActivity is the activity that runs executions scavenger func ExecutionsScavengerActivity( activityCtx context.Context, ) error { ctx := activityCtx.Value(scannerContextKey).(scannerContext) metricsClient := ctx.GetMetricsClient() scavenger := executions.NewScavenger( ctx.cfg.Persistence.NumHistoryShards, scannerCtxExecMgrFactory{ctx}, // as persistence.ExecutionManagerFactory ctx.GetHistoryManager(), metricsClient, ctx.GetLogger(), ) scavenger.Start() for scavenger.Alive() { activity.RecordHeartbeat(activityCtx) if activityCtx.Err() != nil { ctx.GetLogger().Info("activity context error, stopping scavenger", 
tag.Error(activityCtx.Err())) scavenger.Stop() return activityCtx.Err() } time.Sleep(executionsScavengerHBInterval) } return nil }
1
12,272
Concurrent pollers won't be larger than concurrent executions. The jump from 2 -> 16 seems aggressive; I suggest using 8. Ideally, this should be dynamic config. :)
temporalio-temporal
go
@@ -0,0 +1,5 @@ +_base_ = ['./yolact_r50.py'] + +model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) + +work_dir = './work_dirs/yolact_r101'
1
1
20,875
List is redundant.
open-mmlab-mmdetection
py
@@ -88,8 +88,9 @@ public class LoginActivity extends BaseActivity implements CustomTabActivityHelp setTitle(getString(R.string.txtSignIn)); setSupportActionBar(toolbar); - getSupportActionBar().setDisplayHomeAsUpEnabled(true); - + if(getSupportActionBar()!=null) { + getSupportActionBar().setDisplayHomeAsUpEnabled(true); + } userLoginUri = Uri.parse(getString(R.string.website) + "cgi/user.pl"); resetPasswordUri = Uri.parse(getString(R.string.website) + "cgi/reset_password.pl");
1
package openfoodfacts.github.scrachx.openfood.views; import android.app.Activity; import android.content.Context; import android.content.Intent; import android.content.SharedPreferences; import android.content.pm.ActivityInfo; import android.hardware.Sensor; import android.hardware.SensorManager; import android.net.Uri; import android.os.Bundle; import android.preference.PreferenceManager; import android.support.annotation.NonNull; import android.support.customtabs.CustomTabsIntent; import android.support.design.widget.Snackbar; import android.support.v4.content.ContextCompat; import android.support.v7.widget.Toolbar; import android.text.TextUtils; import android.util.Log; import android.widget.Button; import android.widget.EditText; import android.widget.LinearLayout; import android.widget.TextView; import android.widget.Toast; import com.afollestad.materialdialogs.MaterialDialog; import net.steamcrafted.loadtoast.LoadToast; import java.io.IOException; import java.net.HttpCookie; import butterknife.BindView; import butterknife.OnClick; import okhttp3.ResponseBody; import openfoodfacts.github.scrachx.openfood.BuildConfig; import openfoodfacts.github.scrachx.openfood.R; import openfoodfacts.github.scrachx.openfood.network.OpenFoodAPIService; import openfoodfacts.github.scrachx.openfood.utils.ShakeDetector; import openfoodfacts.github.scrachx.openfood.utils.Utils; import openfoodfacts.github.scrachx.openfood.views.customtabs.CustomTabActivityHelper; import openfoodfacts.github.scrachx.openfood.views.customtabs.CustomTabsHelper; import openfoodfacts.github.scrachx.openfood.views.customtabs.WebViewFallback; import retrofit2.Call; import retrofit2.Callback; import retrofit2.Response; import retrofit2.Retrofit; /** * A login screen that offers login via login/password. * This Activity connect to the Chrome Custom Tabs Service on startup to prefetch the url. 
*/ public class LoginActivity extends BaseActivity implements CustomTabActivityHelper.ConnectionCallback { @BindView(R.id.toolbar) Toolbar toolbar; @BindView(R.id.editTextLogin) EditText loginView; @BindView(R.id.editTextPass) EditText passwordView; @BindView(R.id.textInfoLogin) TextView infoLogin; @BindView(R.id.buttonSave) Button save; @BindView(R.id.buttonCreateAccount) Button signup; @BindView(R.id.login_linearlayout) LinearLayout linearLayout; private OpenFoodAPIService apiClient; private CustomTabActivityHelper customTabActivityHelper; private Uri userLoginUri; private Uri resetPasswordUri; private SensorManager mSensorManager; private Sensor mAccelerometer; private ShakeDetector mShakeDetector; // boolean to determine if scan on shake feature should be enabled private boolean scanOnShake; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); if (getResources().getBoolean(R.bool.portrait_only)) { setRequestedOrientation(ActivityInfo.SCREEN_ORIENTATION_PORTRAIT); } setContentView(R.layout.activity_login); setTitle(getString(R.string.txtSignIn)); setSupportActionBar(toolbar); getSupportActionBar().setDisplayHomeAsUpEnabled(true); userLoginUri = Uri.parse(getString(R.string.website) + "cgi/user.pl"); resetPasswordUri = Uri.parse(getString(R.string.website) + "cgi/reset_password.pl"); // prefetch the uri customTabActivityHelper = new CustomTabActivityHelper(); customTabActivityHelper.setConnectionCallback(this); customTabActivityHelper.mayLaunchUrl(userLoginUri, null, null); signup.setEnabled(false); final SharedPreferences settings = getSharedPreferences("login", 0); String loginS = settings.getString("user", getResources().getString(R.string.txt_anonymous)); if (!loginS.equals(getResources().getString(R.string.txt_anonymous))) { new MaterialDialog.Builder(this) .title(R.string.log_in) .content(R.string.login_true) .neutralText(R.string.ok_button) .show(); } apiClient = new Retrofit.Builder() .baseUrl(BuildConfig.HOST) .client(Utils.HttpClientBuilder()) .build() .create(OpenFoodAPIService.class); // Get the user preference for scan on shake feature and open ContinuousScanActivity if the user has enabled the feature mSensorManager = (SensorManager) getSystemService(Context.SENSOR_SERVICE); mAccelerometer = mSensorManager.getDefaultSensor(Sensor.TYPE_ACCELEROMETER); mShakeDetector = new ShakeDetector(); SharedPreferences shakePreference = PreferenceManager.getDefaultSharedPreferences(this); scanOnShake = shakePreference.getBoolean("shakeScanMode", false); mShakeDetector.setOnShakeListener(new ShakeDetector.OnShakeDetected() { @Override public void onShake(int count) { if (scanOnShake) { Utils.scan(LoginActivity.this); } } }); } @OnClick(R.id.buttonSave) protected void attemptLogin() { String login = loginView.getText().toString(); String password = passwordView.getText().toString(); if (TextUtils.isEmpty(login)) { loginView.setError(getString(R.string.error_field_required)); loginView.requestFocus(); return; } if (!(password.length() >= 6)) { passwordView.setError(getString(R.string.error_invalid_password)); passwordView.requestFocus(); return; } Snackbar snackbar = Snackbar .make(linearLayout, R.string.toast_retrieving, Snackbar.LENGTH_LONG); snackbar.show(); final LoadToast lt = new LoadToast(this); save.setClickable(false); lt.setText(getString(R.string.toast_retrieving)); lt.setBackgroundColor(ContextCompat.getColor(this, R.color.blue)); lt.setTextColor(ContextCompat.getColor(this, R.color.white)); lt.show(); final Activity context = this; 
apiClient.signIn(login, password, "Sign-in").enqueue(new Callback<ResponseBody>() { @Override public void onResponse(@NonNull Call<ResponseBody> call, @NonNull Response<ResponseBody> response) { if (!response.isSuccessful()) { Toast.makeText(context, context.getString(R.string.errorWeb), Toast.LENGTH_LONG).show(); Utils.hideKeyboard(context); return; } String htmlNoParsed = null; try { htmlNoParsed = response.body().string(); } catch (IOException e) { Log.e("LOGIN", "Unable to parse the login response page", e); } SharedPreferences.Editor editor = context.getSharedPreferences("login", 0).edit(); if (htmlNoParsed == null || htmlNoParsed.contains("Incorrect user name or password.") || htmlNoParsed.contains("See you soon!")) { Toast.makeText(context, context.getString(R.string.errorLogin), Toast.LENGTH_LONG).show(); passwordView.setText(""); editor.putString("user", ""); editor.putString("pass", ""); editor.apply(); infoLogin.setText(R.string.txtInfoLoginNo); lt.hide(); } else { // store the user session id (user_session and user_id) for (HttpCookie httpCookie : HttpCookie.parse(response.headers().get("set-cookie"))) { if (httpCookie.getDomain().equals(".openbeautyfacts.org") && httpCookie.getPath().equals("/")) { String[] cookieValues = httpCookie.getValue().split("&"); for (int i = 0; i < cookieValues.length; i++) { editor.putString(cookieValues[i], cookieValues[++i]); } break; } } Snackbar snackbar = Snackbar .make(linearLayout, R.string.connection, Snackbar.LENGTH_LONG); snackbar.show(); Toast.makeText(context, context.getResources().getText(R.string.txtToastSaved), Toast.LENGTH_LONG).show(); editor.putString("user", login); editor.putString("pass", password); editor.apply(); infoLogin.setText(R.string.txtInfoLoginOk); setResult(RESULT_OK, new Intent()); finish(); } Utils.hideKeyboard(context); } @Override public void onFailure(@NonNull Call<ResponseBody> call, @NonNull Throwable t) { Toast.makeText(context, context.getString(R.string.errorWeb), Toast.LENGTH_LONG).show(); Utils.hideKeyboard(context); t.printStackTrace(); } }); save.setClickable(true); } @OnClick(R.id.buttonCreateAccount) protected void onCreateUser() { CustomTabsIntent customTabsIntent = CustomTabsHelper.getCustomTabsIntent(getBaseContext(), customTabActivityHelper.getSession()); CustomTabActivityHelper.openCustomTab(this, customTabsIntent, userLoginUri, new WebViewFallback()); } @OnClick(R.id.forgotpassword) public void forgotpassword() { CustomTabsIntent customTabsIntent = CustomTabsHelper.getCustomTabsIntent(getBaseContext(), customTabActivityHelper.getSession()); CustomTabActivityHelper.openCustomTab(this, customTabsIntent, resetPasswordUri, new WebViewFallback()); } @Override public void onCustomTabsConnected() { signup.setEnabled(true); } @Override public void onCustomTabsDisconnected() { signup.setEnabled(false); } @Override protected void onStart() { super.onStart(); customTabActivityHelper.bindCustomTabsService(this); } @Override protected void onStop() { super.onStop(); customTabActivityHelper.unbindCustomTabsService(this); signup.setEnabled(false); } @Override protected void onDestroy() { super.onDestroy(); customTabActivityHelper.setConnectionCallback(null); } @Override public void onPause() { super.onPause(); if (scanOnShake) { // unregister the listener mSensorManager.unregisterListener(mShakeDetector, mAccelerometer); } } @Override public void onResume() { super.onResume(); if (scanOnShake) { //register the listener mSensorManager.registerListener(mShakeDetector, mAccelerometer, 
SensorManager.SENSOR_DELAY_UI); } } }
1
65,318
Please use a string resource here rather than the hardcoded string "user". This allows the app to be multilingual.
openfoodfacts-openfoodfacts-androidapp
java
@@ -18,13 +18,10 @@ package cstorpoolit import ( "testing" - "time" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" - "github.com/openebs/CITF" - "github.com/openebs/CITF/citf_options" apis "github.com/openebs/CITF/pkg/apis/openebs.io/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" )
1
/* Copyright 2018 The OpenEBS Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cstorpoolit import ( "testing" "time" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "github.com/openebs/CITF" "github.com/openebs/CITF/citf_options" apis "github.com/openebs/CITF/pkg/apis/openebs.io/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // TestIntegrationCstorPool function instantiate the cstor pool test suite. func TestIntegrationCstorPool(t *testing.T) { // RegisterFailHandler is used to register failed test cases and produce readable output. RegisterFailHandler(Fail) // RunSpecs runs all the test cases in the suite. RunSpecs(t, "Cstor pool integration test suite") } // Create an instance of CITF to use the inbuilt functions that will help // communicating with the kube-apiserver. var citfInstance, err = citf.NewCITF(&citfoptions.CreateOptions{ // K8SInclude is true to get the kube-config from the machine where the suite is running. // Kube-config is a config file that establishes communication to the k8s cluster. K8SInclude: true, }) // ToDo: Set up cluster environment before runninng all test cases ( i.e. BeforeSuite) // The environment set up by BeforeSuite is going to persist for all // the test cases under run //var _ = BeforeSuite(func() { // //var err error // // // //Expect(err).NotTo(HaveOccurred()) //}) // ToDo: Set up tear down of cluster environment ( i.e Aftersuite) // ToDo: Set up cluster environment before every test cases that will be run (i.e. preRunHook) // ToDo: Reset cluster environment after every test cases that will be run ( i.e postRunHook) var _ = Describe("Integration Test", func() { // Test Case #1 (sparse-striped-auto-spc). Type : Positive When("We apply sparse-striped-auto spc yaml with maxPool count equal to 3 on a k8s cluster having at least 3 capable node", func() { It("pool resources count should be 3 with no error and online status", func() { // TODO: Create a generic util function in utils.go to convert yaml into go object. // ToDo: More POC regarding this util converter function. // Functions generic to both cstor-pool and cstor-vol should go inside common directory // 1.Read SPC yaml form a file. // 2.Convert SPC yaml to json. // 3.Marshall json to SPC go object. // Create a storage pool claim object spcObject := &apis.StoragePoolClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "disk-claim-auto", }, Spec: apis.StoragePoolClaimSpec{ Name: "sparse-claim-auto", Type: "sparse", MaxPools: 3, PoolSpec: apis.CStorPoolAttr{ PoolType: "striped", }, }, } // Call CITF to create StoragePoolClaim in k8s. spcGot, err := citfInstance.K8S.CreateStoragePoolClaim(spcObject) Expect(err).To(BeNil()) // We expect nil error. // We expect 3 cstorPool objects. var maxRetry int var cspCount int maxRetry = 10 for i := 0; i < maxRetry; i++ { cspCount, err = getCstorPoolCount(spcGot.Name, citfInstance) if err != nil { break } if cspCount == 3 { break } time.Sleep(time.Second * 5) } Expect(cspCount).To(Equal(3)) // We expect 3 pool deployments. 
var deployCount int maxRetry = 10 for i := 0; i < maxRetry; i++ { deployCount, err = getPoolDeployCount(spcGot.Name, citfInstance) if err != nil { break } if deployCount == 3 { break } time.Sleep(time.Second * 5) } Expect(cspCount).To(Equal(3)) // We expect 3 storagePool objects. var spCount int maxRetry = 10 for i := 0; i < maxRetry; i++ { spCount, err = getStoragePoolCount(spcGot.Name, citfInstance) if err != nil { break } if spCount == 3 { break } time.Sleep(time.Second * 5) } Expect(spCount).To(Equal(3)) // We expect 'online' status on all the three cstorPool objects(i.e. 3 online counts) var onlineCspCount int maxRetry = 10 for i := 0; i < maxRetry; i++ { onlineCspCount, err = getCstorPoolStatus(spcGot.Name, citfInstance) if err != nil { break } if onlineCspCount == 3 { break } time.Sleep(time.Second * 5) } Expect(onlineCspCount).To(Equal(3)) }) }) // Test Case #2 (sparse-mirrored-auto-spc) Type:positive /*When("We apply sparse-mirrored-auto spc yaml with maxPool count equal to 3 on a k8s cluster having at least 3 capable node", func() { It("pool resources count should be 3 with no error and online status", func() { // Create a storage pool claim object // Call CITF to create StoragePoolClaim in k8s. // We expect nil error. // We expect 3 cstorPool objects. // We expect 3 pool deployments. // We expect 3 storagePool objects. // We expect 'online' status on all the three cstorPool objects(i.e. 3 online counts) }) })*/ // TODo: Add more test cases. Refer to following design doc // https://docs.google.com/document/d/1QAYK-Bsehc7v66kscXCiMJ7_pTIjzNmwyl43tF92gWA/edit })
1
10,370
@aswathkk Why did we remove this import? I guess we are using it.
openebs-maya
go
@@ -184,7 +184,6 @@ func (s *Service) Stop() { remainingTime = s.sleep(shardOwnershipTransferDelay, remainingTime) s.GetLogger().Info("ShutdownHandler: No longer taking rpc requests") - s.handler.PrepareToStop() remainingTime = s.sleep(gracePeriod, remainingTime) // TODO: Change this to GracefulStop when integration tests are refactored.
1
// The MIT License // // Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. // // Copyright (c) 2020 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package history import ( "sync/atomic" "time" "google.golang.org/grpc" healthpb "google.golang.org/grpc/health/grpc_health_v1" "go.temporal.io/server/api/historyservice/v1" "go.temporal.io/server/common" "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" "go.temporal.io/server/common/persistence" persistenceClient "go.temporal.io/server/common/persistence/client" espersistence "go.temporal.io/server/common/persistence/elasticsearch" "go.temporal.io/server/common/resource" "go.temporal.io/server/common/rpc" "go.temporal.io/server/common/service/config" "go.temporal.io/server/common/service/dynamicconfig" "go.temporal.io/server/service/history/configs" ) // Service represents the history service type Service struct { resource.Resource status int32 handler *Handler params *resource.BootstrapParams config *configs.Config server *grpc.Server } // NewService builds a new history service func NewService( params *resource.BootstrapParams, ) (resource.Resource, error) { serviceConfig := configs.NewConfig(dynamicconfig.NewCollection(params.DynamicConfig, params.Logger), params.PersistenceConfig.NumHistoryShards, params.PersistenceConfig.IsAdvancedVisibilityConfigExist()) params.PersistenceConfig.HistoryMaxConns = serviceConfig.HistoryMgrNumConns() params.PersistenceConfig.VisibilityConfig = &config.VisibilityConfig{ VisibilityOpenMaxQPS: serviceConfig.VisibilityOpenMaxQPS, VisibilityClosedMaxQPS: serviceConfig.VisibilityClosedMaxQPS, EnableSampling: serviceConfig.EnableVisibilitySampling, } visibilityManagerInitializer := func( persistenceBean persistenceClient.Bean, logger log.Logger, ) (persistence.VisibilityManager, error) { visibilityFromDB := persistenceBean.GetVisibilityManager() var visibilityFromES persistence.VisibilityManager if params.ESConfig != nil { visibilityProducer, err := params.MessagingClient.NewProducer(common.VisibilityAppName) if err != nil { logger.Fatal("Creating visibility producer failed", tag.Error(err)) } visibilityFromES = espersistence.NewESVisibilityManager("", nil, nil, visibilityProducer, params.MetricsClient, logger) } return persistence.NewVisibilityManagerWrapper( visibilityFromDB, visibilityFromES, dynamicconfig.GetBoolPropertyFnFilteredByNamespace(false), // history visibility never read serviceConfig.AdvancedVisibilityWritingMode, ), nil } 
serviceResource, err := resource.New( params, common.HistoryServiceName, serviceConfig.PersistenceMaxQPS, serviceConfig.PersistenceGlobalMaxQPS, serviceConfig.ThrottledLogRPS, visibilityManagerInitializer, ) if err != nil { return nil, err } return &Service{ Resource: serviceResource, status: common.DaemonStatusInitialized, params: params, config: serviceConfig, }, nil } // Start starts the service func (s *Service) Start() { if !atomic.CompareAndSwapInt32(&s.status, common.DaemonStatusInitialized, common.DaemonStatusStarted) { return } logger := s.GetLogger() logger.Info("elastic search config", tag.ESConfig(s.params.ESConfig)) logger.Info("history starting") s.handler = NewHandler(s.Resource, s.config) // must start resource first s.Resource.Start() s.handler.Start() opts, err := s.params.RPCFactory.GetInternodeGRPCServerOptions() if err != nil { logger.Fatal("creating grpc server options failed", tag.Error(err)) } opts = append( opts, grpc.ChainUnaryInterceptor(rpc.ServiceErrorInterceptor)) s.server = grpc.NewServer(opts...) historyservice.RegisterHistoryServiceServer(s.server, s.handler) healthpb.RegisterHealthServer(s.server, s.handler) listener := s.GetGRPCListener() logger.Info("Starting to serve on history listener") if err := s.server.Serve(listener); err != nil { logger.Fatal("Failed to serve on history listener", tag.Error(err)) } } // Stop stops the service func (s *Service) Stop() { if !atomic.CompareAndSwapInt32(&s.status, common.DaemonStatusStarted, common.DaemonStatusStopped) { return } // initiate graceful shutdown : // 1. remove self from the membership ring // 2. wait for other members to discover we are going down // 3. stop acquiring new shards (periodically or based on other membership changes) // 4. wait for shard ownership to transfer (and inflight requests to drain) while still accepting new requests // 5. Reject all requests arriving at rpc handler to avoid taking on more work except for RespondXXXCompleted and // RecordXXStarted APIs - for these APIs, most of the work is already one and rejecting at last stage is // probably not that desirable. If the shard is closed, these requests will fail anyways. // 6. wait for grace period // 7. force stop the whole world and return const gossipPropagationDelay = 400 * time.Millisecond const shardOwnershipTransferDelay = 5 * time.Second const gracePeriod = 2 * time.Second remainingTime := s.config.ShutdownDrainDuration() s.GetLogger().Info("ShutdownHandler: Evicting self from membership ring") s.GetMembershipMonitor().EvictSelf() s.GetLogger().Info("ShutdownHandler: Waiting for others to discover I am unhealthy") remainingTime = s.sleep(gossipPropagationDelay, remainingTime) s.GetLogger().Info("ShutdownHandler: Initiating shardController shutdown") s.handler.controller.PrepareToStop() s.GetLogger().Info("ShutdownHandler: Waiting for traffic to drain") remainingTime = s.sleep(shardOwnershipTransferDelay, remainingTime) s.GetLogger().Info("ShutdownHandler: No longer taking rpc requests") s.handler.PrepareToStop() remainingTime = s.sleep(gracePeriod, remainingTime) // TODO: Change this to GracefulStop when integration tests are refactored. s.server.Stop() s.handler.Stop() s.Resource.Stop() s.GetLogger().Info("history stopped") } // sleep sleeps for the minimum of desired and available duration // returns the remaining available time duration func (s *Service) sleep(desired time.Duration, available time.Duration) time.Duration { d := common.MinDuration(desired, available) if d > 0 { time.Sleep(d) } return available - d }
1
10,931
Why? I thought this was for graceful shutdown.
temporalio-temporal
go
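A minimal, self-contained Go sketch of the drain ordering the comment above is defending. The handler type and the sleep are illustrative stand-ins, not the real Temporal service; the point is only that rejecting new RPCs has to happen before the grace period for that grace period to drain anything.

package main

import (
	"fmt"
	"time"
)

type handler struct{}

// PrepareToStop flips the handler into a "reject new RPCs" mode so that only
// already-inflight work is still serviced during the grace period.
func (h *handler) PrepareToStop() { fmt.Println("handler: no longer taking rpc requests") }

// Stop hard-stops the handler once draining is done.
func (h *handler) Stop() { fmt.Println("handler: stopped") }

func main() {
	h := &handler{}
	// 1. drain shard ownership / traffic first (elided in this sketch)
	// 2. reject new RPCs *before* sleeping the grace period, so the grace
	//    period drains in-flight calls instead of admitting new ones
	h.PrepareToStop()
	time.Sleep(10 * time.Millisecond) // stands in for the shutdown grace period
	// 3. only then stop the server and the handler for real
	h.Stop()
}

If PrepareToStop is dropped as in the diff, step 2 disappears and the grace period elapses while the handler is still accepting fresh requests, which appears to be what the reviewer is questioning.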
@@ -59,10 +59,14 @@ module Bolt if result.instance_of? Bolt::TaskResult @stream.puts(indent(2, ::JSON.pretty_generate(result.value))) elsif result.instance_of? Bolt::CommandResult - @stream.puts(indent(2, "STDOUT:")) - @stream.puts(indent(4, result.stdout)) - @stream.puts(indent(2, "STDERR:")) - @stream.puts(indent(4, result.stderr)) + unless result.stdout.strip.empty? + @stream.puts(indent(2, "STDOUT:")) + @stream.puts(indent(4, result.stdout)) + end + unless result.stderr.strip.empty? + @stream.puts(indent(2, "STDERR:")) + @stream.puts(indent(4, result.stderr)) + end end end
1
module Bolt class Outputter class Human < Bolt::Outputter COLORS = { red: "31", green: "32", yellow: "33" }.freeze def print_head; end def colorize(color, string) if @stream.isatty "\033[#{COLORS[color]}m#{string}\033[0m" else string end end def indent(indent, string) indent = ' ' * indent string.gsub(/^/, indent.to_s) end def remove_trail(string) string.sub(/\s\z/, '') end def print_event(node, event) case event[:type] when :node_start print_start(node) when :node_result print_result(node, event[:result]) end end def print_start(node) @stream.puts(colorize(:green, "Started on #{node.host}...")) end def print_result(node, result) if result.success? @stream.puts(colorize(:green, "Finished on #{node.host}:")) else @stream.puts(colorize(:red, "Failed on #{node.host}:")) end if result.error if result.error['msg'] @stream.puts(colorize(:red, remove_trail(indent(2, result.error['msg'])))) else @stream.puts(colorize(:red, remove_trail(indent(2, result.error)))) end end if result.message @stream.puts(remove_trail(indent(2, result.message))) end if result.instance_of? Bolt::TaskResult @stream.puts(indent(2, ::JSON.pretty_generate(result.value))) elsif result.instance_of? Bolt::CommandResult @stream.puts(indent(2, "STDOUT:")) @stream.puts(indent(4, result.stdout)) @stream.puts(indent(2, "STDERR:")) @stream.puts(indent(4, result.stderr)) end end def print_summary(results, elapsed_time) @stream.puts format("Ran on %d node%s in %.2f seconds", results.size, results.size == 1 ? '' : 's', elapsed_time) end def print_plan(result) @stream.puts result end def fatal_error(e); end end end end
1
7,047
Can `result.stdout` or `result.stderr` be nil?
puppetlabs-bolt
rb
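A small Ruby sketch of the nil-safe variant the question above implies: if result.stdout or result.stderr can ever be nil, calling .strip on it would raise NoMethodError, and coercing with to_s first avoids that. CommandResult here is a toy Struct, not Bolt's real class, which may already guarantee strings.

# CommandResult is a toy stand-in; the real Bolt::CommandResult may never return nil here.
CommandResult = Struct.new(:stdout, :stderr)

def print_streams(result, io = $stdout)
  # to_s turns a possible nil into "", so .strip.empty? is always safe to call
  unless result.stdout.to_s.strip.empty?
    io.puts 'STDOUT:'
    io.puts result.stdout
  end
  unless result.stderr.to_s.strip.empty?
    io.puts 'STDERR:'
    io.puts result.stderr
  end
end

print_streams(CommandResult.new("hello\n", nil)) # prints only the STDOUT block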
@@ -130,7 +130,8 @@ public class PyIssueParserProvider implements BlazeIssueParserProvider { if (projectScope.contains(vf)) { return 0; } - return PythonSdkType.isStdLib(vf, sdk) ? 2 : 1; + return 2; + // return PythonSdkType.isStdLib(vf, sdk) ? 2 : 1; } /** defaults to -1 if no line number can be parsed. */
1
/* * Copyright 2017 The Bazel Authors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.idea.blaze.python.issueparser; import com.google.common.collect.ImmutableList; import com.google.idea.blaze.base.io.VfsUtils; import com.google.idea.blaze.base.issueparser.BlazeIssueParser; import com.google.idea.blaze.base.issueparser.BlazeIssueParser.Parser; import com.google.idea.blaze.base.issueparser.BlazeIssueParser.SingleLineParser; import com.google.idea.blaze.base.issueparser.BlazeIssueParserProvider; import com.google.idea.blaze.base.model.primitives.WorkspaceRoot; import com.google.idea.blaze.base.scope.output.IssueOutput; import com.google.idea.blaze.python.PySdkUtils; import com.intellij.openapi.fileEditor.OpenFileDescriptor; import com.intellij.openapi.project.Project; import com.intellij.openapi.projectRoots.Sdk; import com.intellij.openapi.util.TextRange; import com.intellij.openapi.vfs.VirtualFile; import com.intellij.pom.Navigatable; import com.intellij.pom.NavigatableAdapter; import com.intellij.psi.PsiFile; import com.intellij.psi.search.FilenameIndex; import com.intellij.psi.search.GlobalSearchScope; import com.jetbrains.python.sdk.PythonSdkType; import java.io.File; import java.util.Arrays; import java.util.Comparator; import java.util.regex.Matcher; import javax.annotation.Nullable; /** Finds python-specific errors in blaze build output. 
*/ public class PyIssueParserProvider implements BlazeIssueParserProvider { @Override public ImmutableList<Parser> getIssueParsers(Project project) { return ImmutableList.of(new PyTracebackIssueParser(project)); } private static class PyTracebackIssueParser extends SingleLineParser { final Project project; final WorkspaceRoot workspaceRoot; PyTracebackIssueParser(Project project) { super("File \"(.*?)\", line ([0-9]+), in (.*)"); this.project = project; this.workspaceRoot = WorkspaceRoot.fromProject(project); } @Nullable @Override protected IssueOutput createIssue(Matcher matcher) { String fileNameOrPath = matcher.group(1); if (fileNameOrPath == null) { return null; } // don't try to find PsiFiles here, just assume it's a relative path File file = BlazeIssueParser.fileFromRelativePath(workspaceRoot, fileNameOrPath); TextRange highlightRange = BlazeIssueParser.union( BlazeIssueParser.fileHighlightRange(matcher, 1), BlazeIssueParser.matchedTextRange(matcher, 2, 2)); return IssueOutput.error(matcher.group(0)) .inFile(file) .onLine(parseLineNumber(matcher.group(2))) .navigatable(openFileNavigatable(fileNameOrPath, parseLineNumber(matcher.group(2)))) .consoleHyperlinkRange(highlightRange) .build(); } private Navigatable openFileNavigatable(String fileName, int line) { return new NavigatableAdapter() { @Override public void navigate(boolean requestFocus) { openFile(fileName, line, requestFocus); } }; } private void openFile(String fileNameOrPath, int line, boolean requestFocus) { VirtualFile vf = findFile(fileNameOrPath); if (vf == null) { return; } new OpenFileDescriptor(project, vf, line - 1, -1).navigate(requestFocus); } @Nullable private VirtualFile findFile(String fileName) { // error messages can include just the file name, or the full workspace-relative path if (fileName.indexOf(File.separatorChar) == 0) { PsiFile file = findFileFromName(project, fileName); if (file != null) { return file.getVirtualFile(); } } File file = BlazeIssueParser.fileFromRelativePath(workspaceRoot, fileName); return file == null ? null : VfsUtils.resolveVirtualFile(file, /* refreshIfNeeded= */ true); } @Nullable private static PsiFile findFileFromName(Project project, String fileName) { GlobalSearchScope projectScope = GlobalSearchScope.projectScope(project); Sdk sdk = PySdkUtils.getPythonSdk(project); return Arrays.stream( FilenameIndex.getFilesByName(project, fileName, GlobalSearchScope.allScope(project))) .min(Comparator.comparingInt((psi) -> rankResult(psi, projectScope, sdk))) .orElse(null); } /** Used to sort search results, in order: {project, library, sdk, no virtual file} */ private static int rankResult(PsiFile file, GlobalSearchScope projectScope, Sdk sdk) { VirtualFile vf = file.getVirtualFile(); if (vf == null) { return 3; } if (projectScope.contains(vf)) { return 0; } return PythonSdkType.isStdLib(vf, sdk) ? 2 : 1; } /** defaults to -1 if no line number can be parsed. */ private static int parseLineNumber(@Nullable String string) { try { return string != null ? Integer.parseInt(string) : -1; } catch (NumberFormatException e) { return -1; } } } }
1
5,850
@mprobst Looks like the `isStdLib` function has been removed in the latest Python plugin. Any thoughts on how to handle this? The new version doesn't seem to have any relevant alternatives.
bazelbuild-intellij
java
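One possible fallback for the removed PythonSdkType.isStdLib, sketched against the generic IntelliJ Sdk API rather than anything Python-specific. Whether the Python plugin still exposes a real notion of "stdlib" is exactly the open question, so the root-containment check below is an assumption and an approximation; it compiles only against the IntelliJ platform the plugin already depends on.

import com.intellij.openapi.projectRoots.Sdk;
import com.intellij.openapi.roots.OrderRootType;
import com.intellij.openapi.vfs.VfsUtilCore;
import com.intellij.openapi.vfs.VirtualFile;

final class SdkFileRanker {
  private SdkFileRanker() {}

  /**
   * Rough approximation of the removed PythonSdkType#isStdLib check: treat a file as
   * SDK content if it lives under any of the SDK's classpath roots. This is a guess at
   * "good enough" behaviour, not the Python plugin's own definition of the stdlib.
   */
  static boolean isUnderSdkRoots(VirtualFile file, Sdk sdk) {
    if (sdk == null) {
      return false;
    }
    for (VirtualFile root : sdk.getRootProvider().getFiles(OrderRootType.CLASSES)) {
      if (VfsUtilCore.isAncestor(root, file, /* strict= */ false)) {
        return true;
      }
    }
    return false;
  }
}

With something like this, rankResult could keep its three-way ordering by returning 2 only when isUnderSdkRoots(...) is true, instead of unconditionally returning 2 as the diff currently does.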
@@ -32,6 +32,8 @@ type Config struct { RuleRendererOverride rules.RuleRenderer IPIPMTU int + MaxIPSetSize int + IptablesRefreshInterval time.Duration RulesConfig rules.Config
1
// Copyright (c) 2016-2017 Tigera, Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package intdataplane import ( log "github.com/Sirupsen/logrus" "github.com/projectcalico/felix/go/felix/ifacemonitor" "github.com/projectcalico/felix/go/felix/ipsets" "github.com/projectcalico/felix/go/felix/iptables" "github.com/projectcalico/felix/go/felix/jitter" "github.com/projectcalico/felix/go/felix/proto" "github.com/projectcalico/felix/go/felix/routetable" "github.com/projectcalico/felix/go/felix/rules" "github.com/projectcalico/felix/go/felix/set" "time" ) type Config struct { DisableIPv6 bool RuleRendererOverride rules.RuleRenderer IPIPMTU int IptablesRefreshInterval time.Duration RulesConfig rules.Config } // InternalDataplane implements an in-process Felix dataplane driver based on iptables // and ipsets. It communicates with the datastore-facing part of Felix via the // Send/RecvMessage methods, which operate on the protobuf-defined API objects. // // Architecture // // The internal dataplane driver is organised around a main event loop, which handles // update events from the datastore and dataplane. // // Each pass around the main loop has two phases. In the first phase, updates are fanned // out to "manager" objects, which calculate the changes that are needed and pass them to // the dataplane programming layer. In the second phase, the dataplane layer applies the // updates in a consistent sequence. The second phase is skipped until the datastore is // in sync; this ensures that the first update to the dataplane applies a consistent // snapshot. // // Having the dataplane layer batch updates has several advantages. It is much more // efficient to batch updates, since each call to iptables/ipsets has a high fixed cost. // In addition, it allows for different managers to make updates without having to // coordinate on their sequencing. // // Requirements on the API // // The internal dataplane does not do consistency checks on the incoming data (as the // old Python-based driver used to do). It expects to be told about dependent resources // before they are needed and for their lifetime to exceed that of the resources that // depend on them. For example, it is important the the datastore layer send an // IP set create event before it sends a rule that references that IP set. 
type InternalDataplane struct { toDataplane chan interface{} fromDataplane chan interface{} allIptablesTables []*iptables.Table iptablesNATTables []*iptables.Table iptablesRawTables []*iptables.Table iptablesFilterTables []*iptables.Table ipSetRegistries []*ipsets.Registry ifaceMonitor *ifacemonitor.InterfaceMonitor ifaceUpdates chan *ifaceUpdate ifaceAddrUpdates chan *ifaceAddrsUpdate endpointStatusCombiner *endpointStatusCombiner allManagers []Manager ruleRenderer rules.RuleRenderer interfacePrefixes []string routeTables []*routetable.RouteTable dataplaneNeedsSync bool refreshIptables bool cleanupPending bool config Config } func NewIntDataplaneDriver(config Config) *InternalDataplane { log.WithField("config", config).Info("Creating internal dataplane driver.") ruleRenderer := config.RuleRendererOverride if ruleRenderer == nil { ruleRenderer = rules.NewRenderer(config.RulesConfig) } dp := &InternalDataplane{ toDataplane: make(chan interface{}, 100), fromDataplane: make(chan interface{}, 100), ruleRenderer: ruleRenderer, interfacePrefixes: config.RulesConfig.WorkloadIfacePrefixes, cleanupPending: true, ifaceMonitor: ifacemonitor.New(), ifaceUpdates: make(chan *ifaceUpdate, 100), ifaceAddrUpdates: make(chan *ifaceAddrsUpdate, 100), config: config, } dp.ifaceMonitor.Callback = dp.onIfaceStateChange dp.ifaceMonitor.AddrCallback = dp.onIfaceAddrsChange natTableV4 := iptables.NewTable( "nat", 4, rules.AllHistoricChainNamePrefixes, rules.RuleHashPrefix, rules.HistoricInsertedNATRuleRegex, ) rawTableV4 := iptables.NewTable("raw", 4, rules.AllHistoricChainNamePrefixes, rules.RuleHashPrefix, "") filterTableV4 := iptables.NewTable("filter", 4, rules.AllHistoricChainNamePrefixes, rules.RuleHashPrefix, "") ipSetsConfigV4 := config.RulesConfig.IPSetConfigV4 ipSetRegV4 := ipsets.NewRegistry(ipSetsConfigV4) dp.iptablesNATTables = append(dp.iptablesNATTables, natTableV4) dp.iptablesRawTables = append(dp.iptablesRawTables, rawTableV4) dp.iptablesFilterTables = append(dp.iptablesFilterTables, filterTableV4) dp.ipSetRegistries = append(dp.ipSetRegistries, ipSetRegV4) routeTableV4 := routetable.New(config.RulesConfig.WorkloadIfacePrefixes, 4) dp.routeTables = append(dp.routeTables, routeTableV4) dp.endpointStatusCombiner = newEndpointStatusCombiner(dp.fromDataplane, !config.DisableIPv6) dp.RegisterManager(newIPSetsManager(ipSetRegV4)) dp.RegisterManager(newPolicyManager(filterTableV4, ruleRenderer, 4)) dp.RegisterManager(newEndpointManager( filterTableV4, ruleRenderer, routeTableV4, 4, config.RulesConfig.WorkloadIfacePrefixes, dp.endpointStatusCombiner.OnWorkloadEndpointStatusUpdate)) dp.RegisterManager(newMasqManager(ipSetRegV4, natTableV4, ruleRenderer, 1000000, 4)) if config.RulesConfig.IPIPEnabled { // Add a manger to keep the all-hosts IP set up to date. 
dp.RegisterManager(newIPIPManager(ipSetRegV4, 1000000)) // IPv4-only } if !config.DisableIPv6 { natTableV6 := iptables.NewTable( "nat", 6, rules.AllHistoricChainNamePrefixes, rules.RuleHashPrefix, rules.HistoricInsertedNATRuleRegex, ) rawTableV6 := iptables.NewTable("raw", 6, rules.AllHistoricChainNamePrefixes, rules.RuleHashPrefix, "") filterTableV6 := iptables.NewTable("filter", 6, rules.AllHistoricChainNamePrefixes, rules.RuleHashPrefix, "") ipSetsConfigV6 := config.RulesConfig.IPSetConfigV6 ipSetRegV6 := ipsets.NewRegistry(ipSetsConfigV6) dp.ipSetRegistries = append(dp.ipSetRegistries, ipSetRegV6) dp.iptablesNATTables = append(dp.iptablesNATTables, natTableV6) dp.iptablesRawTables = append(dp.iptablesRawTables, rawTableV6) dp.iptablesFilterTables = append(dp.iptablesFilterTables, filterTableV6) routeTableV6 := routetable.New(config.RulesConfig.WorkloadIfacePrefixes, 6) dp.routeTables = append(dp.routeTables, routeTableV6) dp.RegisterManager(newIPSetsManager(ipSetRegV6)) dp.RegisterManager(newPolicyManager(filterTableV6, ruleRenderer, 6)) dp.RegisterManager(newEndpointManager( filterTableV6, ruleRenderer, routeTableV6, 6, config.RulesConfig.WorkloadIfacePrefixes, dp.endpointStatusCombiner.OnWorkloadEndpointStatusUpdate)) dp.RegisterManager(newMasqManager(ipSetRegV6, natTableV6, ruleRenderer, 1000000, 6)) } for _, t := range dp.iptablesNATTables { dp.allIptablesTables = append(dp.allIptablesTables, t) } for _, t := range dp.iptablesFilterTables { dp.allIptablesTables = append(dp.allIptablesTables, t) } for _, t := range dp.iptablesRawTables { dp.allIptablesTables = append(dp.allIptablesTables, t) } return dp } type Manager interface { // OnUpdate is called for each protobuf message from the datastore. May either directly // send updates to the IPSets and iptables.Table objects (which will queue the updates // until the main loop instructs them to act) or (for efficiency) may wait until // a call to CompleteDeferredWork() to flush updates to the dataplane. OnUpdate(protoBufMsg interface{}) // Called before the main loop flushes updates to the dataplane to allow for batched // work to be completed. CompleteDeferredWork() error } func (d *InternalDataplane) RegisterManager(mgr Manager) { d.allManagers = append(d.allManagers, mgr) } func (d *InternalDataplane) Start() { go d.loopUpdatingDataplane() go d.loopReportingStatus() go d.ifaceMonitor.MonitorInterfaces() } // onIfaceStateChange is our interface monitor callback. It gets called from the monitor's thread. func (d *InternalDataplane) onIfaceStateChange(ifaceName string, state ifacemonitor.State) { log.WithFields(log.Fields{ "ifaceName": ifaceName, "state": state, }).Info("Linux interface state changed.") d.ifaceUpdates <- &ifaceUpdate{ Name: ifaceName, State: state, } } type ifaceUpdate struct { Name string State ifacemonitor.State } // onIfaceAddrsChange is our interface address monitor callback. It gets called // from the monitor's thread. 
func (d *InternalDataplane) onIfaceAddrsChange(ifaceName string, addrs set.Set) { log.WithFields(log.Fields{ "ifaceName": ifaceName, "addrs": addrs, }).Info("Linux interface addrs changed.") d.ifaceAddrUpdates <- &ifaceAddrsUpdate{ Name: ifaceName, Addrs: addrs, } } type ifaceAddrsUpdate struct { Name string Addrs set.Set } func (d *InternalDataplane) SendMessage(msg interface{}) error { d.toDataplane <- msg return nil } func (d *InternalDataplane) RecvMessage() (interface{}, error) { return <-d.fromDataplane, nil } func (d *InternalDataplane) loopUpdatingDataplane() { log.Info("Started internal iptables dataplane driver") // TODO Check global RPF value is sane (can't be "loose"). // Endure that the default value of rp_filter is set to "strict" for newly-created // interfaces. This is required to prevent a race between starting an interface and // Felix being able to configure it. writeProcSys("/proc/sys/net/ipv4/conf/default/rp_filter", "1") for _, t := range d.iptablesFilterTables { filterChains := d.ruleRenderer.StaticFilterTableChains(t.IPVersion) t.UpdateChains(filterChains) t.SetRuleInsertions("FORWARD", []iptables.Rule{{ Action: iptables.JumpAction{rules.ChainFilterForward}, }}) t.SetRuleInsertions("INPUT", []iptables.Rule{{ Action: iptables.JumpAction{rules.ChainFilterInput}, }}) t.SetRuleInsertions("OUTPUT", []iptables.Rule{{ Action: iptables.JumpAction{rules.ChainFilterOutput}, }}) } if d.config.RulesConfig.IPIPEnabled { log.Info("IPIP enabled, starting thread to keep tunnel configuration in sync.") go func() { log.Info("IPIP thread started.") for { err := configureIPIPDevice(d.config.IPIPMTU, d.config.RulesConfig.IPIPTunnelAddress) if err != nil { log.WithError(err).Warn("Failed configure IPIP tunnel device, retrying...") time.Sleep(1 * time.Second) continue } time.Sleep(10 * time.Second) } }() } else { log.Info("IPIP disabled. Not starting tunnel update thread.") } for _, t := range d.iptablesNATTables { t.UpdateChains(d.ruleRenderer.StaticNATTableChains(t.IPVersion)) t.SetRuleInsertions("PREROUTING", []iptables.Rule{{ Action: iptables.JumpAction{rules.ChainNATPrerouting}, }}) t.SetRuleInsertions("POSTROUTING", []iptables.Rule{{ Action: iptables.JumpAction{rules.ChainNATPostrouting}, }}) } // Retry any failed operations every 10s. 
retryTicker := time.NewTicker(10 * time.Second) var refreshC <-chan time.Time if d.config.IptablesRefreshInterval > 0 { refreshTicker := jitter.NewTicker( d.config.IptablesRefreshInterval, d.config.IptablesRefreshInterval/10, ) refreshC = refreshTicker.C } datastoreInSync := false for { select { case msg := <-d.toDataplane: log.WithField("msg", msg).Info("Received update from calculation graph") for _, mgr := range d.allManagers { mgr.OnUpdate(msg) } switch msg.(type) { case *proto.InSync: log.Info("Datastore in sync, flushing the dataplane for the first time...") datastoreInSync = true } d.dataplaneNeedsSync = true case ifaceUpdate := <-d.ifaceUpdates: log.WithField("msg", ifaceUpdate).Info("Received interface update") for _, mgr := range d.allManagers { mgr.OnUpdate(ifaceUpdate) } for _, routeTable := range d.routeTables { routeTable.OnIfaceStateChanged(ifaceUpdate.Name, ifaceUpdate.State) } d.dataplaneNeedsSync = true case ifaceAddrsUpdate := <-d.ifaceAddrUpdates: log.WithField("msg", ifaceAddrsUpdate).Info("Received interface addresses update") for _, mgr := range d.allManagers { mgr.OnUpdate(ifaceAddrsUpdate) } d.dataplaneNeedsSync = true case <-refreshC: log.Debug("Refreshing iptables dataplane state") d.refreshIptables = true d.dataplaneNeedsSync = true case <-retryTicker.C: } if datastoreInSync && d.dataplaneNeedsSync { d.apply() } } } func (d *InternalDataplane) apply() { // Update sequencing is important here because iptables rules have dependencies on ipsets. // Creating a rule that references an unknown IP set fails, as does deleting an IP set that // is in use. // Unset the needs-sync flag, we'll set it again if something fails. d.dataplaneNeedsSync = false // First, give the managers a chance to update IP sets and iptables. for _, mgr := range d.allManagers { err := mgr.CompleteDeferredWork() if err != nil { d.dataplaneNeedsSync = true } } // Next, create/update IP sets. We defer deletions of IP sets until after we update // iptables. for _, w := range d.ipSetRegistries { w.ApplyUpdates() } if d.refreshIptables { for _, t := range d.allIptablesTables { t.InvalidateDataplaneCache() } d.refreshIptables = false } // Update iptables, this should sever any references to now-unused IP sets. for _, t := range d.allIptablesTables { t.Apply() } // Update the routing table. for _, r := range d.routeTables { err := r.Apply() if err != nil { log.Warn("Failed to synchronize routing table, will retry...") d.dataplaneNeedsSync = true } } // Now clean up any left-over IP sets. for _, w := range d.ipSetRegistries { w.ApplyDeletions() } // And publish and status updates. d.endpointStatusCombiner.Apply() if d.cleanupPending { for _, w := range d.ipSetRegistries { w.AttemptCleanup() } d.cleanupPending = false } } func (d *InternalDataplane) loopReportingStatus() { log.Info("Started internal status report thread") start := time.Now() for { time.Sleep(10 * time.Second) now := time.Now() uptimeNanos := float64(now.Sub(start)) uptimeSecs := uptimeNanos / 1000000000 d.fromDataplane <- &proto.ProcessStatusUpdate{ IsoTimestamp: now.UTC().Format(time.RFC3339), Uptime: uptimeSecs, } } }
1
14,900
How can there be a configured max ipset size? Surely we need to write ipsets with however many members are implied by the Calico datastore?
projectcalico-felix
go
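Possible context for the question above (an assumption about the rationale, not something stated in this diff): kernel hash-type ipsets need a fixed maxelem chosen at creation time, so some ceiling has to be picked up front even though the actual membership is driven by the Calico datastore. A tiny Go sketch of the create line such a setting would feed to ipset restore; the set name is illustrative, not Felix's real naming scheme.

package main

import "fmt"

// buildCreateLine renders the "create" line that would be piped to `ipset restore`.
// maxElem is the kernel-side capacity of the set; it cannot grow after creation,
// which is why some configured ceiling has to exist at all.
func buildCreateLine(name, family string, maxElem int) string {
	return fmt.Sprintf("create %s hash:ip family %s maxelem %d", name, family, maxElem)
}

func main() {
	fmt.Println(buildCreateLine("example-all-hosts", "inet", 1048576))
	// prints: create example-all-hosts hash:ip family inet maxelem 1048576
}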
@@ -205,7 +205,7 @@ export default AbstractEditController.extend(IncidentStatuses, FriendlyId, Patie showDeleteAttachment(attachment) { let i18n = get(this, 'i18n'); - let message = i18n.t('incident.messages.deleteAttachment'); + let message = i18n.t('messages.delete_singular', { name: 'attachment' }); let model = Ember.Object.create({ itemToDelete: attachment });
1
import AbstractEditController from 'hospitalrun/controllers/abstract-edit-controller'; import DS from 'ember-data'; import Ember from 'ember'; import FriendlyId from 'hospitalrun/mixins/friendly-id'; import IncidentStatuses, { CLOSED } from 'hospitalrun/mixins/incident-statuses'; import PatientSubmodule from 'hospitalrun/mixins/patient-submodule'; import SelectValues from 'hospitalrun/utils/select-values'; import UserSession from 'hospitalrun/mixins/user-session'; const { PromiseArray, PromiseObject } = DS; const { computed, computed: { alias }, get, inject, set } = Ember; export default AbstractEditController.extend(IncidentStatuses, FriendlyId, PatientSubmodule, SelectValues, UserSession, { lookupListsToUpdate: [{ name: 'incidentDepartmentList', property: 'model.department', id: 'incident_departments' }], sequenceName: 'incident', sequenceView: 'incident_by_friendly_id', updateCapability: 'add_incident', customForms: inject.service(), database: inject.service(), filesystem: inject.service(), lookupLists: inject.service(), customFormsToAdd: alias('customForms.formsForSelect'), customFormsToDisplay: alias('customForms.formsToDisplay'), showAddFormButton: alias('customForms.showAddButton'), incidentController: inject.controller('incident'), canManageIncident: computed('model.{isNew,status}', function() { let canManageIncident = this.currentUserCan('manage_incidents'); let status = get(this, 'model.status'); let isNew = get(this, 'model.isNew'); if (isNew || status === CLOSED) { canManageIncident = false; } return canManageIncident; }), canUpdateStatus: computed('model.isNew', function() { let canManageIncident = this.currentUserCan('manage_incidents'); let isNew = get(this, 'model.isNew'); return canManageIncident && !isNew; }), categoryNameList: computed('[email protected]', function() { return PromiseArray.create({ promise: get(this, 'incidentCategoryList').then((categoryList) => { return categoryList.map((value) => { return { id: get(value, 'incidentCategoryName'), value: get(value, 'incidentCategoryName') }; }); }) }); }), incidentCategoryList: computed(function() { let lookupLists = get(this, 'lookupLists'); return lookupLists.getLookupList('incidentCategories'); }).volatile(), incidentDepartmentList: computed('lookupListsLastUpdate', function() { let lookupLists = get(this, 'lookupLists'); return PromiseObject.create({ promise: lookupLists.getLookupList('incident_departments') }); }).volatile(), incidentStatuses: computed(function() { return get(this, 'statusList').map((status) => { return { id: status, value: this.getLocalizedStatus(status) }; }); }), itemList: computed('model.categoryName', function() { let categoryNameSelected = get(this, 'model.categoryName'); if (!Ember.isEmpty(categoryNameSelected)) { return PromiseArray.create({ promise: get(this, 'incidentCategoryList').then((categoryList) => { let incidentCategory = categoryList.findBy('incidentCategoryName', categoryNameSelected); return get(incidentCategory, 'incidentCategoryItems'); }) }); } }), afterUpdate() { let i18n = get(this, 'i18n'); this.displayAlert(i18n.t('incident.titles.incidentSaved'), i18n.t('incident.messages.saved')); }, beforeUpdate() { let model = get(this, 'model'); set(model, 'modifiedByDisplayName', this.getUserName(false)); if (get(model, 'isNew')) { return this.generateFriendlyId('incident').then((friendlyId) => { set(model, 'friendlyId', friendlyId); }); } else { return Ember.RSVP.resolve(); } }, setupCustomForms() { let customForms = get(this, 'customForms'); let model = get(this, 'model'); 
customForms.setupForms('incident', model); }, /** * Adds or removes the specified object from the specified list. * @param {String} listName The name of the list to operate on. * @param {Object} listObject The object to add or removed from the * specified list. * @param {boolean} removeObject If true remove the object from the list; * otherwise add the specified object to the list. */ _updateList(listName, listObject, removeObject) { let model = get(this, 'model'); get(model, listName).then(function(list) { if (removeObject) { list.removeObject(listObject); } else { list.addObject(listObject); } this.send('update', true); this.send('closeModal'); }.bind(this)); }, actions: { addNote(newNote) { this._updateList('notes', newNote); }, addAttachment(newAttachment) { this._updateList('incidentAttachments', newAttachment); }, addCustomForm() { let model = get(this, 'model'); let customFormsToAdd = get(this, 'customFormsToAdd'); this.send('openModal', 'custom-form-add', Ember.Object.create({ modelToAddTo: model, customForms: customFormsToAdd })); }, showAddAttachment() { let newNote = get(this, 'store').createRecord('attachment', { dateAdded: new Date(), addedBy: this.getUserName(true), addedByDisplayName: this.getUserName(false), saveToDir: `/incidents/${get(this, 'model.id')}/` }); this.send('openModal', 'incident.attachment', newNote); }, showAddNote() { let newNote = get(this, 'store').createRecord('incident-note', { dateRecorded: new Date(), givenBy: this.getUserName(true), givenByDisplayName: this.getUserName(false) }); this.send('openModal', 'incident.note.edit', newNote); }, deleteAttachment(model) { let attachment = get(model, 'itemToDelete'); this._updateList('incidentAttachments', attachment, true); attachment.destroyRecord().then(() => { let attachmentId = get(attachment, 'id'); let database = get(this, 'database'); let filePath = get(attachment, 'fileName'); let fileSystem = get(this, 'filesystem'); let isFileSystemEnabled = get(fileSystem, 'isFileSystemEnabled'); if (isFileSystemEnabled) { let pouchDbId = database.getPouchId(attachmentId, 'attachment'); fileSystem.deleteFile(filePath, pouchDbId).catch((/* ignored */) => {}); } }); }, deleteNote(note) { this._updateList('notes', note, true); }, showDeleteAttachment(attachment) { let i18n = get(this, 'i18n'); let message = i18n.t('incident.messages.deleteAttachment'); let model = Ember.Object.create({ itemToDelete: attachment }); let title = i18n.t('incident.titles.deleteAttachment'); this.displayConfirm(title, message, 'deleteAttachment', model); }, showDeleteNote(note) { this.send('openModal', 'incident.note.delete', note); }, showEditAttachment(attachment) { this.send('openModal', 'incident.attachment', attachment); }, showEditNote(note) { this.send('openModal', 'incident.note.edit', note); } } });
1
13,481
This code is passing a non-localized string when it should pass a localized string, or it should use the name of the item being deleted.
HospitalRun-hospitalrun-frontend
js
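A sketch of what the comment above is asking for, in plain JavaScript: localize the item name as well before interpolating it. The key 'incident.labels.attachment' and the toy translator t are hypothetical; the real app would go through its i18n service and whatever key it actually defines.

// Hypothetical illustration only: 'incident.labels.attachment' is a made-up key and
// `t` is a toy translator standing in for the app's i18n service.
function deleteAttachmentMessage(t) {
  // Localize the item name first, then feed it into the generic delete message,
  // instead of interpolating the raw English word 'attachment'.
  const localizedName = t('incident.labels.attachment');
  return t('messages.delete_singular', { name: localizedName });
}

const translations = {
  'incident.labels.attachment': 'attachment',
  'messages.delete_singular': ({ name }) => `Are you sure you want to delete this ${name}?`
};
const t = (key, args) => {
  const entry = translations[key];
  return typeof entry === 'function' ? entry(args) : entry;
};

console.log(deleteAttachmentMessage(t)); // Are you sure you want to delete this attachment?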
@@ -75,6 +75,14 @@ public class ExpressionVisitors { return null; } + public <T> R isNaN(BoundReference<T> ref) { + throw new UnsupportedOperationException(this.getClass().getName() + " does not implement isNaN"); + } + + public <T> R notNaN(BoundReference<T> ref) { + throw new UnsupportedOperationException(this.getClass().getName() + " does not implement notNaN"); + } + public <T> R lt(BoundReference<T> ref, Literal<T> lit) { return null; }
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg.expressions; import java.util.Set; import org.apache.iceberg.exceptions.ValidationException; /** * Utils for traversing {@link Expression expressions}. */ public class ExpressionVisitors { private ExpressionVisitors() { } public abstract static class ExpressionVisitor<R> { public R alwaysTrue() { return null; } public R alwaysFalse() { return null; } public R not(R result) { return null; } public R and(R leftResult, R rightResult) { return null; } public R or(R leftResult, R rightResult) { return null; } public <T> R predicate(BoundPredicate<T> pred) { return null; } public <T> R predicate(UnboundPredicate<T> pred) { return null; } } /** * This base class is used by existing visitors that have not been updated to extend BoundExpressionVisitor. * * @deprecated use {@link BoundVisitor} instead */ @Deprecated public abstract static class BoundExpressionVisitor<R> extends ExpressionVisitor<R> { public <T> R isNull(BoundReference<T> ref) { return null; } public <T> R notNull(BoundReference<T> ref) { return null; } public <T> R lt(BoundReference<T> ref, Literal<T> lit) { return null; } public <T> R ltEq(BoundReference<T> ref, Literal<T> lit) { return null; } public <T> R gt(BoundReference<T> ref, Literal<T> lit) { return null; } public <T> R gtEq(BoundReference<T> ref, Literal<T> lit) { return null; } public <T> R eq(BoundReference<T> ref, Literal<T> lit) { return null; } public <T> R notEq(BoundReference<T> ref, Literal<T> lit) { return null; } public <T> R in(BoundReference<T> ref, Set<T> literalSet) { throw new UnsupportedOperationException("In operation is not supported by the visitor"); } public <T> R notIn(BoundReference<T> ref, Set<T> literalSet) { throw new UnsupportedOperationException("notIn operation is not supported by the visitor"); } public <T> R startsWith(BoundReference<T> ref, Literal<T> lit) { throw new UnsupportedOperationException("Unsupported operation."); } @Override public <T> R predicate(BoundPredicate<T> pred) { ValidationException.check(pred.term() instanceof BoundReference, "Visitor %s does not support expression: %s", this, pred.term()); if (pred.isLiteralPredicate()) { BoundLiteralPredicate<T> literalPred = pred.asLiteralPredicate(); switch (pred.op()) { case LT: return lt((BoundReference<T>) pred.term(), literalPred.literal()); case LT_EQ: return ltEq((BoundReference<T>) pred.term(), literalPred.literal()); case GT: return gt((BoundReference<T>) pred.term(), literalPred.literal()); case GT_EQ: return gtEq((BoundReference<T>) pred.term(), literalPred.literal()); case EQ: return eq((BoundReference<T>) pred.term(), literalPred.literal()); case NOT_EQ: return notEq((BoundReference<T>) pred.term(), literalPred.literal()); case STARTS_WITH: return 
startsWith((BoundReference<T>) pred.term(), literalPred.literal()); default: throw new IllegalStateException("Invalid operation for BoundLiteralPredicate: " + pred.op()); } } else if (pred.isUnaryPredicate()) { switch (pred.op()) { case IS_NULL: return isNull((BoundReference<T>) pred.term()); case NOT_NULL: return notNull((BoundReference<T>) pred.term()); default: throw new IllegalStateException("Invalid operation for BoundUnaryPredicate: " + pred.op()); } } else if (pred.isSetPredicate()) { switch (pred.op()) { case IN: return in((BoundReference<T>) pred.term(), pred.asSetPredicate().literalSet()); case NOT_IN: return notIn((BoundReference<T>) pred.term(), pred.asSetPredicate().literalSet()); default: throw new IllegalStateException("Invalid operation for BoundSetPredicate: " + pred.op()); } } throw new IllegalStateException("Unsupported bound predicate: " + pred.getClass().getName()); } @Override public <T> R predicate(UnboundPredicate<T> pred) { throw new UnsupportedOperationException("Not a bound predicate: " + pred); } } public abstract static class BoundVisitor<R> extends ExpressionVisitor<R> { public <T> R isNull(Bound<T> expr) { return null; } public <T> R notNull(Bound<T> expr) { return null; } public <T> R lt(Bound<T> expr, Literal<T> lit) { return null; } public <T> R ltEq(Bound<T> expr, Literal<T> lit) { return null; } public <T> R gt(Bound<T> expr, Literal<T> lit) { return null; } public <T> R gtEq(Bound<T> expr, Literal<T> lit) { return null; } public <T> R eq(Bound<T> expr, Literal<T> lit) { return null; } public <T> R notEq(Bound<T> expr, Literal<T> lit) { return null; } public <T> R in(Bound<T> expr, Set<T> literalSet) { throw new UnsupportedOperationException("In operation is not supported by the visitor"); } public <T> R notIn(Bound<T> expr, Set<T> literalSet) { throw new UnsupportedOperationException("notIn operation is not supported by the visitor"); } public <T> R startsWith(Bound<T> expr, Literal<T> lit) { throw new UnsupportedOperationException("Unsupported operation."); } @Override public <T> R predicate(BoundPredicate<T> pred) { if (pred.isLiteralPredicate()) { BoundLiteralPredicate<T> literalPred = pred.asLiteralPredicate(); switch (pred.op()) { case LT: return lt(pred.term(), literalPred.literal()); case LT_EQ: return ltEq(pred.term(), literalPred.literal()); case GT: return gt(pred.term(), literalPred.literal()); case GT_EQ: return gtEq(pred.term(), literalPred.literal()); case EQ: return eq(pred.term(), literalPred.literal()); case NOT_EQ: return notEq(pred.term(), literalPred.literal()); case STARTS_WITH: return startsWith(pred.term(), literalPred.literal()); default: throw new IllegalStateException("Invalid operation for BoundLiteralPredicate: " + pred.op()); } } else if (pred.isUnaryPredicate()) { switch (pred.op()) { case IS_NULL: return isNull(pred.term()); case NOT_NULL: return notNull(pred.term()); default: throw new IllegalStateException("Invalid operation for BoundUnaryPredicate: " + pred.op()); } } else if (pred.isSetPredicate()) { switch (pred.op()) { case IN: return in(pred.term(), pred.asSetPredicate().literalSet()); case NOT_IN: return notIn(pred.term(), pred.asSetPredicate().literalSet()); default: throw new IllegalStateException("Invalid operation for BoundSetPredicate: " + pred.op()); } } throw new IllegalStateException("Unsupported bound predicate: " + pred.getClass().getName()); } @Override public <T> R predicate(UnboundPredicate<T> pred) { throw new UnsupportedOperationException("Not a bound predicate: " + pred); } } /** * Traverses the 
given {@link Expression expression} with a {@link ExpressionVisitor visitor}. * <p> * The visitor will be called to handle each node in the expression tree in postfix order. Result * values produced by child nodes are passed when parent nodes are handled. * * @param expr an expression to traverse * @param visitor a visitor that will be called to handle each node in the expression tree * @param <R> the return type produced by the expression visitor * @return the value returned by the visitor for the root expression node */ public static <R> R visit(Expression expr, ExpressionVisitor<R> visitor) { if (expr instanceof Predicate) { if (expr instanceof BoundPredicate) { return visitor.predicate((BoundPredicate<?>) expr); } else { return visitor.predicate((UnboundPredicate<?>) expr); } } else { switch (expr.op()) { case TRUE: return visitor.alwaysTrue(); case FALSE: return visitor.alwaysFalse(); case NOT: Not not = (Not) expr; return visitor.not(visit(not.child(), visitor)); case AND: And and = (And) expr; return visitor.and(visit(and.left(), visitor), visit(and.right(), visitor)); case OR: Or or = (Or) expr; return visitor.or(visit(or.left(), visitor), visit(or.right(), visitor)); default: throw new UnsupportedOperationException( "Unknown operation: " + expr.op()); } } } /** * Traverses the given {@link Expression expression} with a {@link ExpressionVisitor visitor}. * <p> * The visitor will be called to handle only nodes required for determining result * in the expression tree in postfix order. Result values produced by child nodes * are passed when parent nodes are handled. * * @param expr an expression to traverse * @param visitor a visitor that will be called to handle each node in the expression tree * @return the value returned by the visitor for the root expression node */ public static Boolean visitEvaluator(Expression expr, ExpressionVisitor<Boolean> visitor) { if (expr instanceof Predicate) { if (expr instanceof BoundPredicate) { return visitor.predicate((BoundPredicate<?>) expr); } else { return visitor.predicate((UnboundPredicate<?>) expr); } } else { switch (expr.op()) { case TRUE: return visitor.alwaysTrue(); case FALSE: return visitor.alwaysFalse(); case NOT: Not not = (Not) expr; return visitor.not(visitEvaluator(not.child(), visitor)); case AND: And and = (And) expr; Boolean andLeftOperand = visitEvaluator(and.left(), visitor); if (!andLeftOperand) { return visitor.alwaysFalse(); } return visitor.and(Boolean.TRUE, visitEvaluator(and.right(), visitor)); case OR: Or or = (Or) expr; Boolean orLeftOperand = visitEvaluator(or.left(), visitor); if (orLeftOperand) { return visitor.alwaysTrue(); } return visitor.or(Boolean.FALSE, visitEvaluator(or.right(), visitor)); default: throw new UnsupportedOperationException( "Unknown operation: " + expr.op()); } } } }
1
27,678
Why not be consistent with `isNull` and `notNull` and return null?
apache-iceberg
java
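A standalone toy contrast of the two defaults the comment above is weighing; Visitor here is a stand-in, not Iceberg's ExpressionVisitors. Returning null matches the existing isNull/notNull convention (visitors that ignore the predicate just contribute nothing), while throwing makes missing NaN support fail loudly; which default is wanted is the design question, not something this sketch settles.

// Toy stand-in, not Iceberg's ExpressionVisitors.
abstract class Visitor<R> {
  // style used by isNull/notNull: an unhandled predicate quietly contributes nothing
  public R isNull() {
    return null;
  }

  // style used by the new isNaN/notNaN in the diff: an unhandled predicate fails loudly
  public R isNaN() {
    throw new UnsupportedOperationException(getClass().getName() + " does not implement isNaN");
  }
}

public class VisitorDefaultDemo {
  public static void main(String[] args) {
    Visitor<Boolean> v = new Visitor<Boolean>() {};
    System.out.println(v.isNull()); // prints "null": callers must tolerate a missing answer
    try {
      v.isNaN();
    } catch (UnsupportedOperationException e) {
      System.out.println(e.getMessage()); // fails fast instead of silently skipping
    }
  }
}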
@@ -41,6 +41,7 @@ type BackendServiceConfig struct { *Logging `yaml:"logging,flow"` Sidecars map[string]*SidecarConfig `yaml:"sidecars"` Network NetworkConfig `yaml:"network"` + DockerLabels map[string]string `yaml:"docker_labels,flow"` } type imageWithPortAndHealthcheck struct {
1
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package manifest import ( "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ecs" "github.com/aws/copilot-cli/internal/pkg/template" "github.com/imdario/mergo" ) const ( backendSvcManifestPath = "workloads/services/backend/manifest.yml" ) // BackendServiceProps represents the configuration needed to create a backend service. type BackendServiceProps struct { WorkloadProps Port uint16 HealthCheck *ContainerHealthCheck // Optional healthcheck configuration. } // BackendService holds the configuration to create a backend service manifest. type BackendService struct { Workload `yaml:",inline"` BackendServiceConfig `yaml:",inline"` // Use *BackendServiceConfig because of https://github.com/imdario/mergo/issues/146 Environments map[string]*BackendServiceConfig `yaml:",flow"` parser template.Parser } // BackendServiceConfig holds the configuration that can be overriden per environments. type BackendServiceConfig struct { ImageConfig imageWithPortAndHealthcheck `yaml:"image,flow"` ImageOverride `yaml:",inline"` TaskConfig `yaml:",inline"` *Logging `yaml:"logging,flow"` Sidecars map[string]*SidecarConfig `yaml:"sidecars"` Network NetworkConfig `yaml:"network"` } type imageWithPortAndHealthcheck struct { ServiceImageWithPort `yaml:",inline"` HealthCheck *ContainerHealthCheck `yaml:"healthcheck"` } // ContainerHealthCheck holds the configuration to determine if the service container is healthy. // See https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ecs-taskdefinition-healthcheck.html type ContainerHealthCheck struct { Command []string `yaml:"command"` Interval *time.Duration `yaml:"interval"` Retries *int `yaml:"retries"` Timeout *time.Duration `yaml:"timeout"` StartPeriod *time.Duration `yaml:"start_period"` } // NewBackendService applies the props to a default backend service configuration with // minimal task sizes, single replica, no healthcheck, and then returns it. func NewBackendService(props BackendServiceProps) *BackendService { svc := newDefaultBackendService() var healthCheck *ContainerHealthCheck if props.HealthCheck != nil { // Create the healthcheck field only if the caller specified a healthcheck. healthCheck = newDefaultContainerHealthCheck() healthCheck.apply(props.HealthCheck) } // Apply overrides. svc.Name = aws.String(props.Name) svc.BackendServiceConfig.ImageConfig.Image.Location = stringP(props.Image) svc.BackendServiceConfig.ImageConfig.Build.BuildArgs.Dockerfile = stringP(props.Dockerfile) svc.BackendServiceConfig.ImageConfig.Port = uint16P(props.Port) svc.BackendServiceConfig.ImageConfig.HealthCheck = healthCheck svc.parser = template.New() return svc } // MarshalBinary serializes the manifest object into a binary YAML document. // Implements the encoding.BinaryMarshaler interface. func (s *BackendService) MarshalBinary() ([]byte, error) { content, err := s.parser.Parse(backendSvcManifestPath, *s, template.WithFuncs(map[string]interface{}{ "fmtSlice": template.FmtSliceFunc, "quoteSlice": template.QuoteSliceFunc, "dirName": tplDirName, })) if err != nil { return nil, err } return content.Bytes(), nil } // BuildRequired returns if the service requires building from the local Dockerfile. 
func (s *BackendService) BuildRequired() (bool, error) { return requiresBuild(s.ImageConfig.Image) } // BuildArgs returns a docker.BuildArguments object for the service given a workspace root directory func (s *BackendService) BuildArgs(wsRoot string) *DockerBuildArgs { return s.ImageConfig.BuildConfig(wsRoot) } // ApplyEnv returns the service manifest with environment overrides. // If the environment passed in does not have any overrides then it returns itself. func (s BackendService) ApplyEnv(envName string) (*BackendService, error) { overrideConfig, ok := s.Environments[envName] if !ok { return &s, nil } // Apply overrides to the original service s. err := mergo.Merge(&s, BackendService{ BackendServiceConfig: *overrideConfig, }, mergo.WithOverride, mergo.WithOverwriteWithEmptyValue) if err != nil { return nil, err } s.Environments = nil return &s, nil } // newDefaultBackendService returns a backend service with minimal task sizes and a single replica. func newDefaultBackendService() *BackendService { return &BackendService{ Workload: Workload{ Type: aws.String(BackendServiceType), }, BackendServiceConfig: BackendServiceConfig{ ImageConfig: imageWithPortAndHealthcheck{}, TaskConfig: TaskConfig{ CPU: aws.Int(256), Memory: aws.Int(512), Count: Count{ Value: aws.Int(1), }, ExecuteCommand: ExecuteCommand{ Enable: aws.Bool(false), }, }, Network: NetworkConfig{ VPC: vpcConfig{ Placement: stringP(PublicSubnetPlacement), }, }, }, } } // newDefaultContainerHealthCheck returns container health check configuration // that's identical to a load balanced web service's defaults. func newDefaultContainerHealthCheck() *ContainerHealthCheck { return &ContainerHealthCheck{ Command: []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"}, Interval: durationp(10 * time.Second), Retries: aws.Int(2), Timeout: durationp(5 * time.Second), StartPeriod: durationp(0 * time.Second), } } // apply overrides the healthcheck's fields if other has them set. func (hc *ContainerHealthCheck) apply(other *ContainerHealthCheck) { if other.Command != nil { hc.Command = other.Command } if other.Interval != nil { hc.Interval = other.Interval } if other.Retries != nil { hc.Retries = other.Retries } if other.Timeout != nil { hc.Timeout = other.Timeout } if other.StartPeriod != nil { hc.StartPeriod = other.StartPeriod } } // applyIfNotSet changes the healthcheck's fields only if they were not set and the other healthcheck has them set. func (hc *ContainerHealthCheck) applyIfNotSet(other *ContainerHealthCheck) { if hc.Command == nil && other.Command != nil { hc.Command = other.Command } if hc.Interval == nil && other.Interval != nil { hc.Interval = other.Interval } if hc.Retries == nil && other.Retries != nil { hc.Retries = other.Retries } if hc.Timeout == nil && other.Timeout != nil { hc.Timeout = other.Timeout } if hc.StartPeriod == nil && other.StartPeriod != nil { hc.StartPeriod = other.StartPeriod } } // HealthCheckOpts converts the image's healthcheck configuration into a format parsable by the templates pkg. func (i imageWithPortAndHealthcheck) HealthCheckOpts() *ecs.HealthCheck { if i.HealthCheck == nil { return nil } return &ecs.HealthCheck{ Command: aws.StringSlice(i.HealthCheck.Command), Interval: aws.Int64(int64(i.HealthCheck.Interval.Seconds())), Retries: aws.Int64(int64(*i.HealthCheck.Retries)), StartPeriod: aws.Int64(int64(i.HealthCheck.StartPeriod.Seconds())), Timeout: aws.Int64(int64(i.HealthCheck.Timeout.Seconds())), } }
1
17,181
Did we forget to remove this field?
aws-copilot-cli
go
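For context on what the field would wire up if it is intentional rather than leftover: a docker_labels map in the manifest would unmarshal roughly like this. The struct and label keys below are toy stand-ins, not copilot's real manifest types, and the snippet assumes the gopkg.in/yaml.v3 module is available.

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// backendServiceConfig is a toy stand-in; only the field under discussion is modelled.
type backendServiceConfig struct {
	DockerLabels map[string]string `yaml:"docker_labels,flow"`
}

func main() {
	manifest := []byte(`
docker_labels:
  com.example.team: platform
  com.example.tier: backend
`)
	var cfg backendServiceConfig
	if err := yaml.Unmarshal(manifest, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.DockerLabels["com.example.team"]) // platform
}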
@@ -60,4 +60,9 @@ public class BaseRefreshableMap<K, V extends IRefreshable> extends HashMap<K, V> public BaseRefreshableMap<K, V> clone() { return (BaseRefreshableMap<K, V>) super.clone(); } + + @Override + public int elementCount() { + return this.values().stream().mapToInt(elem -> elem.elementCount()).sum(); + } }
1
/*
 * Copyright 2019 LinkedIn Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
package azkaban.executor;

import java.util.HashMap;
import java.util.HashSet;
import java.util.Set;

/**
 * Template Base Class to be capable to refresh Map Items from source object
 */
public class BaseRefreshableMap<K, V extends IRefreshable> extends HashMap<K, V>
    implements IRefreshable<BaseRefreshableMap<K, V>> {

  public BaseRefreshableMap<K, V> add(K key, V ramp) {
    this.put(key, ramp);
    return this;
  }

  public BaseRefreshableMap<K, V> delete(K id) {
    this.remove(id);
    return this;
  }

  @Override
  public BaseRefreshableMap<K, V> refresh(BaseRefreshableMap<K, V> source) {
    Set<K> mergedKeys = new HashSet();
    mergedKeys.addAll(this.keySet());
    mergedKeys.addAll(source.keySet());
    mergedKeys.stream().forEach(key -> {
      if (this.containsKey(key)) {
        if (source.containsKey(key)) {
          this.get(key).refresh(source.get(key));
        } else {
          this.remove(key);
        }
      } else {
        this.add(key, source.get(key));
      }
    });
    return this;
  }

  @Override
  public BaseRefreshableMap<K, V> clone() {
    return (BaseRefreshableMap<K, V>) super.clone();
  }
}
1
19,305
What if there is no element?
azkaban-azkaban
java
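On the question above: mapToInt(...).sum() over an empty values() stream is defined to return 0, so an empty BaseRefreshableMap would report a count of 0 rather than fail; the remaining edge case is whether each child's own elementCount() tolerates being empty. A toy demonstration with plain integers standing in for the children:

import java.util.HashMap;
import java.util.Map;

public class ElementCountDemo {
  public static void main(String[] args) {
    Map<String, Integer> childCounts = new HashMap<>();
    // Empty map: the stream has nothing to sum, and sum() of an empty IntStream is 0.
    int total = childCounts.values().stream().mapToInt(Integer::intValue).sum();
    System.out.println(total); // 0

    childCounts.put("rampA", 3);
    childCounts.put("rampB", 0); // a present-but-empty child still contributes 0
    System.out.println(childCounts.values().stream().mapToInt(Integer::intValue).sum()); // 3
  }
}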
@@ -52,8 +52,15 @@ func (c *Client) uploadAction(target *core.BuildTarget, isTest, isRun bool) (*pb return command, digest, err } -// buildAction creates a build action for a target and returns the command and the action digest digest. No uploading is done. +// buildAction creates a build action for a target and returns the command and the action digest. No uploading is done. func (c *Client) buildAction(target *core.BuildTarget, isTest, stamp bool) (*pb.Command, *pb.Digest, error) { + c.buildActionMutex.Lock() + defer c.buildActionMutex.Unlock() + + if d, ok := c.buildActions[target.Label]; ok { + return d.command, d.actionDigest, nil + } + inputRoot, err := c.uploadInputs(nil, target, isTest) if err != nil { return nil, nil, err
1
package remote import ( "context" "encoding/hex" "fmt" "os" "path" "runtime" "sort" "strings" "time" "github.com/bazelbuild/remote-apis-sdks/go/pkg/chunker" "github.com/bazelbuild/remote-apis-sdks/go/pkg/digest" "github.com/bazelbuild/remote-apis-sdks/go/pkg/filemetadata" "github.com/bazelbuild/remote-apis-sdks/go/pkg/tree" pb "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" "github.com/golang/protobuf/ptypes" "github.com/thought-machine/please/src/core" "github.com/thought-machine/please/src/fs" ) // uploadAction uploads a build action for a target and returns its digest. func (c *Client) uploadAction(target *core.BuildTarget, isTest, isRun bool) (*pb.Command, *pb.Digest, error) { var command *pb.Command var digest *pb.Digest err := c.uploadBlobs(func(ch chan<- *chunker.Chunker) error { defer close(ch) inputRoot, err := c.uploadInputs(ch, target, isTest || isRun) if err != nil { return err } inputRootChunker, _ := chunker.NewFromProto(inputRoot, int(c.client.ChunkMaxSize)) ch <- inputRootChunker command, err = c.buildCommand(target, inputRoot, isTest, isRun, target.Stamp) if err != nil { return err } commandChunker, _ := chunker.NewFromProto(command, int(c.client.ChunkMaxSize)) ch <- commandChunker actionChunker, _ := chunker.NewFromProto(&pb.Action{ CommandDigest: commandChunker.Digest().ToProto(), InputRootDigest: inputRootChunker.Digest().ToProto(), Timeout: ptypes.DurationProto(timeout(target, isTest)), }, int(c.client.ChunkMaxSize)) ch <- actionChunker digest = actionChunker.Digest().ToProto() return nil }) return command, digest, err } // buildAction creates a build action for a target and returns the command and the action digest digest. No uploading is done. func (c *Client) buildAction(target *core.BuildTarget, isTest, stamp bool) (*pb.Command, *pb.Digest, error) { inputRoot, err := c.uploadInputs(nil, target, isTest) if err != nil { return nil, nil, err } inputRootDigest := c.digestMessage(inputRoot) command, err := c.buildCommand(target, inputRoot, isTest, false, stamp) if err != nil { return nil, nil, err } commandDigest := c.digestMessage(command) actionDigest := c.digestMessage(&pb.Action{ CommandDigest: commandDigest, InputRootDigest: inputRootDigest, Timeout: ptypes.DurationProto(timeout(target, isTest)), }) return command, actionDigest, nil } // buildStampedAndUnstampedAction builds both a stamped and unstamped version of the action for a target, if it // needs stamping, otherwise it returns the same one twice. func (c *Client) buildStampedAndUnstampedAction(target *core.BuildTarget) (command *pb.Command, stamped, unstamped *pb.Digest, err error) { command, unstampedDigest, err := c.buildAction(target, false, false) if !target.Stamp || err != nil { return command, unstampedDigest, unstampedDigest, err } command, stampedDigest, err := c.buildAction(target, false, true) return command, stampedDigest, unstampedDigest, err } // buildCommand builds the command for a single target. func (c *Client) buildCommand(target *core.BuildTarget, inputRoot *pb.Directory, isTest, isRun, stamp bool) (*pb.Command, error) { if isTest { return c.buildTestCommand(target) } else if isRun { return c.buildRunCommand(target) } // We can't predict what variables like this should be so we sneakily bung something on // the front of the command. It'd be nicer if there were a better way though... var commandPrefix = "export TMP_DIR=\"`pwd`\" && " // TODO(peterebden): Remove this nonsense once API v2.1 is released. 
files, dirs := outputs(target) if len(target.Outputs()) == 1 { // $OUT is relative when running remotely; make it absolute commandPrefix += `export OUT="$TMP_DIR/$OUT" && ` } if target.IsRemoteFile { // Synthesize something for the Command proto. We never execute this, but it does get hashed for caching // purposes so it's useful to have it be a minimal expression of what we care about (for example, it should // not include the environment variables since we don't communicate those to the remote server). return &pb.Command{ Arguments: []string{ "fetch", strings.Join(target.AllURLs(c.state.Config), " "), "verify", strings.Join(target.Hashes, " "), }, OutputFiles: files, OutputDirectories: dirs, OutputPaths: append(files, dirs...), }, nil } cmd := target.GetCommand(c.state) if cmd == "" { cmd = "true" } cmd, err := core.ReplaceSequences(c.state, target, cmd) return &pb.Command{ Platform: c.platform, // We have to run everything through bash since our commands are arbitrary. // Unfortunately we can't just say "bash", we need an absolute path which is // a bit weird since it assumes that our absolute path is the same as the // remote one (which is probably OK on the same OS, but not between say Linux and // FreeBSD where bash is not idiomatically in the same place). Arguments: []string{ c.bashPath, "--noprofile", "--norc", "-u", "-o", "pipefail", "-c", commandPrefix + cmd, }, EnvironmentVariables: c.buildEnv(target, c.stampedBuildEnvironment(target, inputRoot, stamp), target.Sandbox), OutputFiles: files, OutputDirectories: dirs, OutputPaths: append(files, dirs...), }, err } // stampedBuildEnvironment returns a build environment, optionally with a stamp if stamp is true. func (c *Client) stampedBuildEnvironment(target *core.BuildTarget, inputRoot *pb.Directory, stamp bool) []string { if target.IsFilegroup { return core.GeneralBuildEnvironment(c.state.Config) // filegroups don't need a full build environment } else if !stamp { return core.BuildEnvironment(c.state, target, ".") } // We generate the stamp ourselves from the input root. // TODO(peterebden): it should include the target properties too... hash := c.sum(mustMarshal(inputRoot)) return core.StampedBuildEnvironment(c.state, target, hash, ".") } // buildTestCommand builds a command for a target when testing. func (c *Client) buildTestCommand(target *core.BuildTarget) (*pb.Command, error) { // TODO(peterebden): Remove all this nonsense once API v2.1 is released. files := make([]string, 0, 2) dirs := []string{} if target.NeedCoverage(c.state) { files = append(files, core.CoverageFile) } if !target.NoTestOutput { if target.HasLabel(core.TestResultsDirLabel) { dirs = []string{core.TestResultsFile} } else { files = append(files, core.TestResultsFile) } } const commandPrefix = "export TMP_DIR=\"`pwd`\" TEST_DIR=\"`pwd`\" && " cmd, err := core.ReplaceTestSequences(c.state, target, target.GetTestCommand(c.state)) if len(c.state.TestArgs) != 0 { cmd += " " + strings.Join(c.state.TestArgs, " ") } return &pb.Command{ Platform: &pb.Platform{ Properties: []*pb.Platform_Property{ { Name: "OSFamily", Value: translateOS(target.Subrepo), }, }, }, Arguments: []string{ c.bashPath, "--noprofile", "--norc", "-u", "-o", "pipefail", "-c", commandPrefix + cmd, }, EnvironmentVariables: c.buildEnv(nil, core.TestEnvironment(c.state, target, "."), target.TestSandbox), OutputFiles: files, OutputDirectories: dirs, OutputPaths: append(files, dirs...), }, err } // buildRunCommand builds the command to run a target remotely. 
func (c *Client) buildRunCommand(target *core.BuildTarget) (*pb.Command, error) { outs := target.Outputs() if len(outs) == 0 { return nil, fmt.Errorf("Target %s has no outputs, it can't be run with `plz run`", target) } return &pb.Command{ Platform: c.platform, Arguments: outs, EnvironmentVariables: c.buildEnv(target, core.GeneralBuildEnvironment(c.state.Config), false), }, nil } // uploadInputs finds and uploads a set of inputs from a target. func (c *Client) uploadInputs(ch chan<- *chunker.Chunker, target *core.BuildTarget, isTest bool) (*pb.Directory, error) { if target.IsRemoteFile { return &pb.Directory{}, nil } b, err := c.uploadInputDir(ch, target, isTest) if err != nil { return nil, err } return b.Root(ch), nil } func (c *Client) uploadInputDir(ch chan<- *chunker.Chunker, target *core.BuildTarget, isTest bool) (*dirBuilder, error) { b := newDirBuilder(c) for input := range c.iterInputs(target, isTest, target.IsFilegroup) { if l := input.Label(); l != nil { o := c.targetOutputs(*l) if o == nil { if dep := c.state.Graph.TargetOrDie(*l); dep.Local { // We have built this locally, need to upload its outputs if err := c.uploadLocalTarget(dep); err != nil { return nil, err } o = c.targetOutputs(*l) } else { // Classic "we shouldn't get here" stuff return nil, fmt.Errorf("Outputs not known for %s (should be built by now)", *l) } } pkgName := l.PackageName if target.IsFilegroup { pkgName = target.Label.PackageName } else if isTest && *l == target.Label { // At test time the target itself is put at the root rather than in the normal dir. // This is just How Things Are, so mimic it here. pkgName = "." } // Recall that (as noted in setOutputs) these can have full paths on them, which // we now need to sort out again to create well-formed Directory protos. for _, f := range o.Files { d := b.Dir(path.Join(pkgName, path.Dir(f.Name))) d.Files = append(d.Files, &pb.FileNode{ Name: path.Base(f.Name), Digest: f.Digest, IsExecutable: f.IsExecutable, }) } for _, d := range o.Directories { dir := b.Dir(path.Join(pkgName, path.Dir(d.Name))) dir.Directories = append(dir.Directories, &pb.DirectoryNode{ Name: path.Base(d.Name), Digest: d.Digest, }) if target.IsFilegroup { if err := c.addChildDirs(b, path.Join(pkgName, d.Name), d.Digest); err != nil { return b, err } } } for _, s := range o.Symlinks { d := b.Dir(path.Join(pkgName, path.Dir(s.Name))) d.Symlinks = append(d.Symlinks, &pb.SymlinkNode{ Name: path.Base(s.Name), Target: s.Target, }) } continue } if err := c.uploadInput(b, ch, input); err != nil { return nil, err } } if !isTest && target.Stamp { stamp := core.StampFile(target) chomk := chunker.NewFromBlob(stamp, int(c.client.ChunkMaxSize)) if ch != nil { ch <- chomk } d := b.Dir(".") d.Files = append(d.Files, &pb.FileNode{ Name: target.StampFileName(), Digest: chomk.Digest().ToProto(), }) } return b, nil } // addChildDirs adds a set of child directories to a builder. func (c *Client) addChildDirs(b *dirBuilder, name string, dg *pb.Digest) error { dir := &pb.Directory{} if err := c.client.ReadProto(context.Background(), digest.NewFromProtoUnvalidated(dg), dir); err != nil { return err } d := b.Dir(name) d.Directories = append(d.Directories, dir.Directories...) d.Files = append(d.Files, dir.Files...) d.Symlinks = append(d.Symlinks, dir.Symlinks...) for _, subdir := range dir.Directories { if err := c.addChildDirs(b, path.Join(name, subdir.Name), subdir.Digest); err != nil { return err } } return nil } // uploadInput finds and uploads a single input. 
func (c *Client) uploadInput(b *dirBuilder, ch chan<- *chunker.Chunker, input core.BuildInput) error { fullPaths := input.FullPaths(c.state.Graph) for i, out := range input.Paths(c.state.Graph) { in := fullPaths[i] if err := fs.Walk(in, func(name string, isDir bool) error { if isDir { return nil // nothing to do } dest := path.Join(out, name[len(in):]) d := b.Dir(path.Dir(dest)) // Now handle the file itself info, err := os.Lstat(name) if err != nil { return err } if info.Mode()&os.ModeSymlink != 0 { link, err := os.Readlink(name) if err != nil { return err } d.Symlinks = append(d.Symlinks, &pb.SymlinkNode{ Name: path.Base(dest), Target: link, }) return nil } h, err := c.state.PathHasher.Hash(name, false, true) if err != nil { return err } dg := &pb.Digest{ Hash: hex.EncodeToString(h), SizeBytes: info.Size(), } d.Files = append(d.Files, &pb.FileNode{ Name: path.Base(dest), Digest: dg, IsExecutable: info.Mode()&0100 != 0, }) if ch != nil { ch <- chunker.NewFromFile(name, digest.NewFromProtoUnvalidated(dg), int(c.client.ChunkMaxSize)) } return nil }); err != nil { return err } } return nil } // iterInputs yields all the input files needed for a target. func (c *Client) iterInputs(target *core.BuildTarget, isTest, isFilegroup bool) <-chan core.BuildInput { if !isTest { return core.IterInputs(c.state.Graph, target, true, isFilegroup) } ch := make(chan core.BuildInput) go func() { ch <- target.Label for _, datum := range target.AllData() { ch <- datum } close(ch) }() return ch } // buildMetadata converts an ActionResult into one of our BuildMetadata protos. // N.B. this always returns a non-nil metadata object for the first response. func (c *Client) buildMetadata(ar *pb.ActionResult, needStdout, needStderr bool) (*core.BuildMetadata, error) { metadata := &core.BuildMetadata{ Stdout: ar.StdoutRaw, Stderr: ar.StderrRaw, } if needStdout && len(metadata.Stdout) == 0 && ar.StdoutDigest != nil { b, err := c.client.ReadBlob(context.Background(), digest.NewFromProtoUnvalidated(ar.StdoutDigest)) if err != nil { return metadata, err } metadata.Stdout = b } if needStderr && len(metadata.Stderr) == 0 && ar.StderrDigest != nil { b, err := c.client.ReadBlob(context.Background(), digest.NewFromProtoUnvalidated(ar.StderrDigest)) if err != nil { return metadata, err } metadata.Stderr = b } return metadata, nil } // digestForFilename returns the digest for an output of the given name, or nil if it doesn't exist. func (c *Client) digestForFilename(ar *pb.ActionResult, name string) *pb.Digest { for _, file := range ar.OutputFiles { if file.Path == name { return file.Digest } } return nil } // downloadAllFiles returns the contents of all files in the given action result func (c *Client) downloadAllPrefixedFiles(ar *pb.ActionResult, prefix string) ([][]byte, error) { outs, err := c.client.FlattenActionOutputs(context.Background(), ar) if err != nil { return nil, err } ret := [][]byte{} for name, out := range outs { if strings.HasPrefix(name, prefix) { blob, err := c.client.ReadBlob(context.Background(), out.Digest) if err != nil { return nil, err } ret = append(ret, blob) } } return ret, nil } // verifyActionResult verifies that all the requested outputs actually exist in a returned // ActionResult. Servers do not necessarily verify this but we need to make sure they are // complete for future requests. 
func (c *Client) verifyActionResult(target *core.BuildTarget, command *pb.Command, actionDigest *pb.Digest, ar *pb.ActionResult, verifyOutputs bool) error { outs := make(map[string]bool, len(ar.OutputFiles)+len(ar.OutputDirectories)+len(ar.OutputFileSymlinks)+len(ar.OutputDirectorySymlinks)) for _, f := range ar.OutputFiles { outs[f.Path] = true } for _, f := range ar.OutputDirectories { outs[f.Path] = true } for _, f := range ar.OutputFileSymlinks { outs[f.Path] = true } for _, f := range ar.OutputDirectorySymlinks { outs[f.Path] = true } for _, out := range command.OutputFiles { if !outs[out] { return fmt.Errorf("Remote build action for %s failed to produce output %s%s", target, out, c.actionURL(actionDigest, true)) } } for _, out := range command.OutputDirectories { if !outs[out] { return fmt.Errorf("Remote build action for %s failed to produce output %s%s", target, out, c.actionURL(actionDigest, true)) } } if !verifyOutputs { return nil } start := time.Now() // Do more in-depth validation that blobs exist remotely. outputs, err := c.client.FlattenActionOutputs(context.Background(), ar) if err != nil { return fmt.Errorf("Failed to verify action result: %s", err) } // At this point it's verified all the directories, but not the files themselves. digests := make([]digest.Digest, 0, len(outputs)) for _, output := range outputs { // FlattenTree doesn't populate the digest in for empty dirs... we don't need to check them anyway if !output.IsEmptyDirectory { digests = append(digests, output.Digest) } } if missing, err := c.client.MissingBlobs(context.Background(), digests); err != nil { return fmt.Errorf("Failed to verify action result outputs: %s", err) } else if len(missing) != 0 { return fmt.Errorf("Action result missing %d blobs", len(missing)) } log.Debug("Verified action result for %s in %s", target, time.Since(start)) return nil } // uploadLocalTarget uploads the outputs of a target that was built locally. func (c *Client) uploadLocalTarget(target *core.BuildTarget) error { m, ar, err := tree.ComputeOutputsToUpload(target.OutDir(), target.Outputs(), int(c.client.ChunkMaxSize), filemetadata.NewNoopCache()) if err != nil { return err } chomks := make([]*chunker.Chunker, 0, len(m)) for _, c := range m { chomks = append(chomks, c) } if err := c.client.UploadIfMissing(context.Background(), chomks...); err != nil { return err } return c.setOutputs(target, ar) } // translateOS converts the OS name of a subrepo into a Bazel-style OS name. func translateOS(subrepo *core.Subrepo) string { if subrepo == nil { return reallyTranslateOS(runtime.GOOS) } return reallyTranslateOS(subrepo.Arch.OS) } func reallyTranslateOS(os string) string { switch os { case "darwin": return "macos" default: return os } } // buildEnv translates the set of environment variables for this target to a proto. func (c *Client) buildEnv(target *core.BuildTarget, env []string, sandbox bool) []*pb.Command_EnvironmentVariable { if sandbox { env = append(env, "SANDBOX=true") } if target != nil { if target.IsBinary { env = append(env, "_BINARY=true") } } sort.Strings(env) // Proto says it must be sorted (not just consistently ordered :( ) vars := make([]*pb.Command_EnvironmentVariable, len(env)) for i, e := range env { idx := strings.IndexByte(e, '=') vars[i] = &pb.Command_EnvironmentVariable{ Name: e[:idx], Value: e[idx+1:], } } return vars }
1
9,130
I think you need to be careful to honour isTest and stamp here.
thought-machine-please
go
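
The review comment on this entry ("honour isTest and stamp") points at how those two flags are threaded through `buildAction` and `buildStampedAndUnstampedAction` in the file above, where `isTest` is hard-coded to `false`. The patch under review is not reproduced in this record, so the Go sketch below is only an illustration of forwarding both flags instead of hard-coding them; the extra `isTest` parameter is an assumption made for the example, not the project's actual change, and the sketch relies on the surrounding package (`core`, `pb`) shown in the file above.

```go
// Illustrative only: a hypothetical variant that forwards isTest rather than
// hard-coding it to false, while still building the stamped and unstamped
// actions with stamp=true and stamp=false respectively.
func (c *Client) buildStampedAndUnstampedAction(target *core.BuildTarget, isTest bool) (command *pb.Command, stamped, unstamped *pb.Digest, err error) {
	// Unstamped variant: honour isTest, stamp = false.
	command, unstampedDigest, err := c.buildAction(target, isTest, false)
	if !target.Stamp || err != nil {
		return command, unstampedDigest, unstampedDigest, err
	}
	// Stamped variant: honour isTest, stamp = true.
	command, stampedDigest, err := c.buildAction(target, isTest, true)
	return command, stampedDigest, unstampedDigest, err
}
```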
@@ -14,10 +14,16 @@ */ package com.google.api.codegen.transformer.nodejs; +import com.google.api.codegen.config.MethodConfig; import com.google.api.codegen.transformer.ApiMethodParamTransformer; import com.google.api.codegen.transformer.MethodTransformerContext; +import com.google.api.codegen.transformer.SurfaceNamer; +import com.google.api.codegen.util.Name; import com.google.api.codegen.viewmodel.DynamicLangDefaultableParamView; import com.google.api.codegen.viewmodel.ParamDocView; +import com.google.api.codegen.viewmodel.SimpleParamDocView; +import com.google.api.tools.framework.model.Field; +import com.google.api.tools.framework.model.TypeRef; import com.google.common.collect.ImmutableList; import java.util.List;
1
/* Copyright 2017 Google Inc * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.api.codegen.transformer.nodejs; import com.google.api.codegen.transformer.ApiMethodParamTransformer; import com.google.api.codegen.transformer.MethodTransformerContext; import com.google.api.codegen.viewmodel.DynamicLangDefaultableParamView; import com.google.api.codegen.viewmodel.ParamDocView; import com.google.common.collect.ImmutableList; import java.util.List; public class NodeJSApiMethodParamTransformer implements ApiMethodParamTransformer { @Override public List<DynamicLangDefaultableParamView> generateMethodParams( MethodTransformerContext context) { // TODO(eoogbe): implement this method when migrating to MVVM return ImmutableList.<DynamicLangDefaultableParamView>of(); } @Override public List<ParamDocView> generateParamDocs(MethodTransformerContext context) { // TODO(eoogbe): implement this method when migrating to MVVM return ImmutableList.<ParamDocView>of(); } }
1
21,599
Why is the zero value of `arrayType` used as the default value for the optional args?
googleapis-gapic-generator
java
@@ -264,6 +264,14 @@ class Filter extends WidgetBase $value = post('options.value.' . $scope->scopeName) ?: null; $this->setScopeValue($scope, $value); break; + + case 'clear': + foreach ($this->getScopes() as $scope) { + $this->setScopeValue($scope, null); + } + + return ['.control-filter' => $this->render()]; + break; } /*
1
<?php namespace Backend\Widgets; use Db; use Str; use Lang; use Backend; use DbDongle; use Carbon\Carbon; use Backend\Classes\WidgetBase; use Backend\Classes\FilterScope; use ApplicationException; /** * Filter Widget * Renders a container used for filtering things. * * @package october\backend * @author Alexey Bobkov, Samuel Georges */ class Filter extends WidgetBase { // // Configurable properties // /** * @var array Scope definition configuration. */ public $scopes; /** * @var string The context of this filter, scopes that do not belong * to this context will not be shown. */ public $context; // // Object properties // /** * @inheritDoc */ protected $defaultAlias = 'filter'; /** * @var boolean Determines if scope definitions have been created. */ protected $scopesDefined = false; /** * @var array Collection of all scopes used in this filter. */ protected $allScopes = []; /** * @var array Collection of all scopes models used in this filter. */ protected $scopeModels = []; /** * @var array List of CSS classes to apply to the filter container element */ public $cssClasses = []; /** * Initialize the widget, called by the constructor and free from its parameters. */ public function init() { $this->fillFromConfig([ 'scopes', 'context', ]); } /** * Renders the widget. */ public function render() { $this->prepareVars(); return $this->makePartial('filter'); } /** * Prepares the view data */ public function prepareVars() { $this->defineFilterScopes(); $this->vars['cssClasses'] = implode(' ', $this->cssClasses); $this->vars['scopes'] = $this->allScopes; } /** * Renders the HTML element for a scope */ public function renderScopeElement($scope) { $params = ['scope' => $scope]; switch ($scope->type) { case 'date': if ($scope->value && $scope->value instanceof Carbon) { $params['dateStr'] = Backend::dateTime($scope->value, ['formatAlias' => 'dateMin']); $params['date'] = $scope->value->format('Y-m-d H:i:s'); } break; case 'daterange': if ($scope->value && is_array($scope->value) && count($scope->value) === 2 && $scope->value[0] && $scope->value[0] instanceof Carbon && $scope->value[1] && $scope->value[1] instanceof Carbon ) { $after = $scope->value[0]->format('Y-m-d H:i:s'); $before = $scope->value[1]->format('Y-m-d H:i:s'); if (strcasecmp($after, '0000-00-00 00:00:00') > 0) { $params['afterStr'] = Backend::dateTime($scope->value[0], ['formatAlias' => 'dateMin']); $params['after'] = $after; } else { $params['afterStr'] = '∞'; $params['after'] = null; } if (strcasecmp($before, '2999-12-31 23:59:59') < 0) { $params['beforeStr'] = Backend::dateTime($scope->value[1], ['formatAlias' => 'dateMin']); $params['before'] = $before; } else { $params['beforeStr'] = '∞'; $params['before'] = null; } } break; case 'number': if (is_numeric($scope->value)) { $params['number'] = $scope->value; } break; case 'numberrange': if ($scope->value && is_array($scope->value) && count($scope->value) === 2 && $scope->value[0] && $scope->value[1] ) { $min = $scope->value[0]; $max = $scope->value[1]; $params['minStr'] = $min ?: ''; $params['min'] = $min ?: null; $params['maxStr'] = $max ?: '∞'; $params['max'] = $max ?: null; } break; case 'text': $params['value'] = $scope->value; $params['size'] = array_get($scope->config, 'size', 10); break; } return $this->makePartial('scope_'.$scope->type, $params); } // // AJAX // /** * Update a filter scope value. 
* @return array */ public function onFilterUpdate() { $this->defineFilterScopes(); if (!$scope = post('scopeName')) { return; } $scope = $this->getScope($scope); switch ($scope->type) { case 'group': $active = $this->optionsFromAjax(post('options.active')); $this->setScopeValue($scope, $active); break; case 'checkbox': $checked = post('value') == 'true'; $this->setScopeValue($scope, $checked); break; case 'switch': $value = post('value'); $this->setScopeValue($scope, $value); break; case 'date': $dates = $this->datesFromAjax(post('options.dates')); if (!empty($dates)) { list($date) = $dates; } else { $date = null; } $this->setScopeValue($scope, $date); break; case 'daterange': $dates = $this->datesFromAjax(post('options.dates')); if (!empty($dates)) { list($after, $before) = $dates; $dates = [$after, $before]; } else { $dates = null; } $this->setScopeValue($scope, $dates); break; case 'number': $numbers = $this->numbersFromAjax(post('options.numbers')); if (!empty($numbers)) { list($number) = $numbers; } else { $number = null; } $this->setScopeValue($scope, $number); break; case 'numberrange': $numbers = $this->numbersFromAjax(post('options.numbers')); if (!empty($numbers)) { list($min, $max) = $numbers; $numbers = [$min, $max]; } else { $numbers = null; } $this->setScopeValue($scope, $numbers); break; case 'text': $value = post('options.value.' . $scope->scopeName) ?: null; $this->setScopeValue($scope, $value); break; } /* * Trigger class event, merge results as viewable array */ $params = func_get_args(); $result = $this->fireEvent('filter.update', [$params]); if ($result && is_array($result)) { return call_user_func_array('array_merge', $result); } } /** * Returns available options for group scope type. * @return array */ public function onFilterGetOptions() { $this->defineFilterScopes(); $searchQuery = post('search'); if (!$scopeName = post('scopeName')) { return; } $scope = $this->getScope($scopeName); $activeKeys = $scope->value ? array_keys($scope->value) : []; $available = $this->getAvailableOptions($scope, $searchQuery); $active = $searchQuery ? [] : $this->filterActiveOptions($activeKeys, $available); return [ 'scopeName' => $scopeName, 'options' => [ 'available' => $this->optionsToAjax($available), 'active' => $this->optionsToAjax($active), ] ]; } // // Internals // /** * Returns the available options a scope can use, either from the * model relation or from a supplied array. Optionally apply a search * constraint to the options. * @param string $scope * @param string $searchQuery * @return array */ protected function getAvailableOptions($scope, $searchQuery = null) { if ($scope->options) { return $this->getOptionsFromArray($scope, $searchQuery); } $available = []; $nameColumn = $this->getScopeNameFrom($scope); $options = $this->getOptionsFromModel($scope, $searchQuery); foreach ($options as $option) { $available[$option->getKey()] = $option->{$nameColumn}; } return $available; } /** * Removes any already selected options from the available options, returns * a newly built array. * @param array $activeKeys * @param array $availableOptions * @return array */ protected function filterActiveOptions(array $activeKeys, array &$availableOptions) { $active = []; foreach ($availableOptions as $id => $option) { if (!in_array($id, $activeKeys)) { continue; } $active[$id] = $option; unset($availableOptions[$id]); } return $active; } /** * Looks at the model for defined scope items. 
* @return Collection */ protected function getOptionsFromModel($scope, $searchQuery = null) { $model = $this->scopeModels[$scope->scopeName]; $query = $model->newQuery(); /* * The 'group' scope has trouble supporting more than 500 records at a time * @todo Introduce a more advanced version with robust list support. */ $query->limit(500); /** * @event backend.filter.extendQuery * Provides an opportunity to extend the query of the list of options * * Example usage: * * Event::listen('backend.filter.extendQuery', function ((\Backend\Widgets\Filter) $filterWidget, $query, (\Backend\Classes\FilterScope) $scope) { * if ($scope->scopeName == 'status') { * $query->where('status', '<>', 'all'); * } * }); * * Or * * $listWidget->bindEvent('filter.extendQuery', function ($query, (\Backend\Classes\FilterScope) $scope) { * if ($scope->scopeName == 'status') { * $query->where('status', '<>', 'all'); * } * }); * */ $this->fireSystemEvent('backend.filter.extendQuery', [$query, $scope]); if (!$searchQuery) { return $query->get(); } $searchFields = [$model->getKeyName(), $this->getScopeNameFrom($scope)]; return $query->searchWhere($searchQuery, $searchFields)->get(); } /** * Look at the defined set of options for scope items, or the model method. * @return array */ protected function getOptionsFromArray($scope, $searchQuery = null) { /* * Load the data */ $options = $scope->options; if (is_scalar($options)) { $model = $this->scopeModels[$scope->scopeName]; $methodName = $options; if (!$model->methodExists($methodName)) { throw new ApplicationException(Lang::get('backend::lang.filter.options_method_not_exists', [ 'model' => get_class($model), 'method' => $methodName, 'filter' => $scope->scopeName ])); } $options = $model->$methodName(); } elseif (!is_array($options)) { $options = []; } /* * Apply the search */ $searchQuery = Str::lower($searchQuery); if (strlen($searchQuery)) { $options = $this->filterOptionsBySearch($options, $searchQuery); } return $options; } /** * Filters an array of options by a search term. * @param array $options * @param string $query * @return array */ protected function filterOptionsBySearch($options, $query) { $filteredOptions = []; $optionMatchesSearch = function ($words, $option) { foreach ($words as $word) { $word = trim($word); if (!strlen($word)) { continue; } if (!Str::contains(Str::lower($option), $word)) { return false; } } return true; }; /* * Exact */ foreach ($options as $index => $option) { if (Str::is(Str::lower($option), $query)) { $filteredOptions[$index] = $option; unset($options[$index]); } } /* * Fuzzy */ $words = explode(' ', $query); foreach ($options as $index => $option) { if ($optionMatchesSearch($words, $option)) { $filteredOptions[$index] = $option; } } return $filteredOptions; } /** * Creates a flat array of filter scopes from the configuration. 
*/ protected function defineFilterScopes() { if ($this->scopesDefined) { return; } /** * @event backend.filter.extendScopesBefore * Provides an opportunity to interact with the Filter widget before defining the filter scopes * * Example usage: * * Event::listen('backend.filter.extendScopesBefore', function ((\Backend\Widgets\Filter) $filterWidget) { * // Just in case you really had to do something before scopes are defined * }); * * Or * * $listWidget->bindEvent('filter.extendScopesBefore', function () use ((\Backend\Widgets\Filter) $filterWidget) { * // Just in case you really had to do something before scopes are defined * }); * */ $this->fireSystemEvent('backend.filter.extendScopesBefore'); /* * All scopes */ if (!isset($this->scopes) || !is_array($this->scopes)) { $this->scopes = []; } $this->addScopes($this->scopes); /** * @event backend.filter.extendScopes * Provides an opportunity to interact with the Filter widget & its scopes after the filter scopes have been initialized * * Example usage: * * Event::listen('backend.filter.extendScopes', function ((\Backend\Widgets\Filter) $filterWidget) { * $filterWidget->addScopes([ * 'my_scope' => [ * 'label' => 'My Filter Scope' * ] * ]); * }); * * Or * * $listWidget->bindEvent('filter.extendScopes', function () use ((\Backend\Widgets\Filter) $filterWidget) { * $filterWidget->removeScope('my_scope'); * }); * */ $this->fireSystemEvent('backend.filter.extendScopes'); $this->scopesDefined = true; } /** * Programatically add scopes, used internally and for extensibility. */ public function addScopes(array $scopes) { foreach ($scopes as $name => $config) { $scopeObj = $this->makeFilterScope($name, $config); /* * Check that the filter scope matches the active context */ if ($scopeObj->context !== null) { $context = is_array($scopeObj->context) ? $scopeObj->context : [$scopeObj->context]; if (!in_array($this->getContext(), $context)) { continue; } } /* * Validate scope model */ if (isset($config['modelClass'])) { $class = $config['modelClass']; $model = new $class; $this->scopeModels[$name] = $model; } /* * Ensure scope type options are set */ $scopeProperties = []; switch ($scopeObj->type) { case 'date': case 'daterange': $scopeProperties = [ 'minDate' => '2000-01-01', 'maxDate' => '2099-12-31', 'firstDay' => 0, 'yearRange' => 10, ]; break; } foreach ($scopeProperties as $property => $value) { if (isset($config[$property])) { $value = $config[$property]; } $scopeObj->{$property} = $value; } $this->allScopes[$name] = $scopeObj; } } /** * Programatically remove a scope, used for extensibility. * @param string $scopeName Scope name */ public function removeScope($scopeName) { if (isset($this->allScopes[$scopeName])) { unset($this->allScopes[$scopeName]); } } /** * Creates a filter scope object from name and configuration. */ protected function makeFilterScope($name, $config) { $label = $config['label'] ?? null; $scopeType = $config['type'] ?? null; $scope = new FilterScope($name, $label); $scope->displayAs($scopeType, $config); $scope->idPrefix = $this->alias; /* * Set scope value */ $scope->value = $this->getScopeValue($scope, @$config['default']); return $scope; } // // Filter query logic // /** * Applies all scopes to a DB query. * @param Builder $query * @return Builder */ public function applyAllScopesToQuery($query) { $this->defineFilterScopes(); foreach ($this->allScopes as $scope) { $this->applyScopeToQuery($scope, $query); } return $query; } /** * Applies a filter scope constraints to a DB query. 
* @param string $scope * @param Builder $query * @return Builder */ public function applyScopeToQuery($scope, $query) { if (is_string($scope)) { $scope = $this->getScope($scope); } if (!$scope->value) { return; } switch ($scope->type) { case 'date': if ($scope->value instanceof Carbon) { $value = $scope->value; /* * Condition */ if ($scopeConditions = $scope->conditions) { $query->whereRaw(DbDongle::parse(strtr($scopeConditions, [ ':filtered' => $value->format('Y-m-d'), ':after' => $value->format('Y-m-d H:i:s'), ':before' => $value->copy()->addDay()->addMinutes(-1)->format('Y-m-d H:i:s') ]))); } /* * Scope */ elseif ($scopeMethod = $scope->scope) { $query->$scopeMethod($value); } } break; case 'daterange': if (is_array($scope->value) && count($scope->value) > 1) { list($after, $before) = array_values($scope->value); if ($after && $after instanceof Carbon && $before && $before instanceof Carbon) { /* * Condition */ if ($scopeConditions = $scope->conditions) { $query->whereRaw(DbDongle::parse(strtr($scopeConditions, [ ':afterDate' => $after->format('Y-m-d'), ':after' => $after->format('Y-m-d H:i:s'), ':beforeDate' => $before->format('Y-m-d'), ':before' => $before->format('Y-m-d H:i:s') ]))); } /* * Scope */ elseif ($scopeMethod = $scope->scope) { $query->$scopeMethod($after, $before); } } } break; case 'number': if (is_numeric($scope->value)) { /* * Condition */ if ($scopeConditions = $scope->conditions) { $query->whereRaw(DbDongle::parse(strtr($scopeConditions, [ ':filtered' => $scope->value, ]))); } /* * Scope */ elseif ($scopeMethod = $scope->scope) { $query->$scopeMethod($scope->value); } } break; case 'numberrange': if (is_array($scope->value) && count($scope->value) > 1) { list($min, $max) = array_values($scope->value); if ($min && $max) { /* * Condition * */ if ($scopeConditions = $scope->conditions) { $query->whereRaw(DbDongle::parse(strtr($scopeConditions, [ ':min' => $min, ':max' => $max ]))); } /* * Scope */ elseif ($scopeMethod = $scope->scope) { $query->$scopeMethod($min, $max); } } } break; case 'text': /* * Condition */ if ($scopeConditions = $scope->conditions) { $query->whereRaw(DbDongle::parse(strtr($scopeConditions, [ ':value' => Db::getPdo()->quote($scope->value), ]))); } /* * Scope */ elseif ($scopeMethod = $scope->scope) { $query->$scopeMethod($scope->value); } break; default: $value = is_array($scope->value) ? array_keys($scope->value) : $scope->value; /* * Condition */ if ($scopeConditions = $scope->conditions) { /* * Switch scope: multiple conditions, value either 1 or 2 */ if (is_array($scopeConditions)) { $conditionNum = is_array($value) ? 0 : $value - 1; list($scopeConditions) = array_slice($scopeConditions, $conditionNum); } if (is_array($value)) { $filtered = implode(',', array_build($value, function ($key, $_value) { return [$key, Db::getPdo()->quote($_value)]; })); } else { $filtered = Db::getPdo()->quote($value); } $query->whereRaw(DbDongle::parse(strtr($scopeConditions, [':filtered' => $filtered]))); } /* * Scope */ elseif ($scopeMethod = $scope->scope) { $query->$scopeMethod($value); } break; } return $query; } // // Access layer // /** * Returns a scope value for this widget instance. */ public function getScopeValue($scope, $default = null) { if (is_string($scope)) { $scope = $this->getScope($scope); } $cacheKey = 'scope-'.$scope->scopeName; return $this->getSession($cacheKey, $default); } /** * Sets an scope value for this widget instance. 
*/ public function setScopeValue($scope, $value) { if (is_string($scope)) { $scope = $this->getScope($scope); } $cacheKey = 'scope-'.$scope->scopeName; $this->putSession($cacheKey, $value); $scope->value = $value; } /** * Get all the registered scopes for the instance. * @return array */ public function getScopes() { return $this->allScopes; } /** * Get a specified scope object * @param string $scope * @return mixed */ public function getScope($scope) { if (!isset($this->allScopes[$scope])) { throw new ApplicationException('No definition for scope ' . $scope); } return $this->allScopes[$scope]; } /** * Returns the display name column for a scope. * @param string $scope * @return string */ public function getScopeNameFrom($scope) { if (is_string($scope)) { $scope = $this->getScope($scope); } return $scope->nameFrom; } /** * Returns the active context for displaying the filter. * @return string */ public function getContext() { return $this->context; } // // Helpers // /** * Convert a key/pair array to a named array {id: 1, name: 'Foobar'} * @param array $options * @return array */ protected function optionsToAjax($options) { $processed = []; foreach ($options as $id => $result) { $processed[] = ['id' => $id, 'name' => trans($result)]; } return $processed; } /** * Convert a named array to a key/pair array * @param array $options * @return array */ protected function optionsFromAjax($options) { $processed = []; if (!is_array($options)) { return $processed; } foreach ($options as $option) { $id = array_get($option, 'id'); if ($id === null) { continue; } $processed[$id] = array_get($option, 'name'); } return $processed; } /** * Convert an array from the posted dates * * @param array $dates * * @return array */ protected function datesFromAjax($ajaxDates) { $dates = []; $dateRegex = '/\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}/'; if (null !== $ajaxDates) { if (!is_array($ajaxDates)) { if (preg_match($dateRegex, $ajaxDates)) { $dates = [$ajaxDates]; } } else { foreach ($ajaxDates as $i => $date) { if (preg_match($dateRegex, $date)) { $dates[] = Carbon::createFromFormat('Y-m-d H:i:s', $date); } elseif (empty($date)) { if ($i == 0) { $dates[] = Carbon::createFromFormat('Y-m-d H:i:s', '0000-00-00 00:00:00'); } else { $dates[] = Carbon::createFromFormat('Y-m-d H:i:s', '2999-12-31 23:59:59'); } } else { $dates = []; break; } } } } return $dates; } /** * Convert an array from the posted numbers * * @param array $dates * * @return array */ protected function numbersFromAjax($ajaxNumbers) { $numbers = []; $numberRegex = '/\d/'; if (!empty($ajaxNumbers)) { if (!is_array($ajaxNumbers) && preg_match($numberRegex, $ajaxNumbers)) { $numbers = [$ajaxNumbers]; } else { foreach ($ajaxNumbers as $i => $number) { if (preg_match($numberRegex, $number)) { $numbers[] = $number; } else { $numbers = []; break; } } } } return $numbers; } /** * @param mixed $scope * * @return string */ protected function getFilterDateFormat($scope) { if (isset($scope->date_format)) { return $scope->date_format; } return trans('backend::lang.filter.date.format'); } }
1
17,294
@robinbonnes ~~Would you mind putting the array key and value on a new line - it makes it more readable as an array.~~ Never mind, see discussion below.
octobercms-october
php
@@ -26,6 +26,7 @@ var version = "(unreleased version)" func main() { var ( justVersion bool + cniNet, cniIpam bool address string meshAddress string logLevel string
1
package main import ( "flag" "fmt" "net" "net/http" "os" "os/signal" "strings" "syscall" cni "github.com/appc/cni/pkg/skel" "github.com/docker/libnetwork/ipamapi" weaveapi "github.com/weaveworks/weave/api" . "github.com/weaveworks/weave/common" "github.com/weaveworks/weave/common/docker" weavenet "github.com/weaveworks/weave/net" ipamplugin "github.com/weaveworks/weave/plugin/ipam" netplugin "github.com/weaveworks/weave/plugin/net" "github.com/weaveworks/weave/plugin/skel" ) var version = "(unreleased version)" func main() { var ( justVersion bool address string meshAddress string logLevel string noMulticastRoute bool ) flag.BoolVar(&justVersion, "version", false, "print version and exit") flag.StringVar(&logLevel, "log-level", "info", "logging level (debug, info, warning, error)") flag.StringVar(&address, "socket", "/run/docker/plugins/weave.sock", "socket on which to listen") flag.StringVar(&meshAddress, "meshsocket", "/run/docker/plugins/weavemesh.sock", "socket on which to listen in mesh mode") flag.BoolVar(&noMulticastRoute, "no-multicast-route", false, "do not add a multicast route to network endpoints") flag.Parse() if justVersion { fmt.Printf("weave plugin %s\n", version) os.Exit(0) } SetLogLevel(logLevel) weave := weaveapi.NewClient(os.Getenv("WEAVE_HTTP_ADDR"), Log) switch { case strings.HasSuffix(os.Args[0], "weave-ipam"): i := ipamplugin.NewIpam(weave) cni.PluginMain(i.CmdAdd, i.CmdDel) os.Exit(0) case strings.HasSuffix(os.Args[0], "weave"): n := netplugin.NewCNIPlugin(weave) cni.PluginMain(n.CmdAdd, n.CmdDel) os.Exit(0) } // API 1.21 is the first version that supports docker network commands dockerClient, err := docker.NewVersionedClientFromEnv("1.21") if err != nil { Log.Fatalf("unable to connect to docker: %s", err) } Log.Println("Weave plugin", version, "Command line options:", os.Args[1:]) Log.Info(dockerClient.Info()) err = run(dockerClient, weave, address, meshAddress, noMulticastRoute) if err != nil { Log.Fatal(err) } } func run(dockerClient *docker.Client, weave *weaveapi.Client, address, meshAddress string, noMulticastRoute bool) error { endChan := make(chan error, 1) if address != "" { globalListener, err := listenAndServe(dockerClient, weave, address, noMulticastRoute, endChan, "global", false) if err != nil { return err } defer os.Remove(address) defer globalListener.Close() } if meshAddress != "" { meshListener, err := listenAndServe(dockerClient, weave, meshAddress, noMulticastRoute, endChan, "local", true) if err != nil { return err } defer os.Remove(meshAddress) defer meshListener.Close() } statusListener, err := weavenet.ListenUnixSocket("/home/weave/status.sock") if err != nil { return err } go serveStatus(statusListener) sigChan := make(chan os.Signal, 1) signal.Notify(sigChan, os.Interrupt, os.Kill, syscall.SIGTERM) select { case sig := <-sigChan: Log.Debugf("Caught signal %s; shutting down", sig) return nil case err := <-endChan: return err } } func listenAndServe(dockerClient *docker.Client, weave *weaveapi.Client, address string, noMulticastRoute bool, endChan chan<- error, scope string, withIpam bool) (net.Listener, error) { d, err := netplugin.New(dockerClient, weave, scope, noMulticastRoute) if err != nil { return nil, err } var i ipamapi.Ipam if withIpam { i = ipamplugin.NewIpam(weave) } listener, err := weavenet.ListenUnixSocket(address) if err != nil { return nil, err } Log.Printf("Listening on %s for %s scope", address, scope) go func() { endChan <- skel.Listen(listener, d, i) }() return listener, nil } func serveStatus(listener net.Listener) { 
server := &http.Server{Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { fmt.Fprintln(w, "ok") })} if err := server.Serve(listener); err != nil { Log.Fatalf("ListenAndServeStatus failed: %s", err) } }
1
12,601
I've never seen us do this anywhere else - why here? Why not `address, meshAddress string`?
weaveworks-weave
go
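
The comment on this entry concerns the `cniNet, cniIpam bool` line added to the `var` block in the diff: it groups two names on one declaration while the rest of the block declares one variable per line (`address string`, `meshAddress string`, ...). As a quick illustration of the two self-consistent styles the reviewer is contrasting, using only names that appear in the diff and file above; these are mutually exclusive alternatives for a sketch, and neither is necessarily what the project settled on.

```go
// Option 1: one declaration per line, matching the existing block.
var (
	justVersion      bool
	cniNet           bool
	cniIpam          bool
	address          string
	meshAddress      string
	logLevel         string
	noMulticastRoute bool
)

// Option 2 (alternative to Option 1, not in addition to it): group
// same-typed variables consistently throughout the block.
var (
	justVersion, cniNet, cniIpam, noMulticastRoute bool
	address, meshAddress, logLevel                 string
)
```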
@@ -191,7 +191,9 @@ public class RequestUtil { } // implement compat for existing components... - JsonQueryConverter jsonQueryConverter = new JsonQueryConverter(); + JsonQueryConverter jsonQueryConverter = (JsonQueryConverter) req.getContext() + .computeIfAbsent(JsonQueryConverter.contextKey, (k)->new JsonQueryConverter()); + if (json != null && !isShard) { for (Map.Entry<String,Object> entry : json.entrySet()) { String key = entry.getKey();
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.solr.request.json; import java.io.IOException; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import org.apache.commons.io.IOUtils; import org.apache.solr.common.SolrException; import org.apache.solr.common.params.ModifiableSolrParams; import org.apache.solr.common.params.MultiMapSolrParams; import org.apache.solr.common.params.SolrParams; import org.apache.solr.common.util.ContentStream; import org.apache.solr.common.util.StrUtils; import org.apache.solr.handler.component.SearchHandler; import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.request.SolrRequestHandler; import org.apache.solr.request.macro.MacroExpander; import org.noggit.JSONParser; import org.noggit.ObjectBuilder; import static org.apache.solr.common.params.CommonParams.JSON; import static org.apache.solr.common.params.CommonParams.SORT; public class RequestUtil { /** * Set default-ish params on a SolrQueryRequest as well as do standard macro processing and JSON request parsing. * * @param handler The search handler this is for (may be null if you don't want this method touching the content streams) * @param req The request whose params we are interested in * @param defaults values to be used if no values are specified in the request params * @param appends values to be appended to those from the request (or defaults) when dealing with multi-val params, or treated as another layer of defaults for singl-val params. * @param invariants values which will be used instead of any request, or default values, regardless of context. 
*/ public static void processParams(SolrRequestHandler handler, SolrQueryRequest req, SolrParams defaults, SolrParams appends, SolrParams invariants) { boolean searchHandler = handler instanceof SearchHandler; SolrParams params = req.getParams(); // Handle JSON stream for search requests if (searchHandler && req.getContentStreams() != null) { Map<String,String[]> map = MultiMapSolrParams.asMultiMap(params, false); if (!(params instanceof MultiMapSolrParams || params instanceof ModifiableSolrParams)) { // need to set params on request since we weren't able to access the original map params = new MultiMapSolrParams(map); req.setParams(params); } String[] jsonFromParams = map.remove(JSON); // params from the query string should come after (and hence override) JSON content streams for (ContentStream cs : req.getContentStreams()) { String contentType = cs.getContentType(); if (contentType==null || !contentType.contains("/json")) { throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Bad contentType for search handler :" + contentType + " request="+req); } try { String jsonString = IOUtils.toString( cs.getReader() ); if (jsonString != null) { MultiMapSolrParams.addParam(JSON, jsonString, map); } } catch (IOException e) { throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Exception reading content stream for request:"+req, e); } } // append existing "json" params if (jsonFromParams != null) { for (String json : jsonFromParams) { MultiMapSolrParams.addParam(JSON, json, map); } } } String[] jsonS = params.getParams(JSON); boolean hasAdditions = defaults != null || invariants != null || appends != null || jsonS != null; // short circuit processing if (!hasAdditions && !params.getBool("expandMacros", true)) { return; // nothing to do... } boolean isShard = params.getBool("isShard", false); Map<String, String[]> newMap = MultiMapSolrParams.asMultiMap(params, hasAdditions); // see if the json has a "params" section // TODO: we should currently *not* do this if this is a leaf of a distributed search since it could overwrite parameters set by the top-level // The parameters we extract will be propagated anyway. if (jsonS != null && !isShard) { for (String json : jsonS) { getParamsFromJSON(newMap, json); } } // first populate defaults, etc.. 
if (defaults != null) { Map<String, String[]> defaultsMap = MultiMapSolrParams.asMultiMap(defaults); for (Map.Entry<String, String[]> entry : defaultsMap.entrySet()) { String key = entry.getKey(); if (!newMap.containsKey(key)) { newMap.put(key, entry.getValue()); } } } if (appends != null) { Map<String, String[]> appendsMap = MultiMapSolrParams.asMultiMap(appends); for (Map.Entry<String, String[]> entry : appendsMap.entrySet()) { String key = entry.getKey(); String[] arr = newMap.get(key); if (arr == null) { newMap.put(key, entry.getValue()); } else { String[] appendArr = entry.getValue(); String[] newArr = new String[arr.length + appendArr.length]; System.arraycopy(arr, 0, newArr, 0, arr.length); System.arraycopy(appendArr, 0, newArr, arr.length, appendArr.length); newMap.put(key, newArr); } } } if (invariants != null) { newMap.putAll( MultiMapSolrParams.asMultiMap(invariants) ); } if (!isShard) { // Don't expand macros in shard requests String[] doMacrosStr = newMap.get("expandMacros"); boolean doMacros = true; if (doMacrosStr != null) { doMacros = "true".equals(doMacrosStr[0]); } if (doMacros) { newMap = MacroExpander.expand(newMap); } } // Set these params as soon as possible so if there is an error processing later, things like // "wt=json" will take effect from the defaults. SolrParams newParams = new MultiMapSolrParams(newMap); // newMap may still change below, but that should be OK req.setParams(newParams); // Skip the rest of the processing (including json processing for now) if this isn't a search handler. // For example json.command started to be used in SOLR-6294, and that caused errors here. if (!searchHandler) return; Map<String, Object> json = null; // Handle JSON body first, so query params will always overlay on that jsonS = newMap.get(JSON); if (jsonS != null) { if (json == null) { json = new LinkedHashMap<>(); } mergeJSON(json, JSON, jsonS, new ObjectUtil.ConflictHandler()); } for (Map.Entry<String, String[]> entry : newMap.entrySet()) { String key = entry.getKey(); // json.nl, json.wrf are existing query parameters if (key.startsWith("json.") && !("json.nl".equals(key) || "json.wrf".equals(key))) { if (json == null) { json = new LinkedHashMap<>(); } mergeJSON(json, key, entry.getValue(), new ObjectUtil.ConflictHandler()); } } // implement compat for existing components... JsonQueryConverter jsonQueryConverter = new JsonQueryConverter(); if (json != null && !isShard) { for (Map.Entry<String,Object> entry : json.entrySet()) { String key = entry.getKey(); String out = null; boolean isQuery = false; boolean arr = false; if ("query".equals(key)) { out = "q"; isQuery = true; } else if ("filter".equals(key)) { out = "fq"; arr = true; isQuery = true; } else if ("fields".equals(key)) { out = "fl"; arr = true; } else if ("offset".equals(key)) { out = "start"; } else if ("limit".equals(key)) { out = "rows"; } else if (SORT.equals(key)) { out = SORT; } else if ("params".equals(key) || "facet".equals(key) ) { // handled elsewhere continue; } else { throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown top-level key in JSON request : " + key); } Object val = entry.getValue(); if (arr) { String[] existing = newMap.get(out); List lst = val instanceof List ? (List)val : null; int existingSize = existing==null ? 0 : existing.length; int jsonSize = lst==null ? 
1 : lst.size(); String[] newval = new String[ existingSize + jsonSize ]; for (int i=0; i<existingSize; i++) { newval[i] = existing[i]; } if (lst != null) { for (int i = 0; i < jsonSize; i++) { Object v = lst.get(i); newval[existingSize + i] = isQuery ? jsonQueryConverter.toLocalParams(v, newMap) : v.toString(); } } else { newval[newval.length-1] = isQuery ? jsonQueryConverter.toLocalParams(val, newMap) : val.toString(); } newMap.put(out, newval); } else { newMap.put(out, new String[]{isQuery ? jsonQueryConverter.toLocalParams(val, newMap) : val.toString()}); } } } if (json != null) { req.setJSON(json); } } // queryParamName is something like json.facet or json.query, or just json... private static void mergeJSON(Map<String,Object> json, String queryParamName, String[] vals, ObjectUtil.ConflictHandler handler) { try { List<String> path = StrUtils.splitSmart(queryParamName, ".", true); path = path.subList(1, path.size()); for (String jsonStr : vals) { Object o = ObjectBuilder.fromJSONStrict(jsonStr); // zero-length strings or comments can cause this to be null (and a zero-length string can result from a json content-type w/o a body) if (o != null) { ObjectUtil.mergeObjects(json, path, o, handler); } } } catch (JSONParser.ParseException e ) { throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e); } catch (IOException e) { // impossible } } private static void getParamsFromJSON(Map<String, String[]> params, String json) { if (json.indexOf("params") < 0) { return; } JSONParser parser = new JSONParser(json); try { JSONUtil.expect(parser, JSONParser.OBJECT_START); boolean found = JSONUtil.advanceToMapKey(parser, "params", false); if (!found) { return; } parser.nextEvent(); // advance to the value Object o = ObjectBuilder.getVal(parser); if (!(o instanceof Map)) return; Map<String,Object> map = (Map<String,Object>)o; // To make consistent with json.param handling, we should make query params come after json params (i.e. query params should // appear to overwrite json params. // Solr params are based on String though, so we need to convert for (Map.Entry<String, Object> entry : map.entrySet()) { String key = entry.getKey(); Object val = entry.getValue(); if (params.get(key) != null) { continue; } if (val == null) { params.remove(key); } else if (val instanceof List) { List lst = (List) val; String[] vals = new String[lst.size()]; for (int i = 0; i < vals.length; i++) { vals[i] = lst.get(i).toString(); } params.put(key, vals); } else { params.put(key, new String[]{val.toString()}); } } } catch (Exception e) { // ignore parse exceptions at this stage, they may be caused by incomplete macro expansions return; } } }
1
31,726
I think it's necessary only for the earlier "dynamic" approach. I suppose it's not necessary in the eager one.
apache-lucene-solr
java
@@ -437,4 +437,9 @@ class Configurator ? $this->doctrineTypeToFormTypeMap[$doctrineType] : $doctrineType; } + + public function getBackendConfig() + { + return $this->backendConfig; + } }
1
<?php /* * This file is part of the EasyAdminBundle. * * (c) Javier Eguiluz <[email protected]> * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. */ namespace JavierEguiluz\Bundle\EasyAdminBundle\Configuration; use Doctrine\ORM\Mapping\ClassMetadata; use Doctrine\ORM\Mapping\ClassMetadataInfo; use JavierEguiluz\Bundle\EasyAdminBundle\Reflection\EntityMetadataInspector; use JavierEguiluz\Bundle\EasyAdminBundle\Reflection\ClassPropertyReflector; class Configurator { private $backendConfig = array(); private $entitiesConfig = array(); private $doctrineManager; private $reflector; private $defaultEntityFields = array(); private $defaultEntityFieldConfiguration = array( 'class' => null, // CSS class/classes 'format' => null, // date/time/datetime/number format 'help' => null, // form field help message 'label' => null, // form field label (if 'null', autogenerate it) 'type' => null, // it holds 'dataType' for list/show and 'fieldType' for new/edit 'fieldType' => null, // Symfony form field type (text, date, number, choice, ...) 'dataType' => null, // Doctrine property data type (text, date, integer, boolean, ...) 'virtual' => false, // is a virtual field or a real entity property? ); private $doctrineTypeToFormTypeMap = array( 'array' => 'collection', 'association' => null, 'bigint' => 'text', 'blob' => 'textarea', 'boolean' => 'checkbox', 'date' => 'date', 'datetime' => 'datetime', 'datetimetz' => 'datetime', 'decimal' => 'number', 'float' => 'number', 'guid' => 'text', 'integer' => 'integer', 'json_array' => 'textarea', 'object' => 'textarea', 'simple_array' => 'collection', 'smallint' => 'integer', 'string' => 'text', 'text' => 'textarea', 'time' => 'time', ); public function __construct(array $backendConfig, EntityMetadataInspector $inspector, ClassPropertyReflector $reflector) { $this->backendConfig = $backendConfig; $this->inspector = $inspector; $this->reflector = $reflector; } /** * Processes and returns the full configuration for the given entity name. * This configuration includes all the information about the form fields * and properties of the entity. * * @param string $entityName * @return array The full entity configuration */ public function getEntityConfiguration($entityName) { // if the configuration has already been processed for the given entity, reuse it if (isset($this->entitiesConfig[$entityName])) { return $this->entitiesConfig[$entityName]; } if (!isset($this->backendConfig['entities'][$entityName])) { throw new \InvalidArgumentException(sprintf('Entity "%s" is not managed by EasyAdmin.', $entityName)); } $entityConfiguration = $this->backendConfig['entities'][$entityName]; $entityMetadata = $this->inspector->getEntityMetadata($entityConfiguration['class']); $entityConfiguration['primary_key_field_name'] = $entityMetadata->getSingleIdentifierFieldName(); $entityProperties = $this->processEntityPropertiesMetadata($entityMetadata); $entityConfiguration['properties'] = $entityProperties; // default fields used when the action (list, edit, etc.) 
doesn't define its own fields $this->defaultEntityFields = $this->createFieldsFromEntityProperties($entityProperties); $entityConfiguration['list']['fields'] = $this->getFieldsForListAction($entityConfiguration); $entityConfiguration['show']['fields'] = $this->getFieldsForShowAction($entityConfiguration); $entityConfiguration['edit']['fields'] = $this->getFieldsForFormBasedActions('edit', $entityConfiguration); $entityConfiguration['new']['fields'] = $this->getFieldsForFormBasedActions('new', $entityConfiguration); $entityConfiguration['search']['fields'] = $this->getFieldsForSearchAction($entityConfiguration); $entityConfiguration = $this->introspectGettersAndSetters($entityConfiguration); $this->entitiesConfig[$entityName] = $entityConfiguration; return $entityConfiguration; } /** * Takes the entity metadata introspected via Doctrine and completes its * contents to simplify data processing for the rest of the application. * * @param ClassMetadata $entityMetadata The entity metadata introspected via Doctrine * @return array The entity properties metadata provided by Doctrine */ private function processEntityPropertiesMetadata(ClassMetadata $entityMetadata) { $entityPropertiesMetadata = array(); if ($entityMetadata->isIdentifierComposite) { throw new \RuntimeException(sprintf("The '%s' entity isn't valid because it contains a composite primary key.", $entityMetadata->name)); } // introspect regular entity fields foreach ($entityMetadata->fieldMappings as $fieldName => $fieldMetadata) { // field names are tweaked this way to simplify Twig templates and extensions $fieldName = str_replace('_', '', $fieldName); $entityPropertiesMetadata[$fieldName] = $fieldMetadata; } // introspect fields for entity associations (except many-to-many) foreach ($entityMetadata->associationMappings as $fieldName => $associationMetadata) { if (ClassMetadataInfo::MANY_TO_MANY !== $associationMetadata['type']) { // field names are tweaked this way to simplify Twig templates and extensions $fieldName = str_replace('_', '', $fieldName); $entityPropertiesMetadata[$fieldName] = array( 'type' => 'association', 'associationType' => $associationMetadata['type'], 'fieldName' => $fieldName, 'fetch' => $associationMetadata['fetch'], 'isOwningSide' => $associationMetadata['isOwningSide'], 'targetEntity' => $associationMetadata['targetEntity'], ); } } return $entityPropertiesMetadata; } /** * Returns the list of fields to show in the 'list' action of this entity. * * @param array $entityConfiguration * @return array The list of fields to show and their metadata */ private function getFieldsForListAction(array $entityConfiguration) { $entityFields = array(); // there is a custom configuration for 'list' fields if (count($entityConfiguration['list']['fields']) > 0) { return $this->normalizeFieldsConfiguration('list', $entityConfiguration); } return $this->filterListFieldsBasedOnSmartGuesses($this->defaultEntityFields); } /** * Returns the list of fields to show in the 'show' action of this entity. * * @param array $entityConfiguration * @return array The list of fields to show and their metadata */ private function getFieldsForShowAction(array $entityConfiguration) { // there is a custom configuration for 'show' fields if (count($entityConfiguration['show']['fields']) > 0) { return $this->normalizeFieldsConfiguration('show', $entityConfiguration); } return $this->defaultEntityFields; } /** * Returns the list of fields to show in the forms of this entity for the * actions which display forms ('edit' and 'new'). 
* * @param array $entityConfiguration * @return array The list of fields to show and their metadata */ protected function getFieldsForFormBasedActions($action, array $entityConfiguration) { $entityFields = array(); // there is a custom field configuration for this action if (count($entityConfiguration[$action]['fields']) > 0) { $entityFields = $this->normalizeFieldsConfiguration($action, $entityConfiguration); } else { $excludedFieldNames = array($entityConfiguration['primary_key_field_name']); $excludedFieldTypes = array('binary', 'blob', 'json_array', 'object'); $entityFields = $this->filterFieldsByNameAndType($this->defaultEntityFields, $excludedFieldNames, $excludedFieldTypes); } return $entityFields; } /** * Returns the list of entity fields on which the search query is performed. * * @param array $entityConfiguration * @return array The list of fields to use for the search */ private function getFieldsForSearchAction(array $entityConfiguration) { $excludedFieldNames = array(); $excludedFieldTypes = array('association', 'binary', 'blob', 'date', 'datetime', 'datetimetz', 'guid', 'time', 'object'); return $this->filterFieldsByNameAndType($this->defaultEntityFields, $excludedFieldNames, $excludedFieldTypes); } /** * If the backend configuration doesn't define any options for the fields of some entity, * create some basic field configuration based on the entity's Doctrine metadata. * * @param array $entityProperties * @return array The array of fields */ private function createFieldsFromEntityProperties($entityProperties) { $fields = array(); foreach ($entityProperties as $propertyName => $propertyMetadata) { $metadata = array_replace($this->defaultEntityFieldConfiguration, $propertyMetadata); $metadata['property'] = $propertyName; $metadata['dataType'] = $propertyMetadata['type']; $metadata['fieldType'] = $this->getFormTypeFromDoctrineType($propertyMetadata['type']); $metadata['format'] = $this->getFieldFormat($propertyMetadata['type']); $fields[$propertyName] = $metadata; } return $fields; } /** * Guesses the best fields to display in a listing when the entity doesn't * define any configuration. It does so limiting the number of fields to * display and discarding several field types. * * @param array $entityFields * @return array The list of fields to display */ private function filterListFieldsBasedOnSmartGuesses(array $entityFields) { // empirical guess: listings with more than 7 fields look ugly $maxListFields = 7; $excludedFieldNames = array('password', 'salt', 'slug', 'updatedAt', 'uuid'); $excludedFieldTypes = array('array', 'binary', 'blob', 'guid', 'json_array', 'object', 'simple_array', 'text'); // if the entity has few fields, show them all if (count($entityFields) <= $maxListFields) { return $entityFields; } // if the entity has a lot of fields, try to guess which fields we can remove $filteredFields = $entityFields; foreach ($entityFields as $name => $metadata) { if (in_array($name, $excludedFieldNames) || in_array($metadata['type'], $excludedFieldTypes)) { unset($filteredFields[$name]); // whenever a field is removed, check again if we are below the acceptable number of fields if (count($filteredFields) <= $maxListFields) { return $filteredFields; } } } // if the entity has still a lot of remaining fields, just slice the last ones return array_slice($filteredFields, 0, $maxListFields); } /** * Filters a list of fields excluding the given list of field names and field types. 
* * @param array $fields * @param array $excludedFieldNames * @param array $excludedFieldTypes * @return array The filtered list of fields */ private function filterFieldsByNameAndType(array $fields, array $excludedFieldNames, array $excludedFieldTypes) { $filteredFields = array(); foreach ($fields as $name => $metadata) { if (!in_array($name, $excludedFieldNames) && !in_array($metadata['type'], $excludedFieldTypes)) { $filteredFields[$name] = $fields[$name]; } } return $filteredFields; } /** * This method takes the default field configuration, the Doctrine's entity * metadata and the configured field options to merge and process them all * and generate the final and complete field configuration. * * @param string $action * @param array $entityConfiguration * @return array The complete field configuration */ private function normalizeFieldsConfiguration($action, $entityConfiguration) { $configuration = array(); $fieldsConfiguration = $entityConfiguration[$action]['fields']; foreach ($fieldsConfiguration as $fieldName => $fieldConfiguration) { if (!array_key_exists($fieldName, $entityConfiguration['properties'])) { // treat this field as 'virtual' because it doesn't exist as a // property of the related Doctrine entity $normalizedConfiguration = array_replace( $this->defaultEntityFieldConfiguration, $fieldConfiguration ); $normalizedConfiguration['virtual'] = true; } else { // this is a regular field that exists as a property of the related Doctrine entity $normalizedConfiguration = array_replace( $this->defaultEntityFieldConfiguration, $entityConfiguration['properties'][$fieldName], $fieldConfiguration ); } // 'list' and 'show' actions: use the value of the 'type' option as // the 'dataType' option because the previous code has already // prioritized end-user preferences over Doctrine and default values if (in_array($action, array('list', 'show'))) { $normalizedConfiguration['dataType'] = $normalizedConfiguration['type']; } // 'new' and 'edit' actions: if the user has defined the 'type' option // for the field, use it as 'fieldType. Otherwise, infer the best field // type using the property data type. if (in_array($action, array('edit', 'new'))) { if (isset($fieldConfiguration['type'])) { $normalizedConfiguration['fieldType'] = $fieldConfiguration['type']; } else { $normalizedConfiguration['fieldType'] = $this->getFormTypeFromDoctrineType($normalizedConfiguration['type']); } } // special case for the 'list' action: 'boolean' properties are displayed // as toggleable flip switches unless end-user configures their type explicitly if ('list' === $action && 'boolean' === $normalizedConfiguration['dataType'] && !isset($fieldConfiguration['type'])) { $normalizedConfiguration['dataType'] = 'toggle'; } if (null === $normalizedConfiguration['format']) { $normalizedConfiguration['format'] = $this->getFieldFormat($normalizedConfiguration['type']); } $configuration[$fieldName] = $normalizedConfiguration; } return $configuration; } /** * Returns the date/time/datetime/number format for the given field * according to its type and the default formats defined for the backend. * * @param string $fieldType * @return string The format that should be applied to the field value */ private function getFieldFormat($fieldType) { if (in_array($fieldType, array('date', 'time', 'datetime', 'datetimetz'))) { // make 'datetimetz' use the same format as 'datetime' $fieldType = ('datetimetz' === $fieldType) ? 
'datetime' : $fieldType; return $this->backendConfig['formats'][$fieldType]; } if (in_array($fieldType, array('bigint', 'integer', 'smallint', 'decimal', 'float'))) { return isset($this->backendConfig['formats']['number']) ? $this->backendConfig['formats']['number'] : null; } } /** * Introspects the getters and setters for the fields used by all actions. * This preprocessing saves a lot of further processing when accessing or * setting the value of the entity properties. * * @param array $entityConfiguration * @return array */ private function introspectGettersAndSetters($entityConfiguration) { foreach (array('new', 'edit', 'list', 'show', 'search') as $action) { $fieldsConfiguration = $entityConfiguration[$action]['fields']; foreach ($fieldsConfiguration as $fieldName => $fieldConfiguration) { $getter = $this->reflector->getGetter($entityConfiguration['class'], $fieldName); $fieldConfiguration['getter'] = $getter; $setter = $this->reflector->getSetter($entityConfiguration['class'], $fieldName); $fieldConfiguration['setter'] = $setter; $isPublic = $this->reflector->isPublic($entityConfiguration['class'], $fieldName); $fieldConfiguration['isPublic'] = $isPublic; $fieldConfiguration['canBeGet'] = $getter || $isPublic; $fieldConfiguration['canBeSet'] = $setter || $isPublic; $entityConfiguration[$action]['fields'][$fieldName] = $fieldConfiguration; } } return $entityConfiguration; } /** * Returns the most appropriate Symfony Form type for the given Doctrine type. * * @param string $doctrineType * @return string */ private function getFormTypeFromDoctrineType($doctrineType) { // don't change this array_key_exists() by isset() because the Doctrine // type map can return 'null' values that shouldn't be ignored return array_key_exists($doctrineType, $this->doctrineTypeToFormTypeMap) ? $this->doctrineTypeToFormTypeMap[$doctrineType] : $doctrineType; } }
1
8,137
Same here as [there](#r26168925)
EasyCorp-EasyAdminBundle
php
@@ -53,12 +53,12 @@ public class ApplicationDTO { this.typeId = builder.typeId; this.categoryId = builder.categoryId; if (builder.id != null) { - if (builder.id.matches("^[a-zA-Z0-9_]+$")) { + if (builder.id.matches("^[a-zA-Z0-9_.]+$")) { this.id = builder.id; } else { LOGGER.warn(String.format("Application ID (%s) contains invalid characters, will remove them.", builder.id)); - this.id = builder.id.replaceAll("[^a-zA-Z0-9_]", ""); + this.id = builder.id.replaceAll("[^a-zA-Z0-9_.]", ""); } } else { this.id = null;
1
/* * Copyright (C) 2015-2017 PÂRIS Quentin * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ package org.phoenicis.repository.dto; import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.databind.annotation.JsonDeserialize; import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder; import org.apache.commons.lang.builder.EqualsBuilder; import org.apache.commons.lang.builder.HashCodeBuilder; import org.phoenicis.configuration.localisation.Translatable; import org.phoenicis.configuration.localisation.TranslatableBuilder; import org.phoenicis.configuration.localisation.Translate; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.net.URI; import java.util.*; /** * Represents an application */ @JsonDeserialize(builder = ApplicationDTO.Builder.class) @Translatable public class ApplicationDTO { private static final Logger LOGGER = LoggerFactory.getLogger(ApplicationDTO.class); private final String typeId; private final String categoryId; private final String id; private final String name; private final String description; private final URI icon; private final List<URI> miniatures; private final List<ScriptDTO> scripts; private final List<ResourceDTO> resources; private ApplicationDTO(Builder builder) { this.typeId = builder.typeId; this.categoryId = builder.categoryId; if (builder.id != null) { if (builder.id.matches("^[a-zA-Z0-9_]+$")) { this.id = builder.id; } else { LOGGER.warn(String.format("Application ID (%s) contains invalid characters, will remove them.", builder.id)); this.id = builder.id.replaceAll("[^a-zA-Z0-9_]", ""); } } else { this.id = null; } this.name = builder.name == null ? builder.id : builder.name; this.description = builder.description; this.icon = builder.icon; this.miniatures = builder.miniatures; this.scripts = builder.scripts; this.resources = builder.resources; } public static Comparator<ApplicationDTO> nameComparator() { return (o1, o2) -> o1.getName().compareToIgnoreCase(o2.getName()); } public String getTypeId() { return typeId; } public String getCategoryId() { return categoryId; } public List<ResourceDTO> getResources() { return resources; } public URI getIcon() { return icon; } public List<URI> getMiniatures() { return miniatures; } public String getId() { return id; } @Translate public String getName() { return name; } @Translate public String getDescription() { return description; } @Translate public List<ScriptDTO> getScripts() { return scripts; } /** * Returns the main miniature belonging to this {@link ApplicationDTO}. * The main miniature is the miniature with the file name <code>main.png</code>. * If this {@link ApplicationDTO} contains no miniatures {@link Optional#empty()} is returned. 
* * @return An optional with the found URI, or {@link Optional#empty()} if no miniature exists */ @JsonIgnore public Optional<URI> getMainMiniature() { return this.miniatures.stream().filter(uri -> uri.getPath().endsWith("main.png")).findFirst(); } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } ApplicationDTO that = (ApplicationDTO) o; return new EqualsBuilder() .append(typeId, that.typeId) .append(categoryId, that.categoryId) .append(id, that.id) .append(name, that.name) .append(description, that.description) .append(icon, that.icon) .append(miniatures, that.miniatures) .append(scripts, that.scripts) .append(resources, that.resources) .isEquals(); } @Override public int hashCode() { return new HashCodeBuilder(17, 37) .append(typeId) .append(categoryId) .append(id) .append(name) .append(description) .append(icon) .append(miniatures) .append(scripts) .append(resources) .toHashCode(); } @JsonPOJOBuilder(buildMethodName = "build", withPrefix = "with") @TranslatableBuilder public static class Builder { private String typeId; private String categoryId; private String id; private String name; private String description; private URI icon; private List<URI> miniatures = new ArrayList<>(); private List<ScriptDTO> scripts = new ArrayList<>(); private List<ResourceDTO> resources = new ArrayList<>(); public Builder() { // Default constructor } public Builder(ApplicationDTO applicationDTO) { this.typeId = applicationDTO.typeId; this.categoryId = applicationDTO.categoryId; this.id = applicationDTO.id; this.name = applicationDTO.name; this.description = applicationDTO.description; this.icon = applicationDTO.icon; this.miniatures = applicationDTO.miniatures; this.scripts = applicationDTO.scripts; this.resources = applicationDTO.resources; } public Builder withTypeId(String typeId) { this.typeId = typeId; return this; } public Builder withCategoryId(String categoryId) { this.categoryId = categoryId; return this; } public Builder withId(String id) { this.id = id; return this; } public Builder withName(String name) { this.name = name; return this; } public Builder withDescription(String description) { this.description = description; return this; } public Builder withResources(List<ResourceDTO> resources) { this.resources = Collections.unmodifiableList(resources); return this; } public Builder withIcon(URI icon) { this.icon = icon; return this; } public Builder withMiniatures(List<URI> miniatures) { this.miniatures = miniatures; return this; } public Builder withScripts(List<ScriptDTO> scriptDTO) { this.scripts = scriptDTO; return this; } public ApplicationDTO build() { return new ApplicationDTO(this); } public String getTypeId() { return typeId; } public String getCategoryId() { return categoryId; } public String getId() { return id; } public String getName() { return name; } } }
1
13,336
Again, we should think about moving this pattern to a static final field (see the sketch after this record).
PhoenicisOrg-phoenicis
java
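To make the reviewer's suggestion above concrete, here is a minimal Java sketch of hoisting the ID-sanitizing regex into precompiled static final fields. The pattern strings are taken from the diff; the class and field names are hypothetical and not part of the Phoenicis codebase.

import java.util.regex.Pattern;

public final class ApplicationIdSanitizer {
    // Compiled once instead of on every ApplicationDTO construction;
    // the pattern strings mirror the ones in the reviewed diff.
    private static final Pattern VALID_ID = Pattern.compile("^[a-zA-Z0-9_.]+$");
    private static final Pattern INVALID_ID_CHARS = Pattern.compile("[^a-zA-Z0-9_.]");

    private ApplicationIdSanitizer() {
        // utility class, no instances
    }

    // Returns the id unchanged when it only contains allowed characters,
    // otherwise strips the disallowed ones.
    public static String sanitize(String id) {
        if (VALID_ID.matcher(id).matches()) {
            return id;
        }
        return INVALID_ID_CHARS.matcher(id).replaceAll("");
    }
}

Precompiling is the usual motivation for the static final field: String.matches and String.replaceAll recompile the regex on every call, while a shared Pattern is built once.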
@@ -186,7 +186,7 @@ class ConfigManager private function doProcessConfig($backendConfig) { $configPasses = array( - new NormalizerConfigPass(), + new NormalizerConfigPass($this->container->get('easyadmin.controller_resolver')), new DesignConfigPass($this->container->get('twig'), $this->container->getParameter('kernel.debug')), new MenuConfigPass(), new ActionConfigPass(),
1
<?php /* * This file is part of the EasyAdminBundle. * * (c) Javier Eguiluz <[email protected]> * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. */ namespace JavierEguiluz\Bundle\EasyAdminBundle\Configuration; use Symfony\Component\DependencyInjection\ContainerInterface; /** * Manages the loading and processing of backend configuration and it provides * useful methods to get the configuration for the entire backend, for a single * entity, for a single action, etc. * * @author Javier Eguiluz <[email protected]> */ class ConfigManager { private $backendConfig; /** @var ContainerInterface */ private $container; public function __construct(ContainerInterface $container) { $this->container = $container; } /** * Returns the entire backend configuration or just the configuration for * the optional property path. Example: getBackendConfig('design.menu') * * @param string|null $propertyPath * * @return array */ public function getBackendConfig($propertyPath = null) { if (null === $this->backendConfig) { $this->backendConfig = $this->processConfig(); } if (empty($propertyPath)) { return $this->backendConfig; } // turns 'design.menu' into '[design][menu]', the format required by PropertyAccess $propertyPath = '['.str_replace('.', '][', $propertyPath).']'; return $this->container->get('property_accessor')->getValue($this->backendConfig, $propertyPath); } /** * Returns the configuration for the given entity name. * * @param string $entityName * * @deprecated Use getEntityConfig() * @return array The full entity configuration * * @throws \InvalidArgumentException when the entity isn't managed by EasyAdmin */ public function getEntityConfiguration($entityName) { return $this->getEntityConfig($entityName); } /** * Returns the configuration for the given entity name. * * @param string $entityName * * @return array The full entity configuration * * @throws \InvalidArgumentException */ public function getEntityConfig($entityName) { $backendConfig = $this->getBackendConfig(); if (!isset($backendConfig['entities'][$entityName])) { throw new \InvalidArgumentException(sprintf('Entity "%s" is not managed by EasyAdmin.', $entityName)); } return $backendConfig['entities'][$entityName]; } /** * Returns the full entity config for the given entity class. * * @param string $fqcn The full qualified class name of the entity * * @return array|null The full entity configuration */ public function getEntityConfigByClass($fqcn) { $backendConfig = $this->getBackendConfig(); foreach ($backendConfig['entities'] as $entityName => $entityConfig) { if ($entityConfig['class'] === $fqcn) { return $entityConfig; } } } /** * Returns the full action configuration for the given 'entity' and 'view'. * * @param string $entityName * @param string $view * @param string $action * * @return array */ public function getActionConfig($entityName, $view, $action) { try { $entityConfig = $this->getEntityConfig($entityName); } catch (\Exception $e) { $entityConfig = array(); } return isset($entityConfig[$view]['actions'][$action]) ? $entityConfig[$view]['actions'][$action] : array(); } /** * Checks whether the given 'action' is enabled for the given 'entity' and * 'view'. 
* * @param string $entityName * @param string $view * @param string $action * * @return bool */ public function isActionEnabled($entityName, $view, $action) { if ($view === $action) { return true; } $entityConfig = $this->getEntityConfig($entityName); return !in_array($action, $entityConfig['disabled_actions']) && array_key_exists($action, $entityConfig[$view]['actions']); } /** * It processes the original backend configuration defined by the end-users * to generate the full configuration used by the application. Depending on * the environment, the configuration is processed every time or once and * the result cached for later reuse. * * @return array */ private function processConfig() { $originalBackendConfig = $this->container->getParameter('easyadmin.config'); if (true === $this->container->getParameter('kernel.debug')) { return $this->doProcessConfig($originalBackendConfig); } $cache = $this->container->get('easyadmin.cache.manager'); if ($cache->hasItem('processed_config')) { return $cache->getItem('processed_config'); } $backendConfig = $this->doProcessConfig($originalBackendConfig); $cache->save('processed_config', $backendConfig); return $backendConfig; } /** * It processes the given backend configuration to generate the fully * processed configuration used in the application. * * @param string $backendConfig * * @return array */ private function doProcessConfig($backendConfig) { $configPasses = array( new NormalizerConfigPass(), new DesignConfigPass($this->container->get('twig'), $this->container->getParameter('kernel.debug')), new MenuConfigPass(), new ActionConfigPass(), new MetadataConfigPass($this->container->get('doctrine')), new PropertyConfigPass(), new ViewConfigPass(), new TemplateConfigPass($this->container->getParameter('kernel.root_dir').'/Resources/views'), new DefaultConfigPass(), ); foreach ($configPasses as $configPass) { $backendConfig = $configPass->process($backendConfig); } return $backendConfig; } }
1
10,522
Hasn't the constructor (`__construct`) for `NormalizerConfigPass` been implemented yet?
EasyCorp-EasyAdminBundle
php
@@ -190,3 +190,16 @@ class Collect(object): def __repr__(self): return self.__class__.__name__ + '(keys={}, meta_keys={})'.format( self.keys, self.meta_keys) + + [email protected]_module +class WrapFieldsToLists(object): + + def __call__(self, results): + # Wrap dict fields into lists + for key, val in results.items(): + results[key] = [val] + return results + + def __repr__(self): + return '{}()'.format(self.__class__.__name__)
1
from collections.abc import Sequence import mmcv import numpy as np import torch from mmcv.parallel import DataContainer as DC from ..registry import PIPELINES def to_tensor(data): """Convert objects of various python types to :obj:`torch.Tensor`. Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`, :class:`Sequence`, :class:`int` and :class:`float`. """ if isinstance(data, torch.Tensor): return data elif isinstance(data, np.ndarray): return torch.from_numpy(data) elif isinstance(data, Sequence) and not mmcv.is_str(data): return torch.tensor(data) elif isinstance(data, int): return torch.LongTensor([data]) elif isinstance(data, float): return torch.FloatTensor([data]) else: raise TypeError('type {} cannot be converted to tensor.'.format( type(data))) @PIPELINES.register_module class ToTensor(object): def __init__(self, keys): self.keys = keys def __call__(self, results): for key in self.keys: results[key] = to_tensor(results[key]) return results def __repr__(self): return self.__class__.__name__ + '(keys={})'.format(self.keys) @PIPELINES.register_module class ImageToTensor(object): def __init__(self, keys): self.keys = keys def __call__(self, results): for key in self.keys: img = results[key] if len(img.shape) < 3: img = np.expand_dims(img, -1) results[key] = to_tensor(img.transpose(2, 0, 1)) return results def __repr__(self): return self.__class__.__name__ + '(keys={})'.format(self.keys) @PIPELINES.register_module class Transpose(object): def __init__(self, keys, order): self.keys = keys self.order = order def __call__(self, results): for key in self.keys: results[key] = results[key].transpose(self.order) return results def __repr__(self): return self.__class__.__name__ + '(keys={}, order={})'.format( self.keys, self.order) @PIPELINES.register_module class ToDataContainer(object): def __init__(self, fields=(dict(key='img', stack=True), dict(key='gt_bboxes'), dict(key='gt_labels'))): self.fields = fields def __call__(self, results): for field in self.fields: field = field.copy() key = field.pop('key') results[key] = DC(results[key], **field) return results def __repr__(self): return self.__class__.__name__ + '(fields={})'.format(self.fields) @PIPELINES.register_module class DefaultFormatBundle(object): """Default formatting bundle. It simplifies the pipeline of formatting common fields, including "img", "proposals", "gt_bboxes", "gt_labels", "gt_masks" and "gt_semantic_seg". These fields are formatted as follows. 
- img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True) - proposals: (1)to tensor, (2)to DataContainer - gt_bboxes: (1)to tensor, (2)to DataContainer - gt_bboxes_ignore: (1)to tensor, (2)to DataContainer - gt_labels: (1)to tensor, (2)to DataContainer - gt_masks: (1)to tensor, (2)to DataContainer (cpu_only=True) - gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor, (3)to DataContainer (stack=True) """ def __call__(self, results): if 'img' in results: img = results['img'] if len(img.shape) < 3: img = np.expand_dims(img, -1) img = np.ascontiguousarray(img.transpose(2, 0, 1)) results['img'] = DC(to_tensor(img), stack=True) for key in ['proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels']: if key not in results: continue results[key] = DC(to_tensor(results[key])) if 'gt_masks' in results: results['gt_masks'] = DC(results['gt_masks'], cpu_only=True) if 'gt_semantic_seg' in results: results['gt_semantic_seg'] = DC( to_tensor(results['gt_semantic_seg'][None, ...]), stack=True) return results def __repr__(self): return self.__class__.__name__ @PIPELINES.register_module class Collect(object): """ Collect data from the loader relevant to the specific task. This is usually the last stage of the data loader pipeline. Typically keys is set to some subset of "img", "proposals", "gt_bboxes", "gt_bboxes_ignore", "gt_labels", and/or "gt_masks". The "img_meta" item is always populated. The contents of the "img_meta" dictionary depends on "meta_keys". By default this includes: - "img_shape": shape of the image input to the network as a tuple (h, w, c). Note that images may be zero padded on the bottom/right if the batch tensor is larger than this shape. - "scale_factor": a float indicating the preprocessing scale - "flip": a boolean indicating if image flip transform was used - "filename": path to the image file - "ori_shape": original shape of the image as a tuple (h, w, c) - "pad_shape": image shape after padding - "img_norm_cfg": a dict of normalization information: - mean - per channel mean subtraction - std - per channel std divisor - to_rgb - bool indicating if bgr was converted to rgb """ def __init__(self, keys, meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape', 'scale_factor', 'flip', 'img_norm_cfg')): self.keys = keys self.meta_keys = meta_keys def __call__(self, results): data = {} img_meta = {} for key in self.meta_keys: img_meta[key] = results[key] data['img_meta'] = DC(img_meta, cpu_only=True) for key in self.keys: data[key] = results[key] return data def __repr__(self): return self.__class__.__name__ + '(keys={}, meta_keys={})'.format( self.keys, self.meta_keys)
1
18,847
Docstring is missing.
open-mmlab-mmdetection
py
@@ -74,6 +74,7 @@ bool BaseEvents::loadFromXml() if (!success || !registerEvent(event, node)) { delete event; + return false; } } return true;
1
/** * The Forgotten Server - a free and open-source MMORPG server emulator * Copyright (C) 2017 Mark Samman <[email protected]> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "otpch.h" #include "baseevents.h" #include "pugicast.h" #include "tools.h" extern LuaEnvironment g_luaEnvironment; bool BaseEvents::loadFromXml() { if (loaded) { std::cout << "[Error - BaseEvents::loadFromXml] It's already loaded." << std::endl; return false; } std::string scriptsName = getScriptBaseName(); std::string basePath = "data/" + scriptsName + "/"; if (getScriptInterface().loadFile(basePath + "lib/" + scriptsName + ".lua") == -1) { std::cout << "[Warning - BaseEvents::loadFromXml] Can not load " << scriptsName << " lib/" << scriptsName << ".lua" << std::endl; } std::string filename = basePath + scriptsName + ".xml"; pugi::xml_document doc; pugi::xml_parse_result result = doc.load_file(filename.c_str()); if (!result) { printXMLError("Error - BaseEvents::loadFromXml", filename, result); return false; } loaded = true; for (auto node : doc.child(scriptsName.c_str()).children()) { Event* event = getEvent(node.name()); if (!event) { continue; } if (!event->configureEvent(node)) { std::cout << "[Warning - BaseEvents::loadFromXml] Failed to configure event" << std::endl; delete event; continue; } bool success; pugi::xml_attribute scriptAttribute = node.attribute("script"); if (scriptAttribute) { std::string scriptFile = "scripts/" + std::string(scriptAttribute.as_string()); success = event->checkScript(basePath, scriptsName, scriptFile) && event->loadScript(basePath + scriptFile); } else { success = event->loadFunction(node.attribute("function")); } if (!success || !registerEvent(event, node)) { delete event; } } return true; } bool BaseEvents::reload() { loaded = false; clear(); return loadFromXml(); } Event::Event(LuaScriptInterface* interface) : scriptInterface(interface) {} bool Event::checkScript(const std::string& basePath, const std::string& scriptsName, const std::string& scriptFile) const { LuaScriptInterface* testInterface = g_luaEnvironment.getTestInterface(); testInterface->reInitState(); if (testInterface->loadFile(std::string(basePath + "lib/" + scriptsName + ".lua")) == -1) { std::cout << "[Warning - Event::checkScript] Can not load " << scriptsName << " lib/" << scriptsName << ".lua" << std::endl; } if (scriptId != 0) { std::cout << "[Failure - Event::checkScript] scriptid = " << scriptId << std::endl; return false; } if (testInterface->loadFile(basePath + scriptFile) == -1) { std::cout << "[Warning - Event::checkScript] Can not load script: " << scriptFile << std::endl; std::cout << testInterface->getLastLuaError() << std::endl; return false; } int32_t id = testInterface->getEvent(getScriptEventName()); if (id == -1) { std::cout << "[Warning - Event::checkScript] Event " << getScriptEventName() << " not found. 
" << scriptFile << std::endl; return false; } return true; } bool Event::loadScript(const std::string& scriptFile) { if (!scriptInterface || scriptId != 0) { std::cout << "Failure: [Event::loadScript] scriptInterface == nullptr. scriptid = " << scriptId << std::endl; return false; } if (scriptInterface->loadFile(scriptFile) == -1) { std::cout << "[Warning - Event::loadScript] Can not load script. " << scriptFile << std::endl; std::cout << scriptInterface->getLastLuaError() << std::endl; return false; } int32_t id = scriptInterface->getEvent(getScriptEventName()); if (id == -1) { std::cout << "[Warning - Event::loadScript] Event " << getScriptEventName() << " not found. " << scriptFile << std::endl; return false; } scripted = true; scriptId = id; return true; } bool CallBack::loadCallBack(LuaScriptInterface* interface, const std::string& name) { if (!interface) { std::cout << "Failure: [CallBack::loadCallBack] scriptInterface == nullptr" << std::endl; return false; } scriptInterface = interface; int32_t id = scriptInterface->getEvent(name.c_str()); if (id == -1) { std::cout << "[Warning - CallBack::loadCallBack] Event " << name << " not found." << std::endl; return false; } scriptId = id; loaded = true; return true; }
1
14,043
I don't see the reason for this return, as it will stop loading halfway because of an error, and that's bad IMO. Either you load it all and report errors, or you load nothing and terminate; the first is how it has always worked, so I wouldn't change that (see the sketch after this record).
otland-forgottenserver
cpp
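The comment above contrasts two loading strategies; the sketch below illustrates them side by side. It is written in Java purely to show the control flow with hypothetical names and is not a port of the original C++ loader.

import java.util.ArrayList;
import java.util.List;

class EventLoaderSketch {
    // "Load it all and report errors": a bad entry is skipped and logged,
    // the remaining entries are still loaded (the behaviour the reviewer prefers).
    static List<String> loadAllReportErrors(List<String> nodes) {
        List<String> loaded = new ArrayList<>();
        for (String node : nodes) {
            try {
                loaded.add(parse(node));
            } catch (IllegalArgumentException e) {
                System.err.println("[Warning] skipping entry: " + e.getMessage());
            }
        }
        return loaded;
    }

    // "Load nothing and terminate": the first bad entry aborts the whole load,
    // which is what an early return from the loop effectively does.
    static List<String> loadAllOrNothing(List<String> nodes) {
        List<String> loaded = new ArrayList<>();
        for (String node : nodes) {
            loaded.add(parse(node)); // any failure propagates and cancels the load
        }
        return loaded;
    }

    private static String parse(String node) {
        if (node.isEmpty()) {
            throw new IllegalArgumentException("empty event definition");
        }
        return node;
    }
}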
@@ -58,9 +58,9 @@ func NewMockKademlia(o ...Option) *Mock { return m } -// AddPeer is called when a peer is added to the topology backlog +// AddPeers is called when a peers are added to the topology backlog // for further processing by connectivity strategy. -func (m *Mock) AddPeer(ctx context.Context, addr swarm.Address) error { +func (m *Mock) AddPeers(ctx context.Context, addr ...swarm.Address) error { panic("not implemented") // TODO: Implement }
1
// Copyright 2020 The Swarm Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package mock import ( "context" "sync" "github.com/ethersphere/bee/pkg/swarm" "github.com/ethersphere/bee/pkg/topology" ) type AddrTuple struct { Addr swarm.Address // the peer address PO uint8 // the po } func WithEachPeerRevCalls(addrs ...AddrTuple) Option { return optionFunc(func(m *Mock) { for _, a := range addrs { a := a m.eachPeerRev = append(m.eachPeerRev, a) } }) } func WithDepth(d uint8) Option { return optionFunc(func(m *Mock) { m.depth = d }) } func WithDepthCalls(d ...uint8) Option { return optionFunc(func(m *Mock) { m.depthReplies = d }) } type Mock struct { mtx sync.Mutex peers []swarm.Address eachPeerRev []AddrTuple depth uint8 depthReplies []uint8 depthCalls int trigs []chan struct{} trigMtx sync.Mutex } func NewMockKademlia(o ...Option) *Mock { m := &Mock{} for _, v := range o { v.apply(m) } return m } // AddPeer is called when a peer is added to the topology backlog // for further processing by connectivity strategy. func (m *Mock) AddPeer(ctx context.Context, addr swarm.Address) error { panic("not implemented") // TODO: Implement } func (m *Mock) ClosestPeer(addr swarm.Address) (peerAddr swarm.Address, err error) { panic("not implemented") // TODO: Implement } // EachPeer iterates from closest bin to farthest func (m *Mock) EachPeer(f topology.EachPeerFunc) error { m.mtx.Lock() defer m.mtx.Unlock() for i := len(m.peers) - 1; i > 0; i-- { stop, _, err := f(m.peers[i], uint8(i)) if stop { return nil } if err != nil { return err } } return nil } // EachPeerRev iterates from farthest bin to closest func (m *Mock) EachPeerRev(f topology.EachPeerFunc) error { m.mtx.Lock() defer m.mtx.Unlock() for _, v := range m.eachPeerRev { stop, _, err := f(v.Addr, v.PO) if stop { return nil } if err != nil { return err } } return nil } func (m *Mock) NeighborhoodDepth() uint8 { m.mtx.Lock() defer m.mtx.Unlock() m.depthCalls++ if len(m.depthReplies) > 0 { return m.depthReplies[m.depthCalls] } return m.depth } // Connected is called when a peer dials in. func (m *Mock) Connected(_ context.Context, addr swarm.Address) error { m.mtx.Lock() m.peers = append(m.peers, addr) m.mtx.Unlock() m.Trigger() return nil } // Disconnected is called when a peer disconnects. func (m *Mock) Disconnected(_ swarm.Address) { m.Trigger() } func (m *Mock) SubscribePeersChange() (c <-chan struct{}, unsubscribe func()) { channel := make(chan struct{}, 1) var closeOnce sync.Once m.trigMtx.Lock() defer m.trigMtx.Unlock() m.trigs = append(m.trigs, channel) unsubscribe = func() { m.trigMtx.Lock() defer m.trigMtx.Unlock() for i, c := range m.trigs { if c == channel { m.trigs = append(m.trigs[:i], m.trigs[i+1:]...) break } } closeOnce.Do(func() { close(channel) }) } return channel, unsubscribe } func (m *Mock) Trigger() { m.trigMtx.Lock() defer m.trigMtx.Unlock() for _, c := range m.trigs { select { case c <- struct{}{}: default: } } } func (m *Mock) ResetPeers() { m.mtx.Lock() defer m.mtx.Unlock() m.peers = nil m.eachPeerRev = nil } func (m *Mock) Close() error { panic("not implemented") // TODO: Implement } type Option interface { apply(*Mock) } type optionFunc func(*Mock) func (f optionFunc) apply(r *Mock) { f(r) }
1
11,860
The change from AddPeer to AddPeers added a space in some occurrences.
ethersphere-bee
go
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertEquals; import org.junit.Before; import org.junit.Test; +import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.util.Arrays;
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package com.thoughtworks.selenium; import static org.junit.Assert.assertEquals; import org.junit.Before; import org.junit.Test; import java.lang.reflect.Method; import java.util.Arrays; public class CSVTest { Method CSV; @Before public void setUp() { Method[] methods = HttpCommandProcessor.class.getDeclaredMethods(); for (int i = 0; i < methods.length; i++) { if ("parseCSV".equals(methods[i].getName())) { Method csvMethod = methods[i]; csvMethod.setAccessible(true); CSV = csvMethod; break; } } } public String[] parseCSV(String input, String[] expected) { System.out.print(input + ": "); String[] output; try { output = (String[]) CSV.invoke(null, input); } catch (Exception e) { throw new RuntimeException(e); } System.out.println(Arrays.asList(output).toString()); compareStringArrays(expected, output); return output; } @Test public void testSimple() { String input = "1,2,3"; String[] expected = new String[] {"1", "2", "3"}; parseCSV(input, expected); } @Test public void testBackSlash() { String input = "1,2\\,3,4"; // Java-escaped, but not CSV-escaped String[] expected = new String[] {"1", "2,3", "4"}; // backslash should disappear in output parseCSV(input, expected); } @Test public void testRandomSingleBackSlash() { String input = "1,\\2,3"; // Java-escaped, but not CSV-escaped String[] expected = new String[] {"1", "2", "3"}; // backslash should disappear parseCSV(input, expected); } @Test public void testDoubleBackSlashBeforeComma() { String input = "1,2\\\\,3"; // Java-escaped and CSV-escaped String[] expected = new String[] {"1", "2\\", "3"}; // one backslash should disappear in output parseCSV(input, expected); } @Test public void testRandomDoubleBackSlash() { String input = "1,\\\\2,3"; // Java-escaped, and CSV-escaped String[] expected = new String[] {"1", "\\2", "3"}; // one backslash should disappear in output parseCSV(input, expected); } @Test public void testTripleBackSlashBeforeComma() { String input = "1,2\\\\\\,3,4"; // Java-escaped, and CSV-escaped String[] expected = new String[] {"1", "2\\,3", "4"}; // one backslash should disappear in // output parseCSV(input, expected); } @Test public void test4BackSlashesBeforeComma() { String input = "1,2\\\\\\\\,3"; // Java-escaped, and CSV-escaped String[] expected = new String[] {"1", "2\\\\", "3"}; // two backslashes should disappear in // output parseCSV(input, expected); } public void compareStringArrays(String[] expected, String[] actual) { assertEquals("Wrong number of elements", expected.length, actual.length); for (int i = 0; i < expected.length; i++) { assertEquals(expected[i], actual[i]); } } }
1
19,390
Can you please revert changes to files in the `thoughtworks` package? This is legacy code and we will eventually phase out RC.
SeleniumHQ-selenium
java
@@ -29,15 +29,15 @@ import ( const NodePortLocalChain = "ANTREA-NODE-PORT-LOCAL" // IPTableRules provides a client to perform IPTABLES operations -type iptablesRules struct { +type IPTableRules struct { name string table *iptables.Client } -// NewIPTableRules retruns a new instance of IPTableRules -func NewIPTableRules() *iptablesRules { +// NewIPTableRules returns a new instance of IPTableRules +func NewIPTableRules() *IPTableRules { iptInstance, _ := iptables.New(true, false) - iptRule := iptablesRules{ + iptRule := IPTableRules{ name: "NPL", table: iptInstance, }
1
// +build !windows // Copyright 2020 Antrea Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package rules import ( "bytes" "fmt" "github.com/vmware-tanzu/antrea/pkg/agent/util/iptables" "k8s.io/klog" ) // NodePortLocalChain is the name of the chain in IPTABLES for Node Port Local const NodePortLocalChain = "ANTREA-NODE-PORT-LOCAL" // IPTableRules provides a client to perform IPTABLES operations type iptablesRules struct { name string table *iptables.Client } // NewIPTableRules retruns a new instance of IPTableRules func NewIPTableRules() *iptablesRules { iptInstance, _ := iptables.New(true, false) iptRule := iptablesRules{ name: "NPL", table: iptInstance, } return &iptRule } // Init initializes IPTABLES rules for NPL. Currently it deletes existing rules to ensure that no stale entries are present. func (ipt *iptablesRules) Init() error { return ipt.CreateChains() } // CreateChains creates the chain NodePortLocalChain in NAT table. // All DNAT rules for NPL would be added in this chain. func (ipt *iptablesRules) CreateChains() error { err := ipt.table.EnsureChain(iptables.NATTable, NodePortLocalChain) if err != nil { return fmt.Errorf("IPTABLES chain creation in NAT table failed for NPL with error: %v", err) } ruleSpec := []string{ "-p", "tcp", "-j", NodePortLocalChain, } err = ipt.table.EnsureRule(iptables.NATTable, iptables.PreRoutingChain, ruleSpec) if err != nil { return fmt.Errorf("IPTABLES rule creation in NAT table failed for NPL with error: %v", err) } return nil } // AddRule appends a DNAT rule in NodePortLocalChain chain of NAT table func (ipt *iptablesRules) AddRule(port int, podIP string) error { ruleSpec := []string{ "-p", "tcp", "-m", "tcp", "--dport", fmt.Sprint(port), "-j", "DNAT", "--to-destination", podIP, } err := ipt.table.EnsureRule(iptables.NATTable, NodePortLocalChain, ruleSpec) if err != nil { return fmt.Errorf("IPTABLES rule creation failed for NPL with error: %v", err) } klog.Infof("successfully added rule for Pod %s: %d", podIP, port) return nil } // AddAllRules constructs a list of iptables rules for the NPL chain and performs a // iptables-restore on this chain. It uses --no-flush to keep the previous rules intact. func (ipt *iptablesRules) AddAllRules(nplList []PodNodePort) error { iptablesData := bytes.NewBuffer(nil) writeLine(iptablesData, "*nat") writeLine(iptablesData, iptables.MakeChainLine(NodePortLocalChain)) for _, nplData := range nplList { destination := nplData.PodIP + ":" + fmt.Sprint(nplData.PodPort) writeLine(iptablesData, []string{ "-A", NodePortLocalChain, "-p", "tcp", "-m", "tcp", "--dport", fmt.Sprint(nplData.NodePort), "-j", "DNAT", "--to-destination", destination, }...) 
} writeLine(iptablesData, "COMMIT") if err := ipt.table.Restore(iptablesData.Bytes(), false, false); err != nil { return err } return nil } // DeleteRule deletes a specific NPL rule from NodePortLocalChain chain func (ipt *iptablesRules) DeleteRule(port int, podip string) error { klog.Infof("Deleting rule with port %v and podip %v", port, podip) ruleSpec := []string{ "-p", "tcp", "-m", "tcp", "--dport", fmt.Sprint(port), "-j", "DNAT", "--to-destination", podip, } err := ipt.table.DeleteRule(iptables.NATTable, NodePortLocalChain, ruleSpec) if err != nil { return fmt.Errorf("failed to delete IPTABLES rule for NPL: %v", err) } return nil } // DeleteAllRules deletes all NPL rules programmed in the node func (ipt *iptablesRules) DeleteAllRules() error { exists, err := ipt.table.ChainExists(iptables.NATTable, NodePortLocalChain) if err != nil { return fmt.Errorf("failed to check if NodePortLocal chain exists in NAT table: %v", err) } if !exists { return nil } ruleSpec := []string{ "-p", "tcp", "-j", NodePortLocalChain, } err = ipt.table.DeleteRule(iptables.NATTable, iptables.PreRoutingChain, ruleSpec) if err != nil { return fmt.Errorf("failed to delete rule from prerouting chain for NPL: %v", err) } err = ipt.table.DeleteChain(iptables.NATTable, NodePortLocalChain) if err != nil { return fmt.Errorf("failed to delete NodePortLocal Chain from NAT table: %v", err) } return nil } // Join all words with spaces, terminate with newline and write to buf. func writeLine(buf *bytes.Buffer, words ...string) { // We avoid strings.Join for performance reasons. for i := range words { buf.WriteString(words[i]) if i < len(words)-1 { buf.WriteByte(' ') } else { buf.WriteByte('\n') } } }
1
27,808
Did you remove the "s" from "iptables" in "IPTableRules" on purpose? I feel it should be "IPTablesRules".
antrea-io-antrea
go
@@ -16,17 +16,17 @@ package org.hyperledger.besu.ethereum.api.jsonrpc.internal.privacy.methods.priv; import org.hyperledger.besu.ethereum.api.jsonrpc.RpcMethod; import org.hyperledger.besu.ethereum.api.jsonrpc.internal.JsonRpcRequestContext; +import org.hyperledger.besu.ethereum.api.jsonrpc.internal.methods.JsonRpcMethod; import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcResponse; import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcSuccessResponse; import org.hyperledger.besu.ethereum.core.Address; import org.hyperledger.besu.ethereum.core.PrivacyParameters; -public class PrivGetPrivacyPrecompileAddress extends PrivacyApiMethod { +public class PrivGetPrivacyPrecompileAddress implements JsonRpcMethod { private final Integer privacyAddress; public PrivGetPrivacyPrecompileAddress(final PrivacyParameters privacyParameters) { - super(privacyParameters); privacyAddress = privacyParameters.getPrivacyAddress(); }
1
/* * Copyright ConsenSys AG. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * SPDX-License-Identifier: Apache-2.0 */ package org.hyperledger.besu.ethereum.api.jsonrpc.internal.privacy.methods.priv; import org.hyperledger.besu.ethereum.api.jsonrpc.RpcMethod; import org.hyperledger.besu.ethereum.api.jsonrpc.internal.JsonRpcRequestContext; import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcResponse; import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcSuccessResponse; import org.hyperledger.besu.ethereum.core.Address; import org.hyperledger.besu.ethereum.core.PrivacyParameters; public class PrivGetPrivacyPrecompileAddress extends PrivacyApiMethod { private final Integer privacyAddress; public PrivGetPrivacyPrecompileAddress(final PrivacyParameters privacyParameters) { super(privacyParameters); privacyAddress = privacyParameters.getPrivacyAddress(); } @Override public String getName() { return RpcMethod.PRIV_GET_PRIVACY_PRECOMPILE_ADDRESS.getMethodName(); } @Override public JsonRpcResponse doResponse(final JsonRpcRequestContext requestContext) { return new JsonRpcSuccessResponse( requestContext.getRequest().getId(), Address.privacyPrecompiled(privacyAddress).toString()); } }
1
20,833
nit: not your problem, but I wonder if this should have been a "ConstantResponseRpc" which takes a name and a value... maybe a more reusable concept (see the sketch after this record).
hyperledger-besu
java
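To sketch the "ConstantResponseRpc" idea from the comment above: a reusable method that always answers with a fixed, precomputed value. The interface below is a simplified stand-in, not Besu's actual JsonRpcMethod API, and the class and method names are hypothetical.

import java.util.Objects;

// Simplified stand-in for a JSON-RPC method interface (assumption, not Besu's API).
interface SimpleRpcMethod {
    String getName();
    Object respond(Object requestId);
}

// Answers every request with the same constant value, e.g. a precompile address.
final class ConstantResponseRpc implements SimpleRpcMethod {
    private final String name;
    private final Object constantValue;

    ConstantResponseRpc(String name, Object constantValue) {
        this.name = Objects.requireNonNull(name);
        this.constantValue = Objects.requireNonNull(constantValue);
    }

    @Override
    public String getName() {
        return name;
    }

    @Override
    public Object respond(Object requestId) {
        // A real implementation would wrap this in a success response keyed by
        // the request id; here we simply return the constant.
        return constantValue;
    }
}

A method like PrivGetPrivacyPrecompileAddress could then be expressed as registering new ConstantResponseRpc("priv_getPrivacyPrecompileAddress", precompileAddress.toString()) (with precompileAddress supplied by the caller), which is the reusability the reviewer hints at.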
@@ -26,7 +26,7 @@ import ( // instrumentationName is empty, then a implementation defined default name // will be used instead. // -// This is short for MeterProvider().Meter(name) +// This is short for otel.GetMeterProvider().Meter(name) func Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter { return GetMeterProvider().Meter(instrumentationName, opts...) }
1
// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otel // import "go.opentelemetry.io/otel" import ( "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/metric" ) // Meter creates an implementation of the Meter interface from the global // MeterProvider. The instrumentationName must be the name of the library // providing instrumentation. This name may be the same as the instrumented // code only if that code provides built-in instrumentation. If the // instrumentationName is empty, then a implementation defined default name // will be used instead. // // This is short for MeterProvider().Meter(name) func Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter { return GetMeterProvider().Meter(instrumentationName, opts...) } // GetMeterProvider returns the registered global meter provider. If // none is registered then a default meter provider is returned that // forwards the Meter interface to the first registered Meter. // // Use the meter provider to create a named meter. E.g. // meter := global.MeterProvider().Meter("example.com/foo") // or // meter := global.Meter("example.com/foo") func GetMeterProvider() metric.MeterProvider { return global.MeterProvider() } // SetMeterProvider registers `mp` as the global meter provider. func SetMeterProvider(mp metric.MeterProvider) { global.SetMeterProvider(mp) }
1
14,237
These changes have been resolved upstream. We can revert changes to this file when synced.
open-telemetry-opentelemetry-go
go
@@ -33,7 +33,7 @@ T = TypeVar("T") class Partition(Generic[T]): """ - Partition is the representation of a logical slice across an axis of a pipeline's work + A Partition represents a single slice of the entire set of a job's possible work. Args: value (Any): The object for this partition
1
import inspect from abc import ABC, abstractmethod from datetime import datetime, time from enum import Enum from typing import Any, Callable, Dict, Generic, List, NamedTuple, Optional, TypeVar, Union, cast import pendulum from dagster import check from ...seven.compat.pendulum import PendulumDateTime, to_timezone from ...utils import frozenlist, merge_dicts from ...utils.schedules import schedule_execution_time_iterator from ..decorator_utils import get_function_params from ..errors import ( DagsterInvalidDefinitionError, DagsterInvalidInvocationError, DagsterInvariantViolationError, DagsterUnknownPartitionError, ScheduleExecutionError, user_code_error_boundary, ) from ..storage.pipeline_run import PipelineRun from ..storage.tags import check_tags from .mode import DEFAULT_MODE_NAME from .run_request import RunRequest, SkipReason from .schedule import ScheduleDefinition, ScheduleEvaluationContext from .utils import check_valid_name DEFAULT_DATE_FORMAT = "%Y-%m-%d" T = TypeVar("T") class Partition(Generic[T]): """ Partition is the representation of a logical slice across an axis of a pipeline's work Args: value (Any): The object for this partition name (str): Name for this partition """ def __init__(self, value: T, name: Optional[str] = None): self._value = value self._name = cast(str, check.opt_str_param(name, "name", str(value))) @property def value(self) -> T: return self._value @property def name(self) -> str: return self._name def schedule_partition_range( start: datetime, end: Optional[datetime], cron_schedule: str, fmt: str, timezone: Optional[str], execution_time_to_partition_fn: Callable, current_time: Optional[datetime], ) -> List[Partition[datetime]]: if end and start > end: raise DagsterInvariantViolationError( 'Selected date range start "{start}" is after date range end "{end}'.format( start=start.strftime(fmt), end=end.strftime(fmt), ) ) tz = timezone if timezone else "UTC" _current_time = current_time if current_time else pendulum.now(tz) # Coerce to the definition timezone _start = ( to_timezone(start, tz) if isinstance(start, PendulumDateTime) else pendulum.instance(start, tz=tz) ) _current_time = ( to_timezone(_current_time, tz) if isinstance(_current_time, PendulumDateTime) else pendulum.instance(_current_time, tz=tz) ) # The end partition time should be before the last partition that # executes before the current time end_partition_time = execution_time_to_partition_fn(_current_time) # The partition set has an explicit end time that represents the end of the partition range if end: _end = ( to_timezone(end, tz) if isinstance(end, PendulumDateTime) else pendulum.instance(end, tz=tz) ) # If the explicit end time is before the last partition time, # update the end partition time end_partition_time = min(_end, end_partition_time) end_timestamp = end_partition_time.timestamp() partitions: List[Partition[datetime]] = [] for next_time in schedule_execution_time_iterator(_start.timestamp(), cron_schedule, tz): partition_time = execution_time_to_partition_fn(next_time) if partition_time.timestamp() > end_timestamp: break if partition_time.timestamp() < _start.timestamp(): continue partitions.append(Partition(value=partition_time, name=partition_time.strftime(fmt))) return partitions class ScheduleType(Enum): HOURLY = "HOURLY" DAILY = "DAILY" WEEKLY = "WEEKLY" MONTHLY = "MONTHLY" class PartitionsDefinition(ABC, Generic[T]): @abstractmethod def get_partitions(self, current_time: Optional[datetime] = None) -> List[Partition[T]]: ... 
class StaticPartitionsDefinition(PartitionsDefinition[T]): # pylint: disable=unsubscriptable-object def __init__(self, partitions: List[Partition[T]]): self._partitions = check.list_param(partitions, "partitions", of_type=Partition) def get_partitions( self, current_time: Optional[datetime] = None # pylint: disable=unused-argument ) -> List[Partition[T]]: return self._partitions class ScheduleTimeBasedPartitionsDefinition( PartitionsDefinition[datetime], # pylint: disable=unsubscriptable-object NamedTuple( "_ScheduleTimeBasedPartitionsDefinition", [ ("schedule_type", ScheduleType), ("start", datetime), ("execution_time", time), ("execution_day", Optional[int]), ("end", Optional[datetime]), ("fmt", str), ("timezone", Optional[str]), ("offset", Optional[int]), ], ), ): """Computes the partitions backwards from the scheduled execution times""" def __new__( cls, schedule_type: ScheduleType, start: datetime, execution_time: Optional[time] = None, execution_day: Optional[int] = None, end: Optional[datetime] = None, fmt: Optional[str] = None, timezone: Optional[str] = None, offset: Optional[int] = None, ): if end is not None: check.invariant( start <= end, f'Selected date range start "{start}" ' f'is after date range end "{end}"'.format( start=start.strftime(fmt) if fmt is not None else start, end=cast(datetime, end).strftime(fmt) if fmt is not None else end, ), ) if schedule_type in [ScheduleType.HOURLY, ScheduleType.DAILY]: check.invariant( not execution_day, f'Execution day should not be provided for schedule type "{schedule_type}"', ) elif schedule_type is ScheduleType.WEEKLY: execution_day = execution_day if execution_day is not None else 0 check.invariant( execution_day is not None and 0 <= execution_day <= 6, f'Execution day "{execution_day}" must be between 0 and 6 for ' f'schedule type "{schedule_type}"', ) elif schedule_type is ScheduleType.MONTHLY: execution_day = execution_day if execution_day is not None else 1 check.invariant( execution_day is not None and 1 <= execution_day <= 31, f'Execution day "{execution_day}" must be between 1 and 31 for ' f'schedule type "{schedule_type}"', ) return super(ScheduleTimeBasedPartitionsDefinition, cls).__new__( cls, check.inst_param(schedule_type, "schedule_type", ScheduleType), check.inst_param(start, "start", datetime), check.opt_inst_param(execution_time, "execution_time", time, time(0, 0)), check.opt_int_param( execution_day, "execution_day", ), check.opt_inst_param(end, "end", datetime), cast(str, check.opt_str_param(fmt, "fmt", default=DEFAULT_DATE_FORMAT)), check.opt_str_param(timezone, "timezone", default="UTC"), check.opt_int_param(offset, "offset", default=1), ) def get_partitions(self, current_time: Optional[datetime] = None) -> List[Partition[datetime]]: check.opt_inst_param(current_time, "current_time", datetime) return schedule_partition_range( start=self.start, end=self.end, cron_schedule=self.get_cron_schedule(), fmt=self.fmt, timezone=self.timezone, execution_time_to_partition_fn=self.get_execution_time_to_partition_fn(), current_time=current_time, ) def get_cron_schedule(self) -> str: return get_cron_schedule(self.schedule_type, self.execution_time, self.execution_day) def get_execution_time_to_partition_fn(self) -> Callable[[datetime], datetime]: if self.schedule_type is ScheduleType.HOURLY: return lambda d: pendulum.instance(d).subtract(hours=self.offset, minutes=d.minute) elif self.schedule_type is ScheduleType.DAILY: return lambda d: pendulum.instance(d).subtract( days=self.offset, hours=d.hour, minutes=d.minute ) elif 
self.schedule_type is ScheduleType.WEEKLY: execution_day = cast(int, self.execution_day) day_difference = (execution_day - (self.start.weekday() + 1)) % 7 return lambda d: pendulum.instance(d).subtract( weeks=self.offset, days=day_difference, hours=d.hour, minutes=d.minute ) elif self.schedule_type is ScheduleType.MONTHLY: execution_day = cast(int, self.execution_day) return lambda d: pendulum.instance(d).subtract( months=self.offset, days=execution_day - 1, hours=d.hour, minutes=d.minute ) else: check.assert_never(self.schedule_type) class DynamicPartitionsDefinition( PartitionsDefinition, NamedTuple( "_DynamicPartitionsDefinition", [("partition_fn", Callable[[Optional[datetime]], List[Partition]])], ), ): def __new__(cls, partition_fn: Callable[[Optional[datetime]], List[Partition]]): return super(DynamicPartitionsDefinition, cls).__new__( cls, check.callable_param(partition_fn, "partition_fn") ) def get_partitions(self, current_time: Optional[datetime] = None) -> List[Partition]: return self.partition_fn(current_time) class PartitionSetDefinition(Generic[T]): """ Defines a partition set, representing the set of slices making up an axis of a pipeline Args: name (str): Name for this partition set pipeline_name (str): The name of the pipeline definition partition_fn (Optional[Callable[void, List[Partition]]]): User-provided function to define the set of valid partition objects. solid_selection (Optional[List[str]]): A list of solid subselection (including single solid names) to execute with this partition. e.g. ``['*some_solid+', 'other_solid']`` mode (Optional[str]): The mode to apply when executing this partition. (default: 'default') run_config_fn_for_partition (Callable[[Partition], Any]): A function that takes a :py:class:`~dagster.Partition` and returns the run configuration that parameterizes the execution for this partition. tags_fn_for_partition (Callable[[Partition], Optional[dict[str, str]]]): A function that takes a :py:class:`~dagster.Partition` and returns a list of key value pairs that will be added to the generated run for this partition. partitions_def (Optional[PartitionsDefinition]): A set of parameters used to construct the set of valid partition objects. 
""" def __init__( self, name: str, pipeline_name: str, partition_fn: Optional[Callable[..., Union[List[Partition[T]], List[str]]]] = None, solid_selection: Optional[List[str]] = None, mode: Optional[str] = None, run_config_fn_for_partition: Callable[[Partition[T]], Any] = lambda _partition: {}, tags_fn_for_partition: Callable[ [Partition[T]], Optional[Dict[str, str]] ] = lambda _partition: {}, partitions_def: Optional[ PartitionsDefinition[T] # pylint: disable=unsubscriptable-object ] = None, ): check.invariant( partition_fn is not None or partitions_def is not None, "One of `partition_fn` or `partitions_def` must be supplied.", ) check.invariant( not (partition_fn and partitions_def), "Only one of `partition_fn` or `partitions_def` must be supplied.", ) _wrap_partition_fn = None if partition_fn is not None: partition_fn_param_count = len(inspect.signature(partition_fn).parameters) def _wrap_partition(x: Union[str, Partition]) -> Partition: if isinstance(x, Partition): return x if isinstance(x, str): return Partition(x) raise DagsterInvalidDefinitionError( "Expected <Partition> | <str>, received {type}".format(type=type(x)) ) def _wrap_partition_fn(current_time=None) -> List[Partition]: if not current_time: current_time = pendulum.now("UTC") check.callable_param(partition_fn, "partition_fn") if partition_fn_param_count == 1: obj_list = cast( Callable[..., List[Union[Partition[T], str]]], partition_fn, )(current_time) else: obj_list = partition_fn() # type: ignore return [_wrap_partition(obj) for obj in obj_list] self._name = check_valid_name(name) self._pipeline_name = check.opt_str_param(pipeline_name, "pipeline_name") self._partition_fn = _wrap_partition_fn self._solid_selection = check.opt_nullable_list_param( solid_selection, "solid_selection", of_type=str ) self._mode = check.opt_str_param(mode, "mode", DEFAULT_MODE_NAME) self._user_defined_run_config_fn_for_partition = check.callable_param( run_config_fn_for_partition, "run_config_fn_for_partition" ) self._user_defined_tags_fn_for_partition = check.callable_param( tags_fn_for_partition, "tags_fn_for_partition" ) check.opt_inst_param(partitions_def, "partitions_def", PartitionsDefinition) if partitions_def is not None: self._partitions_def = partitions_def else: if partition_fn is None: check.failed("One of `partition_fn` or `partitions_def` must be supplied.") self._partitions_def = DynamicPartitionsDefinition(partition_fn=_wrap_partition_fn) @property def name(self): return self._name @property def pipeline_name(self): return self._pipeline_name @property def solid_selection(self): return self._solid_selection @property def mode(self): return self._mode def run_config_for_partition(self, partition: Partition[T]) -> Dict[str, Any]: return self._user_defined_run_config_fn_for_partition(partition) def tags_for_partition(self, partition: Partition[T]) -> Dict[str, str]: user_tags = self._user_defined_tags_fn_for_partition(partition) check_tags(user_tags, "user_tags") tags = merge_dicts(user_tags, PipelineRun.tags_for_partition_set(self, partition)) return tags def get_partitions(self, current_time: Optional[datetime] = None) -> List[Partition[T]]: """Return the set of known partitions. Arguments: current_time (Optional[datetime]): The evaluation time for the partition function, which is passed through to the ``partition_fn`` (if it accepts a parameter). Defaults to the current time in UTC. 
""" return self._partitions_def.get_partitions(current_time) def get_partition(self, name: str) -> Partition[T]: for partition in self.get_partitions(): if partition.name == name: return partition check.failed("Partition name {} not found!".format(name)) def get_partition_names(self, current_time: Optional[datetime] = None) -> List[str]: return [part.name for part in self.get_partitions(current_time)] def create_schedule_definition( self, schedule_name, cron_schedule, partition_selector, should_execute=None, environment_vars=None, execution_timezone=None, description=None, decorated_fn=None, job=None, ): """Create a ScheduleDefinition from a PartitionSetDefinition. Arguments: schedule_name (str): The name of the schedule. cron_schedule (str): A valid cron string for the schedule partition_selector (Callable[ScheduleEvaluationContext, PartitionSetDefinition], Union[Partition, List[Partition]]): Function that determines the partition to use at a given execution time. Can return either a single Partition or a list of Partitions. For time-based partition sets, will likely be either `identity_partition_selector` or a selector returned by `create_offset_partition_selector`. should_execute (Optional[function]): Function that runs at schedule execution time that determines whether a schedule should execute. Defaults to a function that always returns ``True``. environment_vars (Optional[dict]): The environment variables to set for the schedule. execution_timezone (Optional[str]): Timezone in which the schedule should run. Supported strings for timezones are the ones provided by the `IANA time zone database <https://www.iana.org/time-zones>` - e.g. "America/Los_Angeles". description (Optional[str]): A human-readable description of the schedule. Returns: PartitionScheduleDefinition: The generated PartitionScheduleDefinition for the partition selector """ check.str_param(schedule_name, "schedule_name") check.str_param(cron_schedule, "cron_schedule") check.opt_callable_param(should_execute, "should_execute") check.opt_dict_param(environment_vars, "environment_vars", key_type=str, value_type=str) check.callable_param(partition_selector, "partition_selector") check.opt_str_param(execution_timezone, "execution_timezone") check.opt_str_param(description, "description") def _execution_fn(context): check.inst_param(context, "context", ScheduleEvaluationContext) with user_code_error_boundary( ScheduleExecutionError, lambda: f"Error occurred during the execution of partition_selector for schedule {schedule_name}", ): selector_result = partition_selector(context, self) if isinstance(selector_result, SkipReason): yield selector_result return selected_partitions = ( selector_result if isinstance(selector_result, (frozenlist, list)) else [selector_result] ) check.is_list(selected_partitions, of_type=Partition) if not selected_partitions: yield SkipReason("Partition selector returned an empty list of partitions.") return missing_partition_names = [ partition.name for partition in selected_partitions if partition.name not in self.get_partition_names(context.scheduled_execution_time) ] if missing_partition_names: yield SkipReason( "Partition selector returned partition" + ("s" if len(missing_partition_names) > 1 else "") + f" not in the partition set: {', '.join(missing_partition_names)}." 
) return with user_code_error_boundary( ScheduleExecutionError, lambda: f"Error occurred during the execution of should_execute for schedule {schedule_name}", ): if should_execute and not should_execute(context): yield SkipReason( "should_execute function for {schedule_name} returned false.".format( schedule_name=schedule_name ) ) return for selected_partition in selected_partitions: with user_code_error_boundary( ScheduleExecutionError, lambda: f"Error occurred during the execution of run_config_fn for schedule {schedule_name}", ): run_config = self.run_config_for_partition(selected_partition) with user_code_error_boundary( ScheduleExecutionError, lambda: f"Error occurred during the execution of tags_fn for schedule {schedule_name}", ): tags = self.tags_for_partition(selected_partition) yield RunRequest( run_key=selected_partition.name if len(selected_partitions) > 0 else None, run_config=run_config, tags=tags, ) return PartitionScheduleDefinition( name=schedule_name, cron_schedule=cron_schedule, pipeline_name=self._pipeline_name, tags_fn=None, solid_selection=self._solid_selection, mode=self._mode, should_execute=None, environment_vars=environment_vars, partition_set=self, execution_timezone=execution_timezone, execution_fn=_execution_fn, description=description, decorated_fn=decorated_fn, job=job, ) class PartitionScheduleDefinition(ScheduleDefinition): __slots__ = ["_partition_set"] def __init__( self, name, cron_schedule, pipeline_name, tags_fn, solid_selection, mode, should_execute, environment_vars, partition_set, run_config_fn=None, execution_timezone=None, execution_fn=None, description=None, decorated_fn=None, job=None, ): super(PartitionScheduleDefinition, self).__init__( name=check_valid_name(name), cron_schedule=cron_schedule, pipeline_name=pipeline_name, run_config_fn=run_config_fn, tags_fn=tags_fn, solid_selection=solid_selection, mode=mode, should_execute=should_execute, environment_vars=environment_vars, execution_timezone=execution_timezone, execution_fn=execution_fn, description=description, job=job, ) self._partition_set = check.inst_param( partition_set, "partition_set", PartitionSetDefinition ) self._decorated_fn = check.opt_callable_param(decorated_fn, "decorated_fn") def __call__(self, *args, **kwargs): if not self._decorated_fn: raise DagsterInvalidInvocationError( "Only partition schedules created using one of the partition schedule decorators " "can be directly invoked." ) if len(args) == 0 and len(kwargs) == 0: raise DagsterInvalidInvocationError( "Schedule decorated function has date argument, but no date argument was " "provided when invoking." ) if len(args) + len(kwargs) > 1: raise DagsterInvalidInvocationError( "Schedule invocation received multiple arguments. Only a first " "positional date parameter should be provided when invoking." ) date_param_name = get_function_params(self._decorated_fn)[0].name if args: date = check.opt_inst_param(args[0], date_param_name, datetime) else: if date_param_name not in kwargs: raise DagsterInvalidInvocationError( f"Schedule invocation expected argument '{date_param_name}'." 
) date = check.opt_inst_param(kwargs[date_param_name], date_param_name, datetime) return self._decorated_fn(date) def get_partition_set(self): return self._partition_set class PartitionedConfig(Generic[T]): """Defines a way of configuring a job where the job can be run on one of a discrete set of partitions, and each partition corresponds to run configuration for the job.""" def __init__( self, partitions_def: PartitionsDefinition[T], # pylint: disable=unsubscriptable-object run_config_for_partition_fn: Callable[[Partition[T]], Dict[str, Any]], ): self._partitions = check.inst_param(partitions_def, "partitions_def", PartitionsDefinition) self._run_config_for_partition_fn = check.callable_param( run_config_for_partition_fn, "run_config_for_partition_fn" ) @property def partitions_def(self) -> PartitionsDefinition[T]: # pylint: disable=unsubscriptable-object return self._partitions @property def run_config_for_partition_fn(self) -> Callable[[Partition[T]], Dict[str, Any]]: return self._run_config_for_partition_fn def get_partition_keys(self, current_time: Optional[datetime] = None) -> List[str]: return [partition.name for partition in self.partitions_def.get_partitions(current_time)] def get_run_config(self, partition_key: str) -> Dict[str, Any]: matching = [ partition for partition in self.partitions_def.get_partitions() if partition.name == partition_key ] if not matching: raise DagsterUnknownPartitionError( f"Could not find a partition with key `{partition_key}`" ) return self.run_config_for_partition_fn(matching[0]) def static_partitioned_config( partition_keys: List[str], ) -> Callable[[Callable[[str], Dict[str, Any]]], PartitionedConfig]: """Creates a static partitioned config for a job. The provided partition_keys returns a static list of strings identifying the set of partitions, given an optional datetime argument (representing the current time). The list of partitions is static, so while the run config returned by the decorated function may change over time, the list of valid partition keys does not. This has performance advantages over `dynamic_partitioned_config` in terms of loading different partition views in Dagit. The decorated function takes in a partition key and returns a valid run config for a particular target job. Args: partition_keys (List[str]): A list of valid partition keys, which serve as the range of values that can be provided to the decorated run config function. """ check.list_param(partition_keys, "partition_keys", str) def inner(fn: Callable[[str], Dict[str, Any]]) -> PartitionedConfig: check.callable_param(fn, "fn") partitions_list = [Partition(key) for key in partition_keys] def _run_config_wrapper(partition: Partition[T]) -> Dict[str, Any]: return fn(partition.name) return PartitionedConfig( partitions_def=StaticPartitionsDefinition(partitions_list), run_config_for_partition_fn=_run_config_wrapper, ) return inner def dynamic_partitioned_config( partition_fn: Callable[[Optional[datetime]], List[str]], ) -> Callable[[Callable[[str], Dict[str, Any]]], PartitionedConfig]: """Creates a dynamic partitioned config for a job. The provided partition_fn returns a list of strings identifying the set of partitions, given an optional datetime argument (representing the current time). The list of partitions returned may change over time. The decorated function takes in a partition key and returns a valid run config for a particular target job. 
Args: partition_fn (Callable[[datetime.datetime], Sequence[str]]): A function that generates a list of valid partition keys, which serve as the range of values that can be provided to the decorated run config function. """ check.callable_param(partition_fn, "partition_fn") def inner(fn: Callable[[str], Dict[str, Any]]) -> PartitionedConfig: def _partitions_wrapper(current_time: Optional[datetime] = None): partition_keys = partition_fn(current_time) return [Partition(key) for key in partition_keys] def _run_config_wrapper(partition: Partition[T]) -> Dict[str, Any]: return fn(partition.name) return PartitionedConfig( partitions_def=DynamicPartitionsDefinition(_partitions_wrapper), run_config_for_partition_fn=_run_config_wrapper, ) return inner def get_cron_schedule( schedule_type: ScheduleType, time_of_day: time = time(0, 0), day_of_week: Optional[int] = 0, ) -> str: minute = time_of_day.minute hour = time_of_day.hour day = day_of_week if schedule_type is ScheduleType.HOURLY: return f"{minute} * * * *" elif schedule_type is ScheduleType.DAILY: return f"{minute} {hour} * * *" elif schedule_type is ScheduleType.WEEKLY: return f"{minute} {hour} * * {day}" elif schedule_type is ScheduleType.MONTHLY: return f"{minute} {hour} {day} * *" else: check.assert_never(schedule_type)
1
16,175
The description predates this diff but I think we can come up with something more grounded.
dagster-io-dagster
py
@@ -277,7 +277,17 @@ type VolumeAPISpec struct { } `yaml:"metadata"` } -// -------------Snapshot Structs ---------- +// SnapshotAPISpec hsolds the config for creating asnapshot of volume +type SnapshotAPISpec struct { + Kind string `yaml:"kind"` + APIVersion string `yaml:"apiVersion"` + Metadata struct { + Name string `yaml:"name"` + } `yaml:"metadata"` + Spec struct { + VolumeName string `yaml:"volumeName"` + } `yaml:"spec"` +} // VolumeSnapshot is volume snapshot object accessible to the user. Upon successful creation of the actual // snapshot by the volume provider it is bound to the corresponding VolumeSnapshotData through
1
// Package v1 - Description provided at doc.go // // NOTE: // There are references to Kubernetes (K8s) types & links. This reflects the // similarity of OpenEBS design principles with K8s. These may not be a // one-to-one mapping though. // // We have not imported the K8s namespaces as-is, as OpenEBS will change // these to suit its requirements. // // NOTE: // A volume in OpenEBS has the same design as a pod in K8s. Alternatively, // a volume in OpenEBS can be considered as a StoragePod. package v1 // Volume is a user's Request for a OpenEBS volume type Volume struct { TypeMeta `json:",inline"` // Standard object's metadata ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // VolumeType holds the type of this volume // e.g. Jiva volume type or CStor volume type, etc VolumeType VolumeType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=VolumeType"` // OrchProvider holds the container orchestrator that will // orchestrate OpenEBS volume for its provisioning & other // requirements OrchProvider OrchProvider `json:"orchestrator,omitempty" protobuf:"bytes,1,opt,name=orchestrator,casttype=OrchProvider"` // Namespace will hold the namespace where this Volume will exist Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"` // Capacity will hold the capacity of this Volume Capacity string `json:"capacity,omitempty" protobuf:"bytes,1,opt,name=capacity"` // Specs contains the desired specifications the volume should have. // +optional Specs []VolumeSpec `json:"specs,omitempty" protobuf:"bytes,2,rep,name=specs"` // Status represents the current information/status of a volume Status VolumeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // VolumeList is a list of OpenEBS Volume items. type VolumeList struct { TypeMeta `json:",inline"` // Standard list metadata. // +optional ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of openebs volumes. Items []Volume `json:"items" protobuf:"bytes,2,rep,name=items"` } // VolumeSpec provides various characteristics of a volume // that can be mounted, used, etc. // // NOTE: // Only one of its members may be specified. Currently OpenEBS is the only // member. There may be other members in future. type VolumeSpec struct { // The context of this volume specification. // Examples: "controller", "replica". Implicitly inferred to be "replica" // if unspecified. // +optional Context VolumeContext `json:"context,omitempty" protobuf:"bytes,1,opt,name=context,casttype=VolumeContext"` // Number of desired replicas. This is a pointer to distinguish between explicit // zero and not specified. Defaults to 1. // +optional Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` // Image represents the container image of this volume Image string `json:"image,omitempty" protobuf:"bytes,1,opt,name=image"` // Resources represents the actual resources of the volume Capacity ResourceList // Source represents the location and type of a volume to mount. VolumeSource // AccessModes contains all ways the volume can be mounted // +optional AccessModes []VolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,1,rep,name=accessModes,casttype=VolumeAccessMode"` // Name of StorageClass to which this persistent volume belongs. Empty value // means that this volume does not belong to any StorageClass. 
// +optional StorageClassName string `json:"storageClassName,omitempty"` } // VolumeType defines the OpenEBS volume types that are // supported by Maya type VolumeType string const ( // JivaVolumeType represents a jiva volume JivaVolumeType VolumeType = "jiva" // CStorVolumeType represents a cstor volume //CStorVolumeType VolumeType = "cstor" ) // VolumeContext defines context of a volume type VolumeContext string const ( // ReplicaVolumeContext represents a volume w.r.t // replica context ReplicaVolumeContext VolumeContext = "replica" // ControllerVolumeContext represents a volume w.r.t // controller context ControllerVolumeContext VolumeContext = "controller" ) // OrchProvider defines the container orchestrators that // will orchestrate the OpenEBS volumes type OrchProvider string const ( // K8sOrchProvider represents Kubernetes orchestrator K8sOrchProvider OrchProvider = "kubernetes" ) // K8sKind defines the various K8s Kinds that are understood // by Maya type K8sKind string const ( DeploymentKK K8sKind = "deployment" ) // VolumeSource represents the source type of the Openebs volume. // NOTE: // Exactly one of its members must be set. Currently OpenEBS is the only // member. type VolumeSource struct { // OpenEBS represents an OpenEBS disk // +optional OpenEBS OpenEBS } // VolumeAccessMode defines different modes of volume access type VolumeAccessMode string const ( // ReadWriteOnce - can be mounted read/write mode to exactly 1 host ReadWriteOnce VolumeAccessMode = "ReadWriteOnce" // ReadOnlyMany - can be mounted in read-only mode to many hosts ReadOnlyMany VolumeAccessMode = "ReadOnlyMany" // ReadWriteMany - can be mounted in read/write mode to many hosts ReadWriteMany VolumeAccessMode = "ReadWriteMany" ) // VolumeStatus provides status of a volume type VolumeStatus struct { // Phase indicates if a volume is available, bound to a claim, or released by a claim // +optional Phase VolumePhase // A human-readable message indicating details about why the volume is in this state. // +optional Message string // Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI // +optional Reason string } // VolumePhase defines phase of a volume type VolumePhase string const ( // VolumePending - used for Volumes that are not available VolumePending VolumePhase = "Pending" // VolumeAvailable - used for Volumes that are not yet bound VolumeAvailable VolumePhase = "Available" // VolumeBound is used for Volumes that are bound VolumeBound VolumePhase = "Bound" // VolumeReleased - used for Volumes where the bound PersistentVolumeClaim was deleted // released volumes must be recycled before becoming available again // this phase is used by the volume claim binder to signal to another process to reclaim the resource VolumeReleased VolumePhase = "Released" // VolumeFailed - used for Volumes that failed to be correctly recycled or deleted after being released from a claim VolumeFailed VolumePhase = "Failed" ) // OpenEBS - Represents a Persistent Disk resource in OpenEBS. // // An OpenEBS disk must exist before mounting to a container. An OpenEBS disk // can only be mounted as read/write once. OpenEBS volumes support // ownership management and SELinux relabeling. type OpenEBS struct { // Unique ID of the persistent disk resource in OpenEBS.
// More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` // Filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore // TODO: how do we prevent errors in the filesystem from compromising the machine // +optional FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` // The partition in the volume that you want to mount. // If omitted, the default is to mount by volume name. // Examples: For volume /dev/sda1, you specify the partition as "1". // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). // +optional Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"` // Specify "true" to force and set the ReadOnly property in VolumeMounts to "true". // If omitted, the default is "false". // More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` } // ObjectFieldSelector selects an APIVersioned field of an object. type ObjectFieldSelector struct { // Version of the schema the FieldPath is written in terms of, defaults to "v1". // +optional APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,1,opt,name=apiVersion"` // Path of the field to select in the specified API version. FieldPath string `json:"fieldPath" protobuf:"bytes,2,opt,name=fieldPath"` } // ResourceRequirements describes the compute resource requirements. type ResourceRequirements struct { // Limits describes the maximum amount of compute resources allowed. // +optional Limits ResourceList // Requests describes the minimum amount of compute resources required. // If Request is omitted for a container, it defaults to Limits if that is explicitly specified, // otherwise to an implementation-defined value // +optional Requests ResourceList } // ResourceName is the name identifying various resources in a ResourceList. type ResourceName string // ResourceList is a set of (resource name, quantity) pairs. type ResourceList map[ResourceName]Quantity // ObjectReference contains enough information to let you inspect or modify the referred object. type ObjectReference struct { // +optional Kind string // +optional Namespace string // +optional Name string // +optional UID string // +optional APIVersion string // +optional ResourceVersion string // Optional. If referring to a piece of an object instead of an entire object, this string // should contain information to identify the sub-object. For example, if the object // reference is to a container within a pod, this would take on a value like: // "spec.containers{name}" (where "name" refers to the name of the container that triggered // the event) or if no container name is specified "spec.containers[2]" (container with // index 2 in this pod). This syntax is chosen only to have some well-defined way of // referencing a part of an object. // TODO: this design is not final and this field is subject to change in the future. 
// +optional FieldPath string } // VolumeAPISpec holds the config for creating a Volume type VolumeAPISpec struct { Kind string `yaml:"kind"` APIVersion string `yaml:"apiVersion"` Metadata struct { Name string `yaml:"name"` Labels struct { Storage string `yaml:"volumeprovisioner.mapi.openebs.io/storage-size"` } } `yaml:"metadata"` } // -------------Snapshot Structs ---------- // VolumeSnapshot is volume snapshot object accessible to the user. Upon successful creation of the actual // snapshot by the volume provider it is bound to the corresponding VolumeSnapshotData through // the VolumeSnapshotSpec type VolumeSnapshot struct { TypeMeta `json:",inline"` Metadata ObjectMeta `json:"metadata"` // Spec represents the desired state of the snapshot // +optional Spec VolumeSnapshotSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` // SnapshotName represents the name of the snapshot SnapshotName string `json:"snapshotName" protobuf:"bytes,1,opt,name=snapshotName"` // Status represents the latest observer state of the snapshot // +optional Status VolumeSnapshotStatus `json:"status" protobuf:"bytes,3,opt,name=status"` } // VolumeSnapshotList - list of volume snapshots type VolumeSnapshotList struct { TypeMeta `json:",inline"` Metadata ListMeta `json:"metadata"` Items []VolumeSnapshot `json:"items"` } // VolumeSnapshotSpec - The desired state of the volume snapshot type VolumeSnapshotSpec struct { // PersistentVolumeClaimName is the name of the PVC being snapshotted // +optional VolumeName string `json:"volumeName" protobuf:"bytes,1,opt,name=persistentVolumeClaimName"` // SnapshotDataName binds the VolumeSnapshot object with the VolumeSnapshotData // +optional SnapshotDataName string `json:"snapshotDataName" protobuf:"bytes,2,opt,name=snapshotDataName"` } // VolumeSnapshotStatus defines the status of a Volume Snapshot type VolumeSnapshotStatus struct { // The time the snapshot was successfully created // +optional CreationTimestamp Time `json:"creationTimestamp" protobuf:"bytes,1,opt,name=creationTimestamp"` // Representes the lates available observations about the volume snapshot Conditions []VolumeSnapshotCondition `json:"conditions" protobuf:"bytes,2,rep,name=conditions"` } // VolumeSnapshotConditionType - data type of volume snapshot condition type VolumeSnapshotConditionType string // These are valid conditions of a volume snapshot. const ( // VolumeSnapshotReady is added when the snapshot has been successfully created and is ready to be used. VolumeSnapshotConditionReady VolumeSnapshotConditionType = "Ready" ) // VolumeSnapshotCondition describes the state of a volume snapshot at a certain point. type VolumeSnapshotCondition struct { // Type of replication controller condition. Type VolumeSnapshotConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=VolumeSnapshotConditionType"` // Status of the condition, one of True, False, Unknown. //Status core_v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` // The last time the condition transitioned from one status to another. // +optional LastTransitionTime Time `json:"lastTransitionTime" protobuf:"bytes,3,opt,name=lastTransitionTime"` // The reason for the condition's last transition. // +optional Reason string `json:"reason" protobuf:"bytes,4,opt,name=reason"` // A human readable message indicating details about the transition. // +optional Message string `json:"message" protobuf:"bytes,5,opt,name=message"` }
1
7,144
Let the Kind, APIVersion & Metadata follow the rules already followed by Volume. What is the reason for changing the convention for the snapshot struct?
openebs-maya
go
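To illustrate the review comment above, here is a minimal, hypothetical Go sketch (not the patch author's actual change) of how SnapshotAPISpec could reuse the embedded TypeMeta/ObjectMeta conventions that Volume already follows in this package; the exact field set, tags, and the reuse of VolumeSnapshotSpec are assumptions made only for illustration.

package v1

// Hypothetical sketch: shape SnapshotAPISpec like Volume, embedding the package's
// existing TypeMeta and ObjectMeta instead of an ad-hoc yaml-tagged metadata struct.
type SnapshotAPISpec struct {
	TypeMeta   `json:",inline"`
	ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// Spec holds the desired snapshot configuration; VolumeSnapshotSpec is already
	// defined in this package and carries the VolumeName being snapshotted.
	Spec VolumeSnapshotSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
}

If the snapshot config must still be parsed from YAML as in the submitted patch, the trade-off is between keeping the yaml-tagged anonymous structs and aligning with the json/protobuf tag style used by Volume; the sketch shows only the latter, which appears to be what the reviewer is asking about.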
@@ -24,14 +24,14 @@ import org.apache.servicecomb.foundation.metrics.registry.GlobalRegistry; import com.google.common.eventbus.EventBus; import com.netflix.servo.DefaultMonitorRegistry; -import com.netflix.spectator.servo.ServoRegistry; public class DefaultRegistryInitializer implements MetricsInitializer { public static final String SERVO_POLLERS = "servo.pollers"; private GlobalRegistry globalRegistry; - private ServoRegistry registry; + @SuppressWarnings("deprecation") + private com.netflix.spectator.servo.ServoRegistry registry; // create registry before init meters @Override
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.servicecomb.metrics.core; import java.time.Duration; import org.apache.servicecomb.foundation.metrics.MetricsBootstrapConfig; import org.apache.servicecomb.foundation.metrics.MetricsInitializer; import org.apache.servicecomb.foundation.metrics.registry.GlobalRegistry; import com.google.common.eventbus.EventBus; import com.netflix.servo.DefaultMonitorRegistry; import com.netflix.spectator.servo.ServoRegistry; public class DefaultRegistryInitializer implements MetricsInitializer { public static final String SERVO_POLLERS = "servo.pollers"; private GlobalRegistry globalRegistry; private ServoRegistry registry; // create registry before init meters @Override public int getOrder() { return -10; } @Override public void init(GlobalRegistry globalRegistry, EventBus eventBus, MetricsBootstrapConfig config) { this.globalRegistry = globalRegistry; // spectator move poll gauges from inline to background executor // we need to set the interval to unify value System.setProperty("spectator.api.gaugePollingFrequency", Duration.ofMillis(config.getMsPollInterval()).toString()); System.setProperty(SERVO_POLLERS, String.valueOf(config.getMsPollInterval())); registry = new ServoRegistry(); globalRegistry.add(registry); } @Override public void destroy() { if (registry != null) { DefaultMonitorRegistry.getInstance().unregister(registry); globalRegistry.remove(registry); } } }
1
11,104
It seems we do not strongly depend on Servo, so is there any need to keep it? Should we change to another registry?
apache-servicecomb-java-chassis
java
@@ -142,7 +142,15 @@ boost::property_tree::ptree wait_response (nano::system & system, std::shared_pt void check_block_response_count (nano::system & system, std::shared_ptr<nano::rpc> const & rpc, boost::property_tree::ptree & request, uint64_t size_count) { auto response (wait_response (system, rpc, request)); - ASSERT_EQ (size_count, response.get_child ("blocks").front ().second.size ()); + auto & blocks = response.get_child ("blocks"); + if (size_count > 0) + { + ASSERT_EQ (size_count, blocks.front ().second.size ()); + } + else + { + ASSERT_TRUE (blocks.empty ()); + } } class scoped_io_thread_name_change
1
#include <nano/boost/beast/core/flat_buffer.hpp> #include <nano/boost/beast/http.hpp> #include <nano/lib/rpcconfig.hpp> #include <nano/lib/threading.hpp> #include <nano/node/ipc/ipc_server.hpp> #include <nano/node/json_handler.hpp> #include <nano/node/node_rpc_config.hpp> #include <nano/rpc/rpc.hpp> #include <nano/rpc/rpc_request_processor.hpp> #include <nano/test_common/system.hpp> #include <nano/test_common/telemetry.hpp> #include <nano/test_common/testutil.hpp> #include <gtest/gtest.h> #include <boost/filesystem.hpp> #include <boost/property_tree/json_parser.hpp> #include <algorithm> #include <tuple> using namespace std::chrono_literals; namespace { class test_response { public: test_response (boost::property_tree::ptree const & request_a, boost::asio::io_context & io_ctx_a) : request (request_a), sock (io_ctx_a) { } test_response (boost::property_tree::ptree const & request_a, uint16_t port_a, boost::asio::io_context & io_ctx_a) : request (request_a), sock (io_ctx_a) { run (port_a); } void run (uint16_t port_a) { sock.async_connect (nano::tcp_endpoint (boost::asio::ip::address_v6::loopback (), port_a), [this] (boost::system::error_code const & ec) { if (!ec) { std::stringstream ostream; boost::property_tree::write_json (ostream, request); req.method (boost::beast::http::verb::post); req.target ("/"); req.version (11); ostream.flush (); req.body () = ostream.str (); req.prepare_payload (); boost::beast::http::async_write (sock, req, [this] (boost::system::error_code const & ec, size_t bytes_transferred) { if (!ec) { boost::beast::http::async_read (sock, sb, resp, [this] (boost::system::error_code const & ec, size_t bytes_transferred) { if (!ec) { std::stringstream body (resp.body ()); try { boost::property_tree::read_json (body, json); status = 200; } catch (std::exception &) { status = 500; } } else { status = 400; } }); } else { status = 600; } }); } else { status = 400; } }); } boost::property_tree::ptree const & request; boost::asio::ip::tcp::socket sock; boost::property_tree::ptree json; boost::beast::flat_buffer sb; boost::beast::http::request<boost::beast::http::string_body> req; boost::beast::http::response<boost::beast::http::string_body> resp; std::atomic<int> status{ 0 }; }; std::shared_ptr<nano::node> add_ipc_enabled_node (nano::system & system, nano::node_config & node_config, nano::node_flags const & node_flags) { node_config.ipc_config.transport_tcp.enabled = true; node_config.ipc_config.transport_tcp.port = nano::get_available_port (); return system.add_node (node_config, node_flags); } std::shared_ptr<nano::node> add_ipc_enabled_node (nano::system & system, nano::node_config & node_config) { return add_ipc_enabled_node (system, node_config, nano::node_flags ()); } std::shared_ptr<nano::node> add_ipc_enabled_node (nano::system & system) { nano::node_config node_config (nano::get_available_port (), system.logging); return add_ipc_enabled_node (system, node_config); } void reset_confirmation_height (nano::store & store, nano::account const & account) { auto transaction = store.tx_begin_write (); nano::confirmation_height_info confirmation_height_info; if (!store.confirmation_height.get (transaction, account, confirmation_height_info)) { store.confirmation_height.clear (transaction, account); } } void wait_response_impl (nano::system & system, std::shared_ptr<nano::rpc> const & rpc, boost::property_tree::ptree & request, const std::chrono::duration<double, std::nano> & time, boost::property_tree::ptree & response_json) { test_response response (request, rpc->config.port, 
system.io_ctx); ASSERT_TIMELY (time, response.status != 0); ASSERT_EQ (200, response.status); response_json = response.json; } boost::property_tree::ptree wait_response (nano::system & system, std::shared_ptr<nano::rpc> const & rpc, boost::property_tree::ptree & request, const std::chrono::duration<double, std::nano> & time = 5s) { boost::property_tree::ptree response_json; wait_response_impl (system, rpc, request, time, response_json); return response_json; } void check_block_response_count (nano::system & system, std::shared_ptr<nano::rpc> const & rpc, boost::property_tree::ptree & request, uint64_t size_count) { auto response (wait_response (system, rpc, request)); ASSERT_EQ (size_count, response.get_child ("blocks").front ().second.size ()); } class scoped_io_thread_name_change { public: scoped_io_thread_name_change () { renew (); } ~scoped_io_thread_name_change () { reset (); } void reset () { nano::thread_role::set (nano::thread_role::name::unknown); } void renew () { nano::thread_role::set (nano::thread_role::name::io); } }; class rpc_context { public: rpc_context (std::unique_ptr<nano::ipc::ipc_server> & ipc_server_a, std::unique_ptr<nano::ipc_rpc_processor> & ipc_rpc_processor_a, std::unique_ptr<nano::node_rpc_config> & node_rpc_config_a, std::unique_ptr<scoped_io_thread_name_change> & io_scope_a) { ipc_server = std::move (ipc_server_a); ipc_rpc_processor = std::move (ipc_rpc_processor_a); node_rpc_config = std::move (node_rpc_config_a); io_scope = std::move (io_scope_a); } std::unique_ptr<nano::ipc::ipc_server> ipc_server; std::unique_ptr<nano::ipc_rpc_processor> ipc_rpc_processor; std::unique_ptr<nano::node_rpc_config> node_rpc_config; std::unique_ptr<scoped_io_thread_name_change> io_scope; }; std::tuple<std::shared_ptr<nano::rpc>, std::unique_ptr<rpc_context>> add_rpc (nano::system & system, std::shared_ptr<nano::node> const & node_a) { auto scoped_thread_name_io (std::make_unique<scoped_io_thread_name_change> ()); auto node_rpc_config (std::make_unique<nano::node_rpc_config> ()); auto ipc_server (std::make_unique<nano::ipc::ipc_server> (*node_a, *node_rpc_config)); nano::rpc_config rpc_config (nano::get_available_port (), true); rpc_config.rpc_process.ipc_port = node_a->config.ipc_config.transport_tcp.port; auto ipc_rpc_processor (std::make_unique<nano::ipc_rpc_processor> (system.io_ctx, rpc_config)); auto rpc (std::make_shared<nano::rpc> (system.io_ctx, rpc_config, *ipc_rpc_processor)); rpc->start (); auto rpc_ctx (std::make_unique<rpc_context> (ipc_server, ipc_rpc_processor, node_rpc_config, scoped_thread_name_io)); return std::make_tuple (rpc, std::move (rpc_ctx)); } } TEST (rpc, wrapped_task) { nano::system system; auto & node = *add_ipc_enabled_node (system); nano::node_rpc_config node_rpc_config; std::atomic<bool> response (false); auto response_handler_l ([&response] (std::string const & response_a) { std::stringstream istream (response_a); boost::property_tree::ptree json_l; ASSERT_NO_THROW (boost::property_tree::read_json (istream, json_l)); ASSERT_EQ (1, json_l.count ("error")); ASSERT_EQ ("Unable to parse JSON", json_l.get<std::string> ("error")); response = true; }); auto handler_l (std::make_shared<nano::json_handler> (node, node_rpc_config, "", response_handler_l)); auto task (handler_l->create_worker_task ([] (std::shared_ptr<nano::json_handler> const &) { // Exception should get caught throw std::runtime_error (""); })); system.nodes[0]->workers.push_task (task); ASSERT_TIMELY (5s, response == true); } TEST (rpc, account_balance) { nano::system system; auto 
node = add_ipc_enabled_node (system); // Add a send block (which will add a pending entry too) for the genesis account nano::state_block_builder builder; auto send1 = builder.make_block () .account (nano::dev::genesis_key.pub) .previous (nano::dev::genesis->hash ()) .representative (nano::dev::genesis_key.pub) .balance (nano::dev::genesis_amount - 1) .link (nano::dev::genesis_key.pub) .sign (nano::dev::genesis_key.prv, nano::dev::genesis_key.pub) .work (*system.work.generate (nano::dev::genesis->hash ())) .build (); ASSERT_EQ (nano::process_result::progress, node->process (*send1).code); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "account_balance"); request.put ("account", nano::dev::genesis_key.pub.to_account ()); // The send and pending should be unconfirmed { auto response (wait_response (system, rpc, request)); std::string balance_text (response.get<std::string> ("balance")); ASSERT_EQ ("340282366920938463463374607431768211455", balance_text); std::string pending_text (response.get<std::string> ("pending")); ASSERT_EQ ("0", pending_text); } request.put ("include_only_confirmed", false); { auto response (wait_response (system, rpc, request)); std::string balance_text (response.get<std::string> ("balance")); ASSERT_EQ ("340282366920938463463374607431768211454", balance_text); std::string pending_text (response.get<std::string> ("pending")); ASSERT_EQ ("1", pending_text); } } TEST (rpc, account_block_count) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, context] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "account_block_count"); request.put ("account", nano::dev::genesis_key.pub.to_account ()); auto response (wait_response (system, rpc, request)); std::string block_count_text (response.get<std::string> ("block_count")); ASSERT_EQ ("1", block_count_text); } TEST (rpc, account_create) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "account_create"); request.put ("wallet", node->wallets.items.begin ()->first.to_string ()); auto response0 (wait_response (system, rpc, request)); auto account_text0 (response0.get<std::string> ("account")); nano::account account0; ASSERT_FALSE (account0.decode_account (account_text0)); ASSERT_TRUE (system.wallet (0)->exists (account0)); constexpr uint64_t max_index (std::numeric_limits<uint32_t>::max ()); request.put ("index", max_index); auto response1 (wait_response (system, rpc, request, 10s)); auto account_text1 (response1.get<std::string> ("account")); nano::account account1; ASSERT_FALSE (account1.decode_account (account_text1)); ASSERT_TRUE (system.wallet (0)->exists (account1)); request.put ("index", max_index + 1); auto response2 (wait_response (system, rpc, request)); ASSERT_EQ (std::error_code (nano::error_common::invalid_index).message (), response2.get<std::string> ("error")); } TEST (rpc, account_weight) { nano::keypair key; nano::system system; auto node1 = add_ipc_enabled_node (system); nano::block_hash latest (node1->latest (nano::dev::genesis_key.pub)); nano::change_block block (latest, key.pub, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node1->work_generate_blocking (latest)); ASSERT_EQ (nano::process_result::progress, node1->process (block).code); auto [rpc, rpc_ctx] = add_rpc (system, node1); boost::property_tree::ptree request; request.put ("action", "account_weight"); 
request.put ("account", key.pub.to_account ()); auto response (wait_response (system, rpc, request)); std::string balance_text (response.get<std::string> ("weight")); ASSERT_EQ ("340282366920938463463374607431768211455", balance_text); } TEST (rpc, wallet_contains) { nano::system system; auto node = add_ipc_enabled_node (system); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; std::string wallet; node->wallets.items.begin ()->first.encode_hex (wallet); request.put ("wallet", wallet); request.put ("action", "wallet_contains"); request.put ("account", nano::dev::genesis_key.pub.to_account ()); auto response (wait_response (system, rpc, request)); std::string exists_text (response.get<std::string> ("exists")); ASSERT_EQ ("1", exists_text); } TEST (rpc, wallet_doesnt_contain) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; std::string wallet; node->wallets.items.begin ()->first.encode_hex (wallet); request.put ("wallet", wallet); request.put ("action", "wallet_contains"); request.put ("account", nano::dev::genesis_key.pub.to_account ()); auto response (wait_response (system, rpc, request)); std::string exists_text (response.get<std::string> ("exists")); ASSERT_EQ ("0", exists_text); } TEST (rpc, validate_account_number) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "validate_account_number"); request.put ("account", nano::dev::genesis_key.pub.to_account ()); auto response (wait_response (system, rpc, request)); std::string exists_text (response.get<std::string> ("valid")); ASSERT_EQ ("1", exists_text); } TEST (rpc, validate_account_invalid) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node); std::string account; nano::dev::genesis_key.pub.encode_account (account); account[0] ^= 0x1; boost::property_tree::ptree request; request.put ("action", "validate_account_number"); request.put ("account", account); auto response (wait_response (system, rpc, request)); std::string exists_text (response.get<std::string> ("valid")); ASSERT_EQ ("0", exists_text); } TEST (rpc, send) { nano::system system; auto node = add_ipc_enabled_node (system); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; std::string wallet; node->wallets.items.begin ()->first.encode_hex (wallet); request.put ("wallet", wallet); request.put ("action", "send"); request.put ("source", nano::dev::genesis_key.pub.to_account ()); request.put ("destination", nano::dev::genesis_key.pub.to_account ()); request.put ("amount", "100"); ASSERT_EQ (node->balance (nano::dev::genesis_key.pub), nano::dev::genesis_amount); auto response (wait_response (system, rpc, request, 10s)); std::string block_text (response.get<std::string> ("block")); nano::block_hash block; ASSERT_FALSE (block.decode_hex (block_text)); ASSERT_TRUE (node->ledger.block_or_pruned_exists (block)); ASSERT_EQ (node->latest (nano::dev::genesis_key.pub), block); ASSERT_NE (node->balance (nano::dev::genesis_key.pub), nano::dev::genesis_amount); } TEST (rpc, send_fail) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; 
std::string wallet; node->wallets.items.begin ()->first.encode_hex (wallet); request.put ("wallet", wallet); request.put ("action", "send"); request.put ("source", nano::dev::genesis_key.pub.to_account ()); request.put ("destination", nano::dev::genesis_key.pub.to_account ()); request.put ("amount", "100"); auto response (wait_response (system, rpc, request, 10s)); ASSERT_EQ (std::error_code (nano::error_common::account_not_found_wallet).message (), response.get<std::string> ("error")); } TEST (rpc, send_work) { nano::system system; auto node = add_ipc_enabled_node (system); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; std::string wallet; node->wallets.items.begin ()->first.encode_hex (wallet); request.put ("wallet", wallet); request.put ("action", "send"); request.put ("source", nano::dev::genesis_key.pub.to_account ()); request.put ("destination", nano::dev::genesis_key.pub.to_account ()); request.put ("amount", "100"); request.put ("work", "1"); auto response (wait_response (system, rpc, request, 10s)); ASSERT_EQ (std::error_code (nano::error_common::invalid_work).message (), response.get<std::string> ("error")); request.erase ("work"); request.put ("work", nano::to_string_hex (*node->work_generate_blocking (node->latest (nano::dev::genesis_key.pub)))); auto response2 (wait_response (system, rpc, request, 10s)); std::string block_text (response2.get<std::string> ("block")); nano::block_hash block; ASSERT_FALSE (block.decode_hex (block_text)); ASSERT_TRUE (node->ledger.block_or_pruned_exists (block)); ASSERT_EQ (node->latest (nano::dev::genesis_key.pub), block); } TEST (rpc, send_work_disabled) { nano::system system; nano::node_config node_config (nano::get_available_port (), system.logging); node_config.work_threads = 0; auto node = add_ipc_enabled_node (system, node_config); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; std::string wallet; node->wallets.items.begin ()->first.encode_hex (wallet); request.put ("wallet", wallet); request.put ("action", "send"); request.put ("source", nano::dev::genesis_key.pub.to_account ()); request.put ("destination", nano::dev::genesis_key.pub.to_account ()); request.put ("amount", "100"); auto response (wait_response (system, rpc, request, 10s)); ASSERT_EQ (std::error_code (nano::error_common::disabled_work_generation).message (), response.get<std::string> ("error")); } TEST (rpc, send_idempotent) { nano::system system; auto node = add_ipc_enabled_node (system); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; std::string wallet; node->wallets.items.begin ()->first.encode_hex (wallet); request.put ("wallet", wallet); request.put ("action", "send"); request.put ("source", nano::dev::genesis_key.pub.to_account ()); request.put ("destination", nano::account (0).to_account ()); request.put ("amount", (nano::dev::genesis_amount - (nano::dev::genesis_amount / 4)).convert_to<std::string> ()); request.put ("id", "123abc"); auto response (wait_response (system, rpc, request)); std::string block_text (response.get<std::string> ("block")); nano::block_hash block; ASSERT_FALSE (block.decode_hex (block_text)); ASSERT_TRUE (node->ledger.block_or_pruned_exists (block)); ASSERT_EQ (node->balance (nano::dev::genesis_key.pub), nano::dev::genesis_amount / 4); auto response2 
(wait_response (system, rpc, request)); ASSERT_EQ ("", response2.get<std::string> ("error", "")); ASSERT_EQ (block_text, response2.get<std::string> ("block")); ASSERT_EQ (node->balance (nano::dev::genesis_key.pub), nano::dev::genesis_amount / 4); request.erase ("id"); request.put ("id", "456def"); auto response3 (wait_response (system, rpc, request)); ASSERT_EQ (std::error_code (nano::error_common::insufficient_balance).message (), response3.get<std::string> ("error")); } TEST (rpc, send_epoch_2) { nano::system system; auto node = add_ipc_enabled_node (system); // Upgrade the genesis account to epoch 2 ASSERT_NE (nullptr, system.upgrade_genesis_epoch (*node, nano::epoch::epoch_1)); ASSERT_NE (nullptr, system.upgrade_genesis_epoch (*node, nano::epoch::epoch_2)); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv, false); auto target_difficulty = nano::work_threshold (nano::work_version::work_1, nano::block_details (nano::epoch::epoch_2, true, false, false)); ASSERT_LT (node->network_params.network.publish_thresholds.entry, target_difficulty); auto min_difficulty = node->network_params.network.publish_thresholds.entry; auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; std::string wallet; node->wallets.items.begin ()->first.encode_hex (wallet); request.put ("wallet", wallet); request.put ("action", "send"); request.put ("source", nano::dev::genesis_key.pub.to_account ()); request.put ("destination", nano::keypair ().pub.to_account ()); request.put ("amount", "1"); // Test that the correct error is given if there is insufficient work auto insufficient = system.work_generate_limited (nano::dev::genesis->hash (), min_difficulty, target_difficulty); request.put ("work", nano::to_string_hex (insufficient)); { auto response (wait_response (system, rpc, request)); std::error_code ec (nano::error_common::invalid_work); ASSERT_EQ (1, response.count ("error")); ASSERT_EQ (response.get<std::string> ("error"), ec.message ()); } } TEST (rpc, send_ipc_random_id) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node); std::atomic<bool> got_request{ false }; rpc_ctx->node_rpc_config->set_request_callback ([&got_request] (boost::property_tree::ptree const & request_a) { EXPECT_TRUE (request_a.count ("id")); got_request = true; }); boost::property_tree::ptree request; request.put ("action", "send"); auto response (wait_response (system, rpc, request, 10s)); ASSERT_EQ (1, response.count ("error")); ASSERT_EQ ("Unable to parse JSON", response.get<std::string> ("error")); ASSERT_TRUE (got_request); } TEST (rpc, stop) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "stop"); auto response (wait_response (system, rpc, request)); } TEST (rpc, wallet_add) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node); nano::keypair key1; std::string key_text; key1.prv.encode_hex (key_text); boost::property_tree::ptree request; std::string wallet; node->wallets.items.begin ()->first.encode_hex (wallet); request.put ("wallet", wallet); request.put ("action", "wallet_add"); request.put ("key", key_text); auto response (wait_response (system, rpc, request)); std::string account_text1 (response.get<std::string> ("account")); ASSERT_EQ (account_text1, key1.pub.to_account ()); ASSERT_TRUE (system.wallet (0)->exists (key1.pub)); } TEST (rpc, 
wallet_password_valid) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; std::string wallet; node->wallets.items.begin ()->first.encode_hex (wallet); request.put ("wallet", wallet); request.put ("action", "password_valid"); auto response (wait_response (system, rpc, request)); std::string account_text1 (response.get<std::string> ("valid")); ASSERT_EQ (account_text1, "1"); } TEST (rpc, wallet_password_change) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; std::string wallet; node->wallets.items.begin ()->first.encode_hex (wallet); request.put ("wallet", wallet); request.put ("action", "password_change"); request.put ("password", "test"); auto response (wait_response (system, rpc, request)); std::string account_text1 (response.get<std::string> ("changed")); ASSERT_EQ (account_text1, "1"); rpc_ctx->io_scope->reset (); auto transaction (system.wallet (0)->wallets.tx_begin_write ()); ASSERT_TRUE (system.wallet (0)->store.valid_password (transaction)); ASSERT_TRUE (system.wallet (0)->enter_password (transaction, "")); ASSERT_FALSE (system.wallet (0)->store.valid_password (transaction)); ASSERT_FALSE (system.wallet (0)->enter_password (transaction, "test")); ASSERT_TRUE (system.wallet (0)->store.valid_password (transaction)); } TEST (rpc, wallet_password_enter) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node); nano::raw_key password_l; password_l.clear (); system.deadline_set (10s); while (password_l == 0) { ASSERT_NO_ERROR (system.poll ()); system.wallet (0)->store.password.value (password_l); } boost::property_tree::ptree request; std::string wallet; node->wallets.items.begin ()->first.encode_hex (wallet); request.put ("wallet", wallet); request.put ("action", "password_enter"); request.put ("password", ""); auto response (wait_response (system, rpc, request)); std::string account_text1 (response.get<std::string> ("valid")); ASSERT_EQ (account_text1, "1"); } TEST (rpc, wallet_representative) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; std::string wallet; node->wallets.items.begin ()->first.encode_hex (wallet); request.put ("wallet", wallet); request.put ("action", "wallet_representative"); auto response (wait_response (system, rpc, request)); std::string account_text1 (response.get<std::string> ("representative")); ASSERT_EQ (account_text1, nano::dev::genesis->account ().to_account ()); } TEST (rpc, wallet_representative_set) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; std::string wallet; node->wallets.items.begin ()->first.encode_hex (wallet); request.put ("wallet", wallet); nano::keypair key; request.put ("action", "wallet_representative_set"); request.put ("representative", key.pub.to_account ()); auto response (wait_response (system, rpc, request)); auto transaction (node->wallets.tx_begin_read ()); ASSERT_EQ (key.pub, node->wallets.items.begin ()->second->store.representative (transaction)); } TEST (rpc, wallet_representative_set_force) { nano::system system; auto node = add_ipc_enabled_node (system); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); auto [rpc, rpc_ctx] = add_rpc (system, node); 
boost::property_tree::ptree request; std::string wallet; node->wallets.items.begin ()->first.encode_hex (wallet); request.put ("wallet", wallet); nano::keypair key; request.put ("action", "wallet_representative_set"); request.put ("representative", key.pub.to_account ()); request.put ("update_existing_accounts", true); auto response (wait_response (system, rpc, request)); { auto transaction (node->wallets.tx_begin_read ()); ASSERT_EQ (key.pub, node->wallets.items.begin ()->second->store.representative (transaction)); } nano::account representative (0); while (representative != key.pub) { auto transaction (node->store.tx_begin_read ()); nano::account_info info; if (!node->store.account.get (transaction, nano::dev::genesis_key.pub, info)) { representative = info.representative; } ASSERT_NO_ERROR (system.poll ()); } } TEST (rpc, account_list) { nano::system system; auto node = add_ipc_enabled_node (system); nano::keypair key2; system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); system.wallet (0)->insert_adhoc (key2.prv); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; std::string wallet; node->wallets.items.begin ()->first.encode_hex (wallet); request.put ("wallet", wallet); request.put ("action", "account_list"); auto response (wait_response (system, rpc, request)); auto & accounts_node (response.get_child ("accounts")); std::vector<nano::account> accounts; for (auto i (accounts_node.begin ()), j (accounts_node.end ()); i != j; ++i) { auto account (i->second.get<std::string> ("")); nano::account number; ASSERT_FALSE (number.decode_account (account)); accounts.push_back (number); } ASSERT_EQ (2, accounts.size ()); for (auto i (accounts.begin ()), j (accounts.end ()); i != j; ++i) { ASSERT_TRUE (system.wallet (0)->exists (*i)); } } TEST (rpc, wallet_key_valid) { nano::system system; auto node = add_ipc_enabled_node (system); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; std::string wallet; node->wallets.items.begin ()->first.encode_hex (wallet); request.put ("wallet", wallet); request.put ("action", "wallet_key_valid"); auto response (wait_response (system, rpc, request)); std::string exists_text (response.get<std::string> ("valid")); ASSERT_EQ ("1", exists_text); } TEST (rpc, wallet_create) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "wallet_create"); auto response (wait_response (system, rpc, request)); std::string wallet_text (response.get<std::string> ("wallet")); nano::wallet_id wallet_id; ASSERT_FALSE (wallet_id.decode_hex (wallet_text)); ASSERT_NE (node->wallets.items.end (), node->wallets.items.find (wallet_id)); } TEST (rpc, wallet_create_seed) { nano::system system; auto node = add_ipc_enabled_node (system); nano::raw_key seed; nano::random_pool::generate_block (seed.bytes.data (), seed.bytes.size ()); auto prv = nano::deterministic_key (seed, 0); auto pub (nano::pub_key (prv)); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "wallet_create"); request.put ("seed", seed.to_string ()); auto response (wait_response (system, rpc, request, 10s)); std::string wallet_text (response.get<std::string> ("wallet")); nano::wallet_id wallet_id; ASSERT_FALSE (wallet_id.decode_hex (wallet_text)); auto existing (node->wallets.items.find (wallet_id)); ASSERT_NE 
(node->wallets.items.end (), existing); { auto transaction (node->wallets.tx_begin_read ()); nano::raw_key seed0; existing->second->store.seed (seed0, transaction); ASSERT_EQ (seed, seed0); } auto account_text (response.get<std::string> ("last_restored_account")); nano::account account; ASSERT_FALSE (account.decode_account (account_text)); ASSERT_TRUE (existing->second->exists (account)); ASSERT_EQ (pub, account); ASSERT_EQ ("1", response.get<std::string> ("restored_count")); } TEST (rpc, wallet_export) { nano::system system; auto node = add_ipc_enabled_node (system); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "wallet_export"); request.put ("wallet", node->wallets.items.begin ()->first.to_string ()); auto response (wait_response (system, rpc, request)); std::string wallet_json (response.get<std::string> ("json")); bool error (false); rpc_ctx->io_scope->reset (); auto transaction (node->wallets.tx_begin_write ()); nano::kdf kdf; nano::wallet_store store (error, kdf, transaction, nano::dev::genesis->account (), 1, "0", wallet_json); ASSERT_FALSE (error); ASSERT_TRUE (store.exists (transaction, nano::dev::genesis_key.pub)); } TEST (rpc, wallet_destroy) { nano::system system; auto node = add_ipc_enabled_node (system); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); auto [rpc, rpc_ctx] = add_rpc (system, node); auto wallet_id (node->wallets.items.begin ()->first); boost::property_tree::ptree request; request.put ("action", "wallet_destroy"); request.put ("wallet", wallet_id.to_string ()); auto response (wait_response (system, rpc, request)); ASSERT_EQ (node->wallets.items.end (), node->wallets.items.find (wallet_id)); } TEST (rpc, account_move) { nano::system system; auto node = add_ipc_enabled_node (system); auto wallet_id (node->wallets.items.begin ()->first); auto destination (system.wallet (0)); destination->insert_adhoc (nano::dev::genesis_key.prv); nano::keypair key; auto source_id = nano::random_wallet_id (); auto source (node->wallets.create (source_id)); source->insert_adhoc (key.prv); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "account_move"); request.put ("wallet", wallet_id.to_string ()); request.put ("source", source_id.to_string ()); boost::property_tree::ptree keys; boost::property_tree::ptree entry; entry.put ("", key.pub.to_account ()); keys.push_back (std::make_pair ("", entry)); request.add_child ("accounts", keys); auto response (wait_response (system, rpc, request)); ASSERT_EQ ("1", response.get<std::string> ("moved")); ASSERT_TRUE (destination->exists (key.pub)); ASSERT_TRUE (destination->exists (nano::dev::genesis_key.pub)); auto transaction (node->wallets.tx_begin_read ()); ASSERT_EQ (source->store.end (), source->store.begin (transaction)); } TEST (rpc, block) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "block"); request.put ("hash", node->latest (nano::dev::genesis->account ()).to_string ()); auto response (wait_response (system, rpc, request)); auto contents (response.get<std::string> ("contents")); ASSERT_FALSE (contents.empty ()); ASSERT_TRUE (response.get<bool> ("confirmed")); // Genesis block is confirmed by default } TEST (rpc, block_account) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc 
(system, node); nano::genesis genesis; boost::property_tree::ptree request; request.put ("action", "block_account"); request.put ("hash", genesis.hash ().to_string ()); auto response (wait_response (system, rpc, request)); std::string account_text (response.get<std::string> ("account")); nano::account account; ASSERT_FALSE (account.decode_account (account_text)); } TEST (rpc, chain) { nano::system system; auto node = add_ipc_enabled_node (system); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); nano::keypair key; auto genesis (node->latest (nano::dev::genesis_key.pub)); ASSERT_FALSE (genesis.is_zero ()); auto block (system.wallet (0)->send_action (nano::dev::genesis_key.pub, key.pub, 1)); ASSERT_NE (nullptr, block); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "chain"); request.put ("block", block->hash ().to_string ()); request.put ("count", std::to_string (std::numeric_limits<uint64_t>::max ())); auto response (wait_response (system, rpc, request)); auto & blocks_node (response.get_child ("blocks")); std::vector<nano::block_hash> blocks; for (auto i (blocks_node.begin ()), n (blocks_node.end ()); i != n; ++i) { blocks.push_back (nano::block_hash (i->second.get<std::string> (""))); } ASSERT_EQ (2, blocks.size ()); ASSERT_EQ (block->hash (), blocks[0]); ASSERT_EQ (genesis, blocks[1]); } TEST (rpc, chain_limit) { nano::system system; auto node = add_ipc_enabled_node (system); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); nano::keypair key; auto genesis (node->latest (nano::dev::genesis_key.pub)); ASSERT_FALSE (genesis.is_zero ()); auto block (system.wallet (0)->send_action (nano::dev::genesis_key.pub, key.pub, 1)); ASSERT_NE (nullptr, block); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "chain"); request.put ("block", block->hash ().to_string ()); request.put ("count", 1); auto response (wait_response (system, rpc, request)); auto & blocks_node (response.get_child ("blocks")); std::vector<nano::block_hash> blocks; for (auto i (blocks_node.begin ()), n (blocks_node.end ()); i != n; ++i) { blocks.push_back (nano::block_hash (i->second.get<std::string> (""))); } ASSERT_EQ (1, blocks.size ()); ASSERT_EQ (block->hash (), blocks[0]); } TEST (rpc, chain_offset) { nano::system system; auto node = add_ipc_enabled_node (system); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); nano::keypair key; auto genesis (node->latest (nano::dev::genesis_key.pub)); ASSERT_FALSE (genesis.is_zero ()); auto block (system.wallet (0)->send_action (nano::dev::genesis_key.pub, key.pub, 1)); ASSERT_NE (nullptr, block); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "chain"); request.put ("block", block->hash ().to_string ()); request.put ("count", std::to_string (std::numeric_limits<uint64_t>::max ())); request.put ("offset", 1); auto response (wait_response (system, rpc, request)); auto & blocks_node (response.get_child ("blocks")); std::vector<nano::block_hash> blocks; for (auto i (blocks_node.begin ()), n (blocks_node.end ()); i != n; ++i) { blocks.push_back (nano::block_hash (i->second.get<std::string> (""))); } ASSERT_EQ (1, blocks.size ()); ASSERT_EQ (genesis, blocks[0]); } TEST (rpc, frontier) { nano::system system; auto node = add_ipc_enabled_node (system); std::unordered_map<nano::account, nano::block_hash> source; { auto transaction (node->store.tx_begin_write ()); for (auto i (0); i < 1000; 
++i) { nano::keypair key; nano::block_hash hash; nano::random_pool::generate_block (hash.bytes.data (), hash.bytes.size ()); source[key.pub] = hash; node->store.confirmation_height.put (transaction, key.pub, { 0, nano::block_hash (0) }); node->store.account.put (transaction, key.pub, nano::account_info (hash, 0, 0, 0, 0, 0, nano::epoch::epoch_0)); } } nano::keypair key; auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "frontiers"); request.put ("account", nano::account (0).to_account ()); request.put ("count", std::to_string (std::numeric_limits<uint64_t>::max ())); auto response (wait_response (system, rpc, request)); auto & frontiers_node (response.get_child ("frontiers")); std::unordered_map<nano::account, nano::block_hash> frontiers; for (auto i (frontiers_node.begin ()), j (frontiers_node.end ()); i != j; ++i) { nano::account account; account.decode_account (i->first); nano::block_hash frontier; frontier.decode_hex (i->second.get<std::string> ("")); frontiers[account] = frontier; } ASSERT_EQ (1, frontiers.erase (nano::dev::genesis_key.pub)); ASSERT_EQ (source, frontiers); } TEST (rpc, frontier_limited) { nano::system system; auto node = add_ipc_enabled_node (system); std::unordered_map<nano::account, nano::block_hash> source; { auto transaction (node->store.tx_begin_write ()); for (auto i (0); i < 1000; ++i) { nano::keypair key; nano::block_hash hash; nano::random_pool::generate_block (hash.bytes.data (), hash.bytes.size ()); source[key.pub] = hash; node->store.confirmation_height.put (transaction, key.pub, { 0, nano::block_hash (0) }); node->store.account.put (transaction, key.pub, nano::account_info (hash, 0, 0, 0, 0, 0, nano::epoch::epoch_0)); } } nano::keypair key; auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "frontiers"); request.put ("account", nano::account (0).to_account ()); request.put ("count", std::to_string (100)); auto response (wait_response (system, rpc, request)); auto & frontiers_node (response.get_child ("frontiers")); ASSERT_EQ (100, frontiers_node.size ()); } TEST (rpc, frontier_startpoint) { nano::system system; auto node = add_ipc_enabled_node (system); std::unordered_map<nano::account, nano::block_hash> source; { auto transaction (node->store.tx_begin_write ()); for (auto i (0); i < 1000; ++i) { nano::keypair key; nano::block_hash hash; nano::random_pool::generate_block (hash.bytes.data (), hash.bytes.size ()); source[key.pub] = hash; node->store.confirmation_height.put (transaction, key.pub, { 0, nano::block_hash (0) }); node->store.account.put (transaction, key.pub, nano::account_info (hash, 0, 0, 0, 0, 0, nano::epoch::epoch_0)); } } nano::keypair key; auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "frontiers"); request.put ("account", source.begin ()->first.to_account ()); request.put ("count", std::to_string (1)); auto response (wait_response (system, rpc, request)); auto & frontiers_node (response.get_child ("frontiers")); ASSERT_EQ (1, frontiers_node.size ()); ASSERT_EQ (source.begin ()->first.to_account (), frontiers_node.begin ()->first); } TEST (rpc, history) { nano::system system; auto node0 = add_ipc_enabled_node (system); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); auto change (system.wallet (0)->change_action (nano::dev::genesis_key.pub, nano::dev::genesis_key.pub)); ASSERT_NE (nullptr, change); auto send (system.wallet (0)->send_action 
(nano::dev::genesis_key.pub, nano::dev::genesis_key.pub, node0->config.receive_minimum.number ())); ASSERT_NE (nullptr, send); auto receive (system.wallet (0)->receive_action (send->hash (), nano::dev::genesis_key.pub, node0->config.receive_minimum.number (), send->link ().as_account ())); ASSERT_NE (nullptr, receive); nano::genesis genesis; nano::state_block usend (nano::dev::genesis->account (), node0->latest (nano::dev::genesis->account ()), nano::dev::genesis->account (), nano::dev::genesis_amount - nano::Gxrb_ratio, nano::dev::genesis->account (), nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node0->work_generate_blocking (node0->latest (nano::dev::genesis->account ()))); nano::state_block ureceive (nano::dev::genesis->account (), usend.hash (), nano::dev::genesis->account (), nano::dev::genesis_amount, usend.hash (), nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node0->work_generate_blocking (usend.hash ())); nano::state_block uchange (nano::dev::genesis->account (), ureceive.hash (), nano::keypair ().pub, nano::dev::genesis_amount, 0, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node0->work_generate_blocking (ureceive.hash ())); { auto transaction (node0->store.tx_begin_write ()); ASSERT_EQ (nano::process_result::progress, node0->ledger.process (transaction, usend).code); ASSERT_EQ (nano::process_result::progress, node0->ledger.process (transaction, ureceive).code); ASSERT_EQ (nano::process_result::progress, node0->ledger.process (transaction, uchange).code); } auto [rpc, rpc_ctx] = add_rpc (system, node0); boost::property_tree::ptree request; request.put ("action", "history"); request.put ("hash", uchange.hash ().to_string ()); request.put ("count", 100); auto response (wait_response (system, rpc, request)); std::vector<std::tuple<std::string, std::string, std::string, std::string>> history_l; auto & history_node (response.get_child ("history")); for (auto i (history_node.begin ()), n (history_node.end ()); i != n; ++i) { history_l.push_back (std::make_tuple (i->second.get<std::string> ("type"), i->second.get<std::string> ("account"), i->second.get<std::string> ("amount"), i->second.get<std::string> ("hash"))); } ASSERT_EQ (5, history_l.size ()); ASSERT_EQ ("receive", std::get<0> (history_l[0])); ASSERT_EQ (ureceive.hash ().to_string (), std::get<3> (history_l[0])); ASSERT_EQ (nano::dev::genesis_key.pub.to_account (), std::get<1> (history_l[0])); ASSERT_EQ (nano::Gxrb_ratio.convert_to<std::string> (), std::get<2> (history_l[0])); ASSERT_EQ (5, history_l.size ()); ASSERT_EQ ("send", std::get<0> (history_l[1])); ASSERT_EQ (usend.hash ().to_string (), std::get<3> (history_l[1])); ASSERT_EQ (nano::dev::genesis_key.pub.to_account (), std::get<1> (history_l[1])); ASSERT_EQ (nano::Gxrb_ratio.convert_to<std::string> (), std::get<2> (history_l[1])); ASSERT_EQ ("receive", std::get<0> (history_l[2])); ASSERT_EQ (nano::dev::genesis_key.pub.to_account (), std::get<1> (history_l[2])); ASSERT_EQ (node0->config.receive_minimum.to_string_dec (), std::get<2> (history_l[2])); ASSERT_EQ (receive->hash ().to_string (), std::get<3> (history_l[2])); ASSERT_EQ ("send", std::get<0> (history_l[3])); ASSERT_EQ (nano::dev::genesis_key.pub.to_account (), std::get<1> (history_l[3])); ASSERT_EQ (node0->config.receive_minimum.to_string_dec (), std::get<2> (history_l[3])); ASSERT_EQ (send->hash ().to_string (), std::get<3> (history_l[3])); ASSERT_EQ ("receive", std::get<0> (history_l[4])); ASSERT_EQ (nano::dev::genesis_key.pub.to_account (), std::get<1> (history_l[4])); ASSERT_EQ 
(nano::dev::genesis_amount.convert_to<std::string> (), std::get<2> (history_l[4])); ASSERT_EQ (genesis.hash ().to_string (), std::get<3> (history_l[4])); } TEST (rpc, account_history) { nano::system system; auto node0 = add_ipc_enabled_node (system); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); auto change (system.wallet (0)->change_action (nano::dev::genesis_key.pub, nano::dev::genesis_key.pub)); ASSERT_NE (nullptr, change); auto send (system.wallet (0)->send_action (nano::dev::genesis_key.pub, nano::dev::genesis_key.pub, node0->config.receive_minimum.number ())); ASSERT_NE (nullptr, send); auto receive (system.wallet (0)->receive_action (send->hash (), nano::dev::genesis_key.pub, node0->config.receive_minimum.number (), send->link ().as_account ())); ASSERT_NE (nullptr, receive); nano::genesis genesis; nano::state_block usend (nano::dev::genesis->account (), node0->latest (nano::dev::genesis->account ()), nano::dev::genesis->account (), nano::dev::genesis_amount - nano::Gxrb_ratio, nano::dev::genesis->account (), nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node0->work_generate_blocking (node0->latest (nano::dev::genesis->account ()))); nano::state_block ureceive (nano::dev::genesis->account (), usend.hash (), nano::dev::genesis->account (), nano::dev::genesis_amount, usend.hash (), nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node0->work_generate_blocking (usend.hash ())); nano::state_block uchange (nano::dev::genesis->account (), ureceive.hash (), nano::keypair ().pub, nano::dev::genesis_amount, 0, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node0->work_generate_blocking (ureceive.hash ())); { auto transaction (node0->store.tx_begin_write ()); ASSERT_EQ (nano::process_result::progress, node0->ledger.process (transaction, usend).code); ASSERT_EQ (nano::process_result::progress, node0->ledger.process (transaction, ureceive).code); ASSERT_EQ (nano::process_result::progress, node0->ledger.process (transaction, uchange).code); } auto [rpc, rpc_ctx] = add_rpc (system, node0); { boost::property_tree::ptree request; request.put ("action", "account_history"); request.put ("account", nano::dev::genesis->account ().to_account ()); request.put ("count", 100); auto response (wait_response (system, rpc, request, 10s)); std::vector<std::tuple<std::string, std::string, std::string, std::string, std::string>> history_l; auto & history_node (response.get_child ("history")); for (auto i (history_node.begin ()), n (history_node.end ()); i != n; ++i) { history_l.push_back (std::make_tuple (i->second.get<std::string> ("type"), i->second.get<std::string> ("account"), i->second.get<std::string> ("amount"), i->second.get<std::string> ("hash"), i->second.get<std::string> ("height"))); } ASSERT_EQ (5, history_l.size ()); ASSERT_EQ ("receive", std::get<0> (history_l[0])); ASSERT_EQ (ureceive.hash ().to_string (), std::get<3> (history_l[0])); ASSERT_EQ (nano::dev::genesis_key.pub.to_account (), std::get<1> (history_l[0])); ASSERT_EQ (nano::Gxrb_ratio.convert_to<std::string> (), std::get<2> (history_l[0])); ASSERT_EQ ("6", std::get<4> (history_l[0])); // change block (height 7) is skipped by account_history since "raw" is not set ASSERT_EQ ("send", std::get<0> (history_l[1])); ASSERT_EQ (usend.hash ().to_string (), std::get<3> (history_l[1])); ASSERT_EQ (nano::dev::genesis_key.pub.to_account (), std::get<1> (history_l[1])); ASSERT_EQ (nano::Gxrb_ratio.convert_to<std::string> (), std::get<2> (history_l[1])); ASSERT_EQ ("5", std::get<4> (history_l[1])); ASSERT_EQ 
("receive", std::get<0> (history_l[2])); ASSERT_EQ (nano::dev::genesis_key.pub.to_account (), std::get<1> (history_l[2])); ASSERT_EQ (node0->config.receive_minimum.to_string_dec (), std::get<2> (history_l[2])); ASSERT_EQ (receive->hash ().to_string (), std::get<3> (history_l[2])); ASSERT_EQ ("4", std::get<4> (history_l[2])); ASSERT_EQ ("send", std::get<0> (history_l[3])); ASSERT_EQ (nano::dev::genesis_key.pub.to_account (), std::get<1> (history_l[3])); ASSERT_EQ (node0->config.receive_minimum.to_string_dec (), std::get<2> (history_l[3])); ASSERT_EQ (send->hash ().to_string (), std::get<3> (history_l[3])); ASSERT_EQ ("3", std::get<4> (history_l[3])); ASSERT_EQ ("receive", std::get<0> (history_l[4])); ASSERT_EQ (nano::dev::genesis_key.pub.to_account (), std::get<1> (history_l[4])); ASSERT_EQ (nano::dev::genesis_amount.convert_to<std::string> (), std::get<2> (history_l[4])); ASSERT_EQ (genesis.hash ().to_string (), std::get<3> (history_l[4])); ASSERT_EQ ("1", std::get<4> (history_l[4])); // change block (height 2) is skipped } // Test count and reverse { boost::property_tree::ptree request; request.put ("action", "account_history"); request.put ("account", nano::dev::genesis->account ().to_account ()); request.put ("reverse", true); request.put ("count", 1); auto response (wait_response (system, rpc, request, 10s)); auto & history_node (response.get_child ("history")); ASSERT_EQ (1, history_node.size ()); ASSERT_EQ ("1", history_node.begin ()->second.get<std::string> ("height")); ASSERT_EQ (change->hash ().to_string (), response.get<std::string> ("next")); } // Test filtering rpc_ctx->io_scope->reset (); auto account2 (system.wallet (0)->deterministic_insert ()); auto send2 (system.wallet (0)->send_action (nano::dev::genesis_key.pub, account2, node0->config.receive_minimum.number ())); ASSERT_NE (nullptr, send2); auto receive2 (system.wallet (0)->receive_action (send2->hash (), account2, node0->config.receive_minimum.number (), send2->link ().as_account ())); rpc_ctx->io_scope->renew (); // Test filter for send state blocks ASSERT_NE (nullptr, receive2); { boost::property_tree::ptree request; request.put ("action", "account_history"); request.put ("account", nano::dev::genesis_key.pub.to_account ()); boost::property_tree::ptree other_account; other_account.put ("", account2.to_account ()); boost::property_tree::ptree filtered_accounts; filtered_accounts.push_back (std::make_pair ("", other_account)); request.add_child ("account_filter", filtered_accounts); request.put ("count", 100); auto response (wait_response (system, rpc, request)); auto history_node (response.get_child ("history")); ASSERT_EQ (history_node.size (), 2); } // Test filter for receive state blocks { boost::property_tree::ptree request; request.put ("action", "account_history"); request.put ("account", account2.to_account ()); boost::property_tree::ptree other_account; other_account.put ("", nano::dev::genesis_key.pub.to_account ()); boost::property_tree::ptree filtered_accounts; filtered_accounts.push_back (std::make_pair ("", other_account)); request.add_child ("account_filter", filtered_accounts); request.put ("count", 100); auto response (wait_response (system, rpc, request)); auto history_node (response.get_child ("history")); ASSERT_EQ (history_node.size (), 1); } } TEST (rpc, history_count) { nano::system system; auto node = add_ipc_enabled_node (system); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); auto change (system.wallet (0)->change_action (nano::dev::genesis_key.pub, nano::dev::genesis_key.pub)); 
ASSERT_NE (nullptr, change); auto send (system.wallet (0)->send_action (nano::dev::genesis_key.pub, nano::dev::genesis_key.pub, node->config.receive_minimum.number ())); ASSERT_NE (nullptr, send); auto receive (system.wallet (0)->receive_action (send->hash (), nano::dev::genesis_key.pub, node->config.receive_minimum.number (), send->link ().as_account ())); ASSERT_NE (nullptr, receive); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "history"); request.put ("hash", receive->hash ().to_string ()); request.put ("count", 1); auto response (wait_response (system, rpc, request)); auto & history_node (response.get_child ("history")); ASSERT_EQ (1, history_node.size ()); } TEST (rpc, history_pruning) { nano::system system; nano::node_config node_config (nano::get_available_port (), system.logging); node_config.enable_voting = false; // Remove after allowing pruned voting nano::node_flags node_flags; node_flags.enable_pruning = true; auto node0 = add_ipc_enabled_node (system, node_config, node_flags); nano::genesis genesis; auto change (std::make_shared<nano::change_block> (genesis.hash (), nano::dev::genesis_key.pub, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node0->work.generate (genesis.hash ()))); node0->process_active (change); auto send (std::make_shared<nano::send_block> (change->hash (), nano::dev::genesis_key.pub, nano::dev::genesis_amount - node0->config.receive_minimum.number (), nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node0->work.generate (change->hash ()))); node0->process_active (send); auto receive (std::make_shared<nano::receive_block> (send->hash (), send->hash (), nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node0->work.generate (send->hash ()))); node0->process_active (receive); auto usend (std::make_shared<nano::state_block> (nano::dev::genesis->account (), receive->hash (), nano::dev::genesis->account (), nano::dev::genesis_amount - nano::Gxrb_ratio, nano::dev::genesis->account (), nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node0->work_generate_blocking (receive->hash ()))); auto ureceive (std::make_shared<nano::state_block> (nano::dev::genesis->account (), usend->hash (), nano::dev::genesis->account (), nano::dev::genesis_amount, usend->hash (), nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node0->work_generate_blocking (usend->hash ()))); auto uchange (std::make_shared<nano::state_block> (nano::dev::genesis->account (), ureceive->hash (), nano::keypair ().pub, nano::dev::genesis_amount, 0, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node0->work_generate_blocking (ureceive->hash ()))); node0->process_active (usend); node0->process_active (ureceive); node0->process_active (uchange); node0->block_processor.flush (); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); // Confirm last block to prune previous { auto election = node0->active.election (change->qualified_root ()); ASSERT_NE (nullptr, election); election->force_confirm (); } ASSERT_TIMELY (2s, node0->block_confirmed (change->hash ()) && node0->active.active (send->qualified_root ())); { auto election = node0->active.election (send->qualified_root ()); ASSERT_NE (nullptr, election); election->force_confirm (); } ASSERT_TIMELY (2s, node0->block_confirmed (send->hash ()) && node0->active.active (receive->qualified_root ())); { auto election = node0->active.election (receive->qualified_root ()); ASSERT_NE (nullptr, election); election->force_confirm (); } ASSERT_TIMELY (2s, 
node0->block_confirmed (receive->hash ()) && node0->active.active (usend->qualified_root ())); { auto election = node0->active.election (usend->qualified_root ()); ASSERT_NE (nullptr, election); election->force_confirm (); } ASSERT_TIMELY (2s, node0->block_confirmed (usend->hash ()) && node0->active.active (ureceive->qualified_root ())); { auto election = node0->active.election (ureceive->qualified_root ()); ASSERT_NE (nullptr, election); election->force_confirm (); } ASSERT_TIMELY (2s, node0->block_confirmed (ureceive->hash ()) && node0->active.active (uchange->qualified_root ())); { auto election = node0->active.election (uchange->qualified_root ()); ASSERT_NE (nullptr, election); election->force_confirm (); } ASSERT_TIMELY (2s, node0->active.empty () && node0->block_confirmed (uchange->hash ())); ASSERT_TIMELY (2s, node0->ledger.cache.cemented_count == 7 && node0->confirmation_height_processor.current ().is_zero () && node0->confirmation_height_processor.awaiting_processing_size () == 0); // Pruning action { auto transaction (node0->store.tx_begin_write ()); ASSERT_EQ (1, node0->ledger.pruning_action (transaction, change->hash (), 1)); } auto [rpc, rpc_ctx] = add_rpc (system, node0); boost::property_tree::ptree request; request.put ("action", "history"); request.put ("hash", send->hash ().to_string ()); request.put ("count", 100); auto response (wait_response (system, rpc, request)); std::vector<std::tuple<std::string, std::string, std::string, std::string>> history_l; auto & history_node (response.get_child ("history")); for (auto i (history_node.begin ()), n (history_node.end ()); i != n; ++i) { history_l.push_back (std::make_tuple (i->second.get<std::string> ("type"), i->second.get<std::string> ("account", "-1"), i->second.get<std::string> ("amount", "-1"), i->second.get<std::string> ("hash"))); boost::optional<std::string> amount (i->second.get_optional<std::string> ("amount")); ASSERT_FALSE (amount.is_initialized ()); // Cannot calculate amount } ASSERT_EQ (1, history_l.size ()); ASSERT_EQ ("send", std::get<0> (history_l[0])); ASSERT_EQ (nano::dev::genesis_key.pub.to_account (), std::get<1> (history_l[0])); ASSERT_EQ ("-1", std::get<2> (history_l[0])); ASSERT_EQ (send->hash ().to_string (), std::get<3> (history_l[0])); // Pruning action { rpc_ctx->io_scope->reset (); auto transaction (node0->store.tx_begin_write ()); ASSERT_EQ (1, node0->ledger.pruning_action (transaction, send->hash (), 1)); rpc_ctx->io_scope->renew (); } boost::property_tree::ptree request2; request2.put ("action", "history"); request2.put ("hash", receive->hash ().to_string ()); request2.put ("count", 100); auto response2 (wait_response (system, rpc, request2)); history_l.clear (); auto & history_node2 (response2.get_child ("history")); for (auto i (history_node2.begin ()), n (history_node2.end ()); i != n; ++i) { history_l.push_back (std::make_tuple (i->second.get<std::string> ("type"), i->second.get<std::string> ("account", "-1"), i->second.get<std::string> ("amount", "-1"), i->second.get<std::string> ("hash"))); boost::optional<std::string> amount (i->second.get_optional<std::string> ("amount")); ASSERT_FALSE (amount.is_initialized ()); // Cannot calculate amount boost::optional<std::string> account (i->second.get_optional<std::string> ("account")); ASSERT_FALSE (account.is_initialized ()); // Cannot find source account } ASSERT_EQ (1, history_l.size ()); ASSERT_EQ ("receive", std::get<0> (history_l[0])); ASSERT_EQ ("-1", std::get<1> (history_l[0])); ASSERT_EQ ("-1", std::get<2> (history_l[0])); ASSERT_EQ 
(receive->hash ().to_string (), std::get<3> (history_l[0])); // Pruning action { rpc_ctx->io_scope->reset (); auto transaction (node0->store.tx_begin_write ()); ASSERT_EQ (1, node0->ledger.pruning_action (transaction, receive->hash (), 1)); rpc_ctx->io_scope->renew (); } boost::property_tree::ptree request3; request3.put ("action", "history"); request3.put ("hash", uchange->hash ().to_string ()); request3.put ("count", 100); auto response3 (wait_response (system, rpc, request3)); history_l.clear (); auto & history_node3 (response3.get_child ("history")); for (auto i (history_node3.begin ()), n (history_node3.end ()); i != n; ++i) { history_l.push_back (std::make_tuple (i->second.get<std::string> ("type"), i->second.get<std::string> ("account", "-1"), i->second.get<std::string> ("amount", "-1"), i->second.get<std::string> ("hash"))); } ASSERT_EQ (2, history_l.size ()); ASSERT_EQ ("receive", std::get<0> (history_l[0])); ASSERT_EQ (ureceive->hash ().to_string (), std::get<3> (history_l[0])); ASSERT_EQ (nano::dev::genesis_key.pub.to_account (), std::get<1> (history_l[0])); ASSERT_EQ (nano::Gxrb_ratio.convert_to<std::string> (), std::get<2> (history_l[0])); ASSERT_EQ ("unknown", std::get<0> (history_l[1])); ASSERT_EQ ("-1", std::get<1> (history_l[1])); ASSERT_EQ ("-1", std::get<2> (history_l[1])); ASSERT_EQ (usend->hash ().to_string (), std::get<3> (history_l[1])); } TEST (rpc, process_block) { nano::system system; auto node1 = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node1); nano::keypair key; auto latest (node1->latest (nano::dev::genesis_key.pub)); nano::send_block send (latest, key.pub, 100, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node1->work_generate_blocking (latest)); boost::property_tree::ptree request; request.put ("action", "process"); std::string json; send.serialize_json (json); request.put ("block", json); { auto response (wait_response (system, rpc, request)); ASSERT_TIMELY (10s, node1->latest (nano::dev::genesis_key.pub) == send.hash ()); std::string send_hash (response.get<std::string> ("hash")); ASSERT_EQ (send.hash ().to_string (), send_hash); } request.put ("json_block", true); { auto response (wait_response (system, rpc, request)); std::error_code ec (nano::error_blocks::invalid_block); ASSERT_EQ (ec.message (), response.get<std::string> ("error")); } } TEST (rpc, process_json_block) { nano::system system; auto node1 = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node1); nano::keypair key; auto latest (node1->latest (nano::dev::genesis_key.pub)); nano::send_block send (latest, key.pub, 100, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node1->work_generate_blocking (latest)); boost::property_tree::ptree request; request.put ("action", "process"); boost::property_tree::ptree block_node; send.serialize_json (block_node); request.add_child ("block", block_node); { auto response (wait_response (system, rpc, request)); std::error_code ec (nano::error_blocks::invalid_block); ASSERT_EQ (ec.message (), response.get<std::string> ("error")); } request.put ("json_block", true); { auto response (wait_response (system, rpc, request)); ASSERT_TIMELY (10s, node1->latest (nano::dev::genesis_key.pub) == send.hash ()); std::string send_hash (response.get<std::string> ("hash")); ASSERT_EQ (send.hash ().to_string (), send_hash); } } TEST (rpc, process_block_async) { nano::system system; auto node1 = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node1); nano::keypair key; auto latest 
(node1->latest (nano::dev::genesis_key.pub)); nano::send_block send (latest, key.pub, 100, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node1->work_generate_blocking (latest)); boost::property_tree::ptree request; request.put ("action", "process"); request.put ("async", "true"); std::string json; send.serialize_json (json); request.put ("block", json); request.put ("json_block", true); { auto response (wait_response (system, rpc, request)); std::error_code ec (nano::error_blocks::invalid_block); ASSERT_EQ (ec.message (), response.get<std::string> ("error")); } request.put ("json_block", false); { auto response (wait_response (system, rpc, request)); std::error_code ec (nano::error_common::is_not_state_block); ASSERT_EQ (ec.message (), response.get<std::string> ("error")); } auto state_send (std::make_shared<nano::state_block> (nano::dev::genesis_key.pub, latest, nano::dev::genesis_key.pub, nano::dev::genesis_amount - 100, nano::dev::genesis_key.pub, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *system.work.generate (latest))); std::string json1; state_send->serialize_json (json1); request.put ("block", json1); { auto response (wait_response (system, rpc, request)); ASSERT_EQ ("1", response.get<std::string> ("started")); ASSERT_TIMELY (10s, node1->latest (nano::dev::genesis_key.pub) == state_send->hash ()); } } TEST (rpc, process_block_no_work) { nano::system system; auto node1 = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node1); nano::keypair key; auto latest (node1->latest (nano::dev::genesis_key.pub)); nano::send_block send (latest, key.pub, 100, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node1->work_generate_blocking (latest)); send.block_work_set (0); boost::property_tree::ptree request; request.put ("action", "process"); std::string json; send.serialize_json (json); request.put ("block", json); auto response (wait_response (system, rpc, request)); ASSERT_FALSE (response.get<std::string> ("error", "").empty ()); } TEST (rpc, process_republish) { nano::system system (2); auto & node1 (*system.nodes[0]); auto & node2 (*system.nodes[1]); auto node3 = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node3); nano::keypair key; auto latest (node1.latest (nano::dev::genesis_key.pub)); nano::send_block send (latest, key.pub, 100, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node3->work_generate_blocking (latest)); boost::property_tree::ptree request; request.put ("action", "process"); std::string json; send.serialize_json (json); request.put ("block", json); auto response (wait_response (system, rpc, request)); ASSERT_TIMELY (10s, node2.latest (nano::dev::genesis_key.pub) == send.hash ()); } TEST (rpc, process_subtype_send) { nano::system system; auto node1 = add_ipc_enabled_node (system); system.add_node (); auto [rpc, rpc_ctx] = add_rpc (system, node1); nano::keypair key; auto latest (node1->latest (nano::dev::genesis_key.pub)); nano::state_block send (nano::dev::genesis->account (), latest, nano::dev::genesis->account (), nano::dev::genesis_amount - nano::Gxrb_ratio, key.pub, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node1->work_generate_blocking (latest)); boost::property_tree::ptree request; request.put ("action", "process"); std::string json; send.serialize_json (json); request.put ("block", json); request.put ("subtype", "receive"); auto response (wait_response (system, rpc, request)); std::error_code ec (nano::error_rpc::invalid_subtype_balance); ASSERT_EQ 
(response.get<std::string> ("error"), ec.message ()); request.put ("subtype", "change"); auto response2 (wait_response (system, rpc, request)); ASSERT_EQ (response2.get<std::string> ("error"), ec.message ()); request.put ("subtype", "send"); auto response3 (wait_response (system, rpc, request)); ASSERT_EQ (send.hash ().to_string (), response3.get<std::string> ("hash")); ASSERT_TIMELY (10s, system.nodes[1]->latest (nano::dev::genesis_key.pub) == send.hash ()); } TEST (rpc, process_subtype_open) { nano::system system; auto node1 = add_ipc_enabled_node (system); auto & node2 = *system.add_node (); nano::keypair key; auto latest (node1->latest (nano::dev::genesis_key.pub)); auto send = std::make_shared<nano::state_block> (nano::dev::genesis->account (), latest, nano::dev::genesis->account (), nano::dev::genesis_amount - nano::Gxrb_ratio, key.pub, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node1->work_generate_blocking (latest)); ASSERT_EQ (nano::process_result::progress, node1->process (*send).code); ASSERT_EQ (nano::process_result::progress, node2.process (*send).code); auto [rpc, rpc_ctx] = add_rpc (system, node1); node1->scheduler.manual (send); nano::state_block open (key.pub, 0, key.pub, nano::Gxrb_ratio, send->hash (), key.prv, key.pub, *node1->work_generate_blocking (key.pub)); boost::property_tree::ptree request; request.put ("action", "process"); std::string json; open.serialize_json (json); request.put ("block", json); request.put ("subtype", "send"); auto response (wait_response (system, rpc, request)); std::error_code ec (nano::error_rpc::invalid_subtype_balance); ASSERT_EQ (response.get<std::string> ("error"), ec.message ()); request.put ("subtype", "epoch"); auto response2 (wait_response (system, rpc, request)); ASSERT_EQ (response2.get<std::string> ("error"), ec.message ()); request.put ("subtype", "open"); auto response3 (wait_response (system, rpc, request)); ASSERT_EQ (open.hash ().to_string (), response3.get<std::string> ("hash")); ASSERT_TIMELY (10s, node2.latest (key.pub) == open.hash ()); } TEST (rpc, process_subtype_receive) { nano::system system; auto node1 = add_ipc_enabled_node (system); auto & node2 = *system.add_node (); auto latest (node1->latest (nano::dev::genesis_key.pub)); auto send = std::make_shared<nano::state_block> (nano::dev::genesis->account (), latest, nano::dev::genesis->account (), nano::dev::genesis_amount - nano::Gxrb_ratio, nano::dev::genesis_key.pub, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node1->work_generate_blocking (latest)); ASSERT_EQ (nano::process_result::progress, node1->process (*send).code); ASSERT_EQ (nano::process_result::progress, node2.process (*send).code); auto [rpc, rpc_ctx] = add_rpc (system, node1); node1->scheduler.manual (send); nano::state_block receive (nano::dev::genesis_key.pub, send->hash (), nano::dev::genesis_key.pub, nano::dev::genesis_amount, send->hash (), nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node1->work_generate_blocking (send->hash ())); boost::property_tree::ptree request; request.put ("action", "process"); std::string json; receive.serialize_json (json); request.put ("block", json); request.put ("subtype", "send"); auto response (wait_response (system, rpc, request)); std::error_code ec (nano::error_rpc::invalid_subtype_balance); ASSERT_EQ (response.get<std::string> ("error"), ec.message ()); request.put ("subtype", "open"); auto response2 (wait_response (system, rpc, request)); ec = nano::error_rpc::invalid_subtype_previous; ASSERT_EQ (response2.get<std::string> 
("error"), ec.message ()); request.put ("subtype", "receive"); auto response3 (wait_response (system, rpc, request)); ASSERT_EQ (receive.hash ().to_string (), response3.get<std::string> ("hash")); ASSERT_TIMELY (10s, node2.latest (nano::dev::genesis_key.pub) == receive.hash ()); } TEST (rpc, process_ledger_insufficient_work) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node); ASSERT_LT (node->network_params.network.publish_thresholds.entry, node->network_params.network.publish_thresholds.epoch_1); auto latest (node->latest (nano::dev::genesis_key.pub)); auto min_difficulty = node->network_params.network.publish_thresholds.entry; auto max_difficulty = node->network_params.network.publish_thresholds.epoch_1; nano::state_block send (nano::dev::genesis->account (), latest, nano::dev::genesis->account (), nano::dev::genesis_amount - nano::Gxrb_ratio, nano::dev::genesis_key.pub, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, system.work_generate_limited (latest, min_difficulty, max_difficulty)); ASSERT_LT (send.difficulty (), max_difficulty); ASSERT_GE (send.difficulty (), min_difficulty); boost::property_tree::ptree request; request.put ("action", "process"); std::string json; send.serialize_json (json); request.put ("block", json); request.put ("subtype", "send"); auto response (wait_response (system, rpc, request)); std::error_code ec (nano::error_process::insufficient_work); ASSERT_EQ (1, response.count ("error")); ASSERT_EQ (response.get<std::string> ("error"), ec.message ()); } TEST (rpc, keepalive) { nano::system system; auto node0 = add_ipc_enabled_node (system); auto node1 (std::make_shared<nano::node> (system.io_ctx, nano::get_available_port (), nano::unique_path (), system.logging, system.work)); node1->start (); system.nodes.push_back (node1); auto [rpc, rpc_ctx] = add_rpc (system, node0); boost::property_tree::ptree request; request.put ("action", "keepalive"); auto address (boost::str (boost::format ("%1%") % node1->network.endpoint ().address ())); auto port (boost::str (boost::format ("%1%") % node1->network.endpoint ().port ())); request.put ("address", address); request.put ("port", port); ASSERT_EQ (nullptr, node0->network.udp_channels.channel (node1->network.endpoint ())); ASSERT_EQ (0, node0->network.size ()); auto response (wait_response (system, rpc, request)); system.deadline_set (10s); while (node0->network.find_channel (node1->network.endpoint ()) == nullptr) { ASSERT_EQ (0, node0->network.size ()); ASSERT_NO_ERROR (system.poll ()); } node1->stop (); } TEST (rpc, peers) { nano::system system; auto node = add_ipc_enabled_node (system); auto port = nano::get_available_port (); system.add_node (nano::node_config (port, system.logging)); nano::endpoint endpoint (boost::asio::ip::make_address_v6 ("fc00::1"), 4000); node->network.udp_channels.insert (endpoint, node->network_params.protocol.protocol_version); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "peers"); auto response (wait_response (system, rpc, request)); auto & peers_node (response.get_child ("peers")); ASSERT_EQ (2, peers_node.size ()); ASSERT_EQ (std::to_string (node->network_params.protocol.protocol_version), peers_node.get<std::string> ((boost::format ("[::1]:%1%") % port).str ())); // Previously "[::ffff:80.80.80.80]:4000", but IPv4 address cause "No such node thrown in the test body" issue with peers_node.get std::stringstream endpoint_text; endpoint_text << endpoint; ASSERT_EQ 
(std::to_string (node->network_params.protocol.protocol_version), peers_node.get<std::string> (endpoint_text.str ())); } TEST (rpc, peers_node_id) { nano::system system; auto node = add_ipc_enabled_node (system); auto port = nano::get_available_port (); system.add_node (nano::node_config (port, system.logging)); nano::endpoint endpoint (boost::asio::ip::make_address_v6 ("fc00::1"), 4000); node->network.udp_channels.insert (endpoint, node->network_params.protocol.protocol_version); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "peers"); request.put ("peer_details", true); auto response (wait_response (system, rpc, request)); auto & peers_node (response.get_child ("peers")); ASSERT_EQ (2, peers_node.size ()); auto tree1 (peers_node.get_child ((boost::format ("[::1]:%1%") % port).str ())); ASSERT_EQ (std::to_string (node->network_params.protocol.protocol_version), tree1.get<std::string> ("protocol_version")); ASSERT_EQ (system.nodes[1]->node_id.pub.to_node_id (), tree1.get<std::string> ("node_id")); std::stringstream endpoint_text; endpoint_text << endpoint; auto tree2 (peers_node.get_child (endpoint_text.str ())); ASSERT_EQ (std::to_string (node->network_params.protocol.protocol_version), tree2.get<std::string> ("protocol_version")); ASSERT_EQ ("", tree2.get<std::string> ("node_id")); } TEST (rpc, pending) { nano::system system; auto node = add_ipc_enabled_node (system); nano::keypair key1; system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); auto block1 (system.wallet (0)->send_action (nano::dev::genesis_key.pub, key1.pub, 100)); node->scheduler.flush (); ASSERT_TIMELY (5s, !node->active.active (*block1)); ASSERT_TIMELY (5s, node->ledger.cache.cemented_count == 2 && node->confirmation_height_processor.current ().is_zero () && node->confirmation_height_processor.awaiting_processing_size () == 0); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "pending"); request.put ("account", key1.pub.to_account ()); request.put ("count", "100"); { auto response (wait_response (system, rpc, request)); auto & blocks_node (response.get_child ("blocks")); ASSERT_EQ (1, blocks_node.size ()); nano::block_hash hash (blocks_node.begin ()->second.get<std::string> ("")); ASSERT_EQ (block1->hash (), hash); } request.put ("sorting", "true"); // Sorting test { auto response (wait_response (system, rpc, request)); auto & blocks_node (response.get_child ("blocks")); ASSERT_EQ (1, blocks_node.size ()); nano::block_hash hash (blocks_node.begin ()->first); ASSERT_EQ (block1->hash (), hash); std::string amount (blocks_node.begin ()->second.get<std::string> ("")); ASSERT_EQ ("100", amount); } request.put ("threshold", "100"); // Threshold test { auto response (wait_response (system, rpc, request)); auto & blocks_node (response.get_child ("blocks")); ASSERT_EQ (1, blocks_node.size ()); std::unordered_map<nano::block_hash, nano::uint128_union> blocks; for (auto i (blocks_node.begin ()), j (blocks_node.end ()); i != j; ++i) { nano::block_hash hash; hash.decode_hex (i->first); nano::uint128_union amount; amount.decode_dec (i->second.get<std::string> ("")); blocks[hash] = amount; boost::optional<std::string> source (i->second.get_optional<std::string> ("source")); ASSERT_FALSE (source.is_initialized ()); boost::optional<uint8_t> min_version (i->second.get_optional<uint8_t> ("min_version")); ASSERT_FALSE (min_version.is_initialized ()); } ASSERT_EQ (blocks[block1->hash ()], 100); } request.put 
("threshold", "101"); { auto response (wait_response (system, rpc, request, 10s)); auto & blocks_node (response.get_child ("blocks")); ASSERT_EQ (0, blocks_node.size ()); } request.put ("threshold", "0"); request.put ("source", "true"); request.put ("min_version", "true"); { auto response (wait_response (system, rpc, request)); auto & blocks_node (response.get_child ("blocks")); ASSERT_EQ (1, blocks_node.size ()); std::unordered_map<nano::block_hash, nano::uint128_union> amounts; std::unordered_map<nano::block_hash, nano::account> sources; for (auto i (blocks_node.begin ()), j (blocks_node.end ()); i != j; ++i) { nano::block_hash hash; hash.decode_hex (i->first); amounts[hash].decode_dec (i->second.get<std::string> ("amount")); sources[hash].decode_account (i->second.get<std::string> ("source")); ASSERT_EQ (i->second.get<uint8_t> ("min_version"), 0); } ASSERT_EQ (amounts[block1->hash ()], 100); ASSERT_EQ (sources[block1->hash ()], nano::dev::genesis_key.pub); } request.put ("account", key1.pub.to_account ()); request.put ("source", "false"); request.put ("min_version", "false"); auto check_block_response_count_l = [&system, &request, rpc = rpc] (size_t size) { auto response (wait_response (system, rpc, request)); ASSERT_EQ (size, response.get_child ("blocks").size ()); }; check_block_response_count_l (1); rpc_ctx->io_scope->reset (); reset_confirmation_height (system.nodes.front ()->store, block1->account ()); rpc_ctx->io_scope->renew (); check_block_response_count_l (0); request.put ("include_only_confirmed", "false"); rpc_ctx->io_scope->renew (); check_block_response_count_l (1); request.put ("include_only_confirmed", "true"); // Sorting with a smaller count than total should give absolute sorted amounts rpc_ctx->io_scope->reset (); node->store.confirmation_height.put (node->store.tx_begin_write (), nano::dev::genesis_key.pub, { 2, block1->hash () }); auto block2 (system.wallet (0)->send_action (nano::dev::genesis_key.pub, key1.pub, 200)); auto block3 (system.wallet (0)->send_action (nano::dev::genesis_key.pub, key1.pub, 300)); auto block4 (system.wallet (0)->send_action (nano::dev::genesis_key.pub, key1.pub, 400)); rpc_ctx->io_scope->renew (); ASSERT_TIMELY (10s, node->ledger.account_pending (node->store.tx_begin_read (), key1.pub) == 1000); ASSERT_TIMELY (5s, !node->active.active (*block4)); ASSERT_TIMELY (5s, node->block_confirmed (block4->hash ())); request.put ("count", "2"); { auto response (wait_response (system, rpc, request)); auto & blocks_node (response.get_child ("blocks")); ASSERT_EQ (2, blocks_node.size ()); nano::block_hash hash (blocks_node.begin ()->first); nano::block_hash hash1 ((++blocks_node.begin ())->first); ASSERT_EQ (block4->hash (), hash); ASSERT_EQ (block3->hash (), hash1); } } TEST (rpc, pending_burn) { nano::system system; auto node = add_ipc_enabled_node (system); nano::account burn (0); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); auto block1 (system.wallet (0)->send_action (nano::dev::genesis_key.pub, burn, 100)); auto [rpc, rpc_ctx] = add_rpc (system, node); node->scheduler.flush (); ASSERT_TIMELY (5s, !node->active.active (*block1)); ASSERT_TIMELY (5s, node->ledger.cache.cemented_count == 2 && node->confirmation_height_processor.current ().is_zero () && node->confirmation_height_processor.awaiting_processing_size () == 0); boost::property_tree::ptree request; request.put ("action", "pending"); request.put ("account", burn.to_account ()); request.put ("count", "100"); { auto response (wait_response (system, rpc, request)); auto & 
blocks_node (response.get_child ("blocks")); ASSERT_EQ (1, blocks_node.size ()); nano::block_hash hash (blocks_node.begin ()->second.get<std::string> ("")); ASSERT_EQ (block1->hash (), hash); } } TEST (rpc, search_pending) { nano::system system; auto node = add_ipc_enabled_node (system); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); auto wallet (node->wallets.items.begin ()->first.to_string ()); auto latest (node->latest (nano::dev::genesis_key.pub)); nano::send_block block (latest, nano::dev::genesis_key.pub, nano::dev::genesis_amount - node->config.receive_minimum.number (), nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node->work_generate_blocking (latest)); { auto transaction (node->store.tx_begin_write ()); ASSERT_EQ (nano::process_result::progress, node->ledger.process (transaction, block).code); } auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "search_pending"); request.put ("wallet", wallet); auto response (wait_response (system, rpc, request)); ASSERT_TIMELY (10s, node->balance (nano::dev::genesis_key.pub) == nano::dev::genesis_amount); } TEST (rpc, version) { nano::system system; auto node1 = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node1); boost::property_tree::ptree request1; request1.put ("action", "version"); test_response response1 (request1, rpc->config.port, system.io_ctx); ASSERT_TIMELY (5s, response1.status != 0); ASSERT_EQ (200, response1.status); ASSERT_EQ ("1", response1.json.get<std::string> ("rpc_version")); { auto transaction (node1->store.tx_begin_read ()); ASSERT_EQ (std::to_string (node1->store.version.get (transaction)), response1.json.get<std::string> ("store_version")); } ASSERT_EQ (std::to_string (node1->network_params.protocol.protocol_version), response1.json.get<std::string> ("protocol_version")); ASSERT_EQ (boost::str (boost::format ("Nano %1%") % NANO_VERSION_STRING), response1.json.get<std::string> ("node_vendor")); ASSERT_EQ (node1->store.vendor_get (), response1.json.get<std::string> ("store_vendor")); auto network_label (node1->network_params.network.get_current_network_as_string ()); ASSERT_EQ (network_label, response1.json.get<std::string> ("network")); auto genesis_open (node1->latest (nano::dev::genesis_key.pub)); ASSERT_EQ (genesis_open.to_string (), response1.json.get<std::string> ("network_identifier")); ASSERT_EQ (BUILD_INFO, response1.json.get<std::string> ("build_info")); auto headers (response1.resp.base ()); auto allow (headers.at ("Allow")); auto content_type (headers.at ("Content-Type")); auto access_control_allow_origin (headers.at ("Access-Control-Allow-Origin")); auto access_control_allow_methods (headers.at ("Access-Control-Allow-Methods")); auto access_control_allow_headers (headers.at ("Access-Control-Allow-Headers")); auto connection (headers.at ("Connection")); ASSERT_EQ ("POST, OPTIONS", allow); ASSERT_EQ ("application/json", content_type); ASSERT_EQ ("*", access_control_allow_origin); ASSERT_EQ (allow, access_control_allow_methods); ASSERT_EQ ("Accept, Accept-Language, Content-Language, Content-Type", access_control_allow_headers); ASSERT_EQ ("close", connection); } TEST (rpc, work_generate) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node); nano::block_hash hash (1); boost::property_tree::ptree request; request.put ("action", "work_generate"); request.put ("hash", hash.to_string ()); auto verify_response = [node, rpc = rpc, &system] (auto & request, 
auto & hash) { test_response response (request, rpc->config.port, system.io_ctx); ASSERT_TIMELY (5s, response.status != 0); ASSERT_EQ (200, response.status); ASSERT_EQ (hash.to_string (), response.json.get<std::string> ("hash")); auto work_text (response.json.get<std::string> ("work")); uint64_t work; ASSERT_FALSE (nano::from_string_hex (work_text, work)); auto result_difficulty (nano::work_difficulty (nano::work_version::work_1, hash, work)); auto response_difficulty_text (response.json.get<std::string> ("difficulty")); uint64_t response_difficulty; ASSERT_FALSE (nano::from_string_hex (response_difficulty_text, response_difficulty)); ASSERT_EQ (result_difficulty, response_difficulty); auto multiplier = response.json.get<double> ("multiplier"); ASSERT_NEAR (nano::difficulty::to_multiplier (result_difficulty, node->default_difficulty (nano::work_version::work_1)), multiplier, 1e-6); }; verify_response (request, hash); request.put ("use_peers", "true"); verify_response (request, hash); } TEST (rpc, work_generate_difficulty) { nano::system system; nano::node_config node_config (nano::get_available_port (), system.logging); node_config.max_work_generate_multiplier = 1000; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node); nano::block_hash hash (1); boost::property_tree::ptree request; request.put ("action", "work_generate"); request.put ("hash", hash.to_string ()); { uint64_t difficulty (0xfff0000000000000); request.put ("difficulty", nano::to_string_hex (difficulty)); auto response (wait_response (system, rpc, request, 10s)); auto work_text (response.get<std::string> ("work")); uint64_t work; ASSERT_FALSE (nano::from_string_hex (work_text, work)); auto result_difficulty (nano::work_difficulty (nano::work_version::work_1, hash, work)); auto response_difficulty_text (response.get<std::string> ("difficulty")); uint64_t response_difficulty; ASSERT_FALSE (nano::from_string_hex (response_difficulty_text, response_difficulty)); ASSERT_EQ (result_difficulty, response_difficulty); auto multiplier = response.get<double> ("multiplier"); // Expected multiplier from base threshold, not from the given difficulty ASSERT_NEAR (nano::difficulty::to_multiplier (result_difficulty, node->default_difficulty (nano::work_version::work_1)), multiplier, 1e-10); ASSERT_GE (result_difficulty, difficulty); } { uint64_t difficulty (0xffff000000000000); request.put ("difficulty", nano::to_string_hex (difficulty)); auto response (wait_response (system, rpc, request)); auto work_text (response.get<std::string> ("work")); uint64_t work; ASSERT_FALSE (nano::from_string_hex (work_text, work)); auto result_difficulty (nano::work_difficulty (nano::work_version::work_1, hash, work)); ASSERT_GE (result_difficulty, difficulty); } { uint64_t difficulty (node->max_work_generate_difficulty (nano::work_version::work_1) + 1); request.put ("difficulty", nano::to_string_hex (difficulty)); auto response (wait_response (system, rpc, request)); std::error_code ec (nano::error_rpc::difficulty_limit); ASSERT_EQ (response.get<std::string> ("error"), ec.message ()); } } TEST (rpc, work_generate_multiplier) { nano::system system; nano::node_config node_config (nano::get_available_port (), system.logging); node_config.max_work_generate_multiplier = 100; auto node = add_ipc_enabled_node (system, node_config); auto [rpc, rpc_ctx] = add_rpc (system, node); nano::block_hash hash (1); boost::property_tree::ptree request; request.put ("action", "work_generate"); request.put ("hash", hash.to_string ()); { // When 
both difficulty and multiplier are given, should use multiplier // Give base difficulty and very high multiplier to test request.put ("difficulty", nano::to_string_hex (0xff00000000000000)); double multiplier{ 100.0 }; request.put ("multiplier", multiplier); auto response (wait_response (system, rpc, request, 10s)); auto work_text (response.get_optional<std::string> ("work")); ASSERT_TRUE (work_text.is_initialized ()); uint64_t work; ASSERT_FALSE (nano::from_string_hex (*work_text, work)); auto result_difficulty (nano::work_difficulty (nano::work_version::work_1, hash, work)); auto response_difficulty_text (response.get<std::string> ("difficulty")); uint64_t response_difficulty; ASSERT_FALSE (nano::from_string_hex (response_difficulty_text, response_difficulty)); ASSERT_EQ (result_difficulty, response_difficulty); auto result_multiplier = response.get<double> ("multiplier"); ASSERT_GE (result_multiplier, multiplier); } { request.put ("multiplier", -1.5); auto response (wait_response (system, rpc, request)); std::error_code ec (nano::error_rpc::bad_multiplier_format); ASSERT_EQ (response.get<std::string> ("error"), ec.message ()); } { double max_multiplier (nano::difficulty::to_multiplier (node->max_work_generate_difficulty (nano::work_version::work_1), node->default_difficulty (nano::work_version::work_1))); request.put ("multiplier", max_multiplier + 1); auto response (wait_response (system, rpc, request)); std::error_code ec (nano::error_rpc::difficulty_limit); ASSERT_EQ (response.get<std::string> ("error"), ec.message ()); } } TEST (rpc, work_generate_block_high) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node); nano::keypair key; nano::state_block block (key.pub, 0, nano::dev::genesis_key.pub, nano::Gxrb_ratio, 123, key.prv, key.pub, *node->work_generate_blocking (key.pub)); nano::block_hash hash (block.root ().as_block_hash ()); auto block_difficulty (nano::work_difficulty (nano::work_version::work_1, hash, block.block_work ())); boost::property_tree::ptree request; request.put ("action", "work_generate"); request.put ("hash", hash.to_string ()); request.put ("json_block", "true"); boost::property_tree::ptree json; block.serialize_json (json); request.add_child ("block", json); { auto response (wait_response (system, rpc, request)); ASSERT_EQ (1, response.count ("error")); ASSERT_EQ (std::error_code (nano::error_rpc::block_work_enough).message (), response.get<std::string> ("error")); } } TEST (rpc, work_generate_block_low) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node); nano::keypair key; nano::state_block block (key.pub, 0, nano::dev::genesis_key.pub, nano::Gxrb_ratio, 123, key.prv, key.pub, 0); auto threshold (node->default_difficulty (block.work_version ())); block.block_work_set (system.work_generate_limited (block.root ().as_block_hash (), threshold, nano::difficulty::from_multiplier (node->config.max_work_generate_multiplier / 10, threshold))); nano::block_hash hash (block.root ().as_block_hash ()); auto block_difficulty (block.difficulty ()); boost::property_tree::ptree request; request.put ("action", "work_generate"); request.put ("hash", hash.to_string ()); request.put ("difficulty", nano::to_string_hex (block_difficulty + 1)); request.put ("json_block", "false"); std::string json; block.serialize_json (json); request.put ("block", json); { auto response (wait_response (system, rpc, request, 10s)); auto work_text (response.get_optional<std::string> 
("work")); ASSERT_TRUE (work_text.is_initialized ()); uint64_t work; ASSERT_FALSE (nano::from_string_hex (*work_text, work)); ASSERT_NE (block.block_work (), work); auto result_difficulty (nano::work_difficulty (nano::work_version::work_1, hash, work)); auto response_difficulty_text (response.get<std::string> ("difficulty")); uint64_t response_difficulty; ASSERT_FALSE (nano::from_string_hex (response_difficulty_text, response_difficulty)); ASSERT_EQ (result_difficulty, response_difficulty); ASSERT_LT (block_difficulty, result_difficulty); } } TEST (rpc, work_generate_block_root_mismatch) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node); nano::keypair key; nano::state_block block (key.pub, 0, nano::dev::genesis_key.pub, nano::Gxrb_ratio, 123, key.prv, key.pub, *node->work_generate_blocking (key.pub)); nano::block_hash hash (1); boost::property_tree::ptree request; request.put ("action", "work_generate"); request.put ("hash", hash.to_string ()); request.put ("json_block", "false"); std::string json; block.serialize_json (json); request.put ("block", json); { auto response (wait_response (system, rpc, request)); ASSERT_EQ (1, response.count ("error")); ASSERT_EQ (std::error_code (nano::error_rpc::block_root_mismatch).message (), response.get<std::string> ("error")); } } TEST (rpc, work_generate_block_ledger_epoch_2) { nano::system system; auto node = add_ipc_enabled_node (system); auto epoch1 = system.upgrade_genesis_epoch (*node, nano::epoch::epoch_1); ASSERT_NE (nullptr, epoch1); auto epoch2 = system.upgrade_genesis_epoch (*node, nano::epoch::epoch_2); ASSERT_NE (nullptr, epoch2); nano::keypair key; system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); auto send_block (system.wallet (0)->send_action (nano::dev::genesis_key.pub, key.pub, nano::Gxrb_ratio)); ASSERT_NE (nullptr, send_block); nano::state_block block (key.pub, 0, nano::dev::genesis_key.pub, nano::Gxrb_ratio, send_block->hash (), key.prv, key.pub, 0); auto threshold (nano::work_threshold (block.work_version (), nano::block_details (nano::epoch::epoch_2, false, true, false))); block.block_work_set (system.work_generate_limited (block.root ().as_block_hash (), 1, threshold - 1)); nano::block_hash hash (block.root ().as_block_hash ()); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "work_generate"); request.put ("hash", hash.to_string ()); request.put ("json_block", "false"); std::string json; block.serialize_json (json); request.put ("block", json); bool finished (false); auto iteration (0); while (!finished) { auto response (wait_response (system, rpc, request, 10s)); auto work_text (response.get_optional<std::string> ("work")); ASSERT_TRUE (work_text.is_initialized ()); uint64_t work; ASSERT_FALSE (nano::from_string_hex (*work_text, work)); auto result_difficulty (nano::work_difficulty (nano::work_version::work_1, hash, work)); auto response_difficulty_text (response.get<std::string> ("difficulty")); uint64_t response_difficulty; ASSERT_FALSE (nano::from_string_hex (response_difficulty_text, response_difficulty)); ASSERT_EQ (result_difficulty, response_difficulty); ASSERT_GE (result_difficulty, node->network_params.network.publish_thresholds.epoch_2_receive); finished = result_difficulty < node->network_params.network.publish_thresholds.epoch_1; ASSERT_LT (++iteration, 200); } } TEST (rpc, work_cancel) { nano::system system; auto node1 = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc 
(system, node1); nano::block_hash hash1 (1); boost::property_tree::ptree request1; request1.put ("action", "work_cancel"); request1.put ("hash", hash1.to_string ()); std::atomic<bool> done (false); system.deadline_set (10s); while (!done) { system.work.generate (nano::work_version::work_1, hash1, node1->network_params.network.publish_thresholds.base, [&done] (boost::optional<uint64_t> work_a) { done = !work_a; }); auto response1 (wait_response (system, rpc, request1)); std::error_code ec; ASSERT_NO_ERROR (ec); std::string success (response1.get<std::string> ("success")); ASSERT_TRUE (success.empty ()); } } TEST (rpc, work_peer_bad) { nano::system system; auto node1 = add_ipc_enabled_node (system); auto & node2 = *system.add_node (); node2.config.work_peers.push_back (std::make_pair (boost::asio::ip::address_v6::any ().to_string (), 0)); auto [rpc, rpc_ctx] = add_rpc (system, node1); nano::block_hash hash1 (1); std::atomic<uint64_t> work (0); node2.work_generate (nano::work_version::work_1, hash1, node2.network_params.network.publish_thresholds.base, [&work] (boost::optional<uint64_t> work_a) { ASSERT_TRUE (work_a.is_initialized ()); work = *work_a; }); ASSERT_TIMELY (5s, nano::work_difficulty (nano::work_version::work_1, hash1, work) >= nano::work_threshold_base (nano::work_version::work_1)); } TEST (rpc, work_peer_one) { nano::system system; auto node1 = add_ipc_enabled_node (system); auto & node2 = *system.add_node (); auto [rpc, rpc_ctx] = add_rpc (system, node1); node2.config.work_peers.push_back (std::make_pair (node1->network.endpoint ().address ().to_string (), rpc->config.port)); nano::keypair key1; std::atomic<uint64_t> work (0); node2.work_generate (nano::work_version::work_1, key1.pub, node1->network_params.network.publish_thresholds.base, [&work] (boost::optional<uint64_t> work_a) { ASSERT_TRUE (work_a.is_initialized ()); work = *work_a; }); ASSERT_TIMELY (5s, nano::work_difficulty (nano::work_version::work_1, key1.pub, work) >= nano::work_threshold_base (nano::work_version::work_1)); } TEST (rpc, work_peer_many) { nano::system system1 (1); nano::system system2; nano::system system3 (1); nano::system system4 (1); auto & node1 (*system1.nodes[0]); auto node2 = add_ipc_enabled_node (system2); auto node3 = add_ipc_enabled_node (system3); auto node4 = add_ipc_enabled_node (system4); auto [rpc2, rpc_ctx_2] = add_rpc (system2, node2); auto [rpc3, rpc_ctx_3] = add_rpc (system3, node3); auto [rpc4, rpc_ctx_4] = add_rpc (system4, node4); node1.config.work_peers.push_back (std::make_pair (node2->network.endpoint ().address ().to_string (), rpc2->config.port)); node1.config.work_peers.push_back (std::make_pair (node3->network.endpoint ().address ().to_string (), rpc3->config.port)); node1.config.work_peers.push_back (std::make_pair (node4->network.endpoint ().address ().to_string (), rpc4->config.port)); std::array<std::atomic<uint64_t>, 10> works; for (auto i (0); i < works.size (); ++i) { nano::keypair key1; node1.work_generate (nano::work_version::work_1, key1.pub, node1.network_params.network.publish_thresholds.base, [&work = works[i]] (boost::optional<uint64_t> work_a) { work = *work_a; }); while (nano::work_difficulty (nano::work_version::work_1, key1.pub, works[i]) < nano::work_threshold_base (nano::work_version::work_1)) { system1.poll (); system2.poll (); system3.poll (); system4.poll (); } } node1.stop (); } TEST (rpc, work_version_invalid) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node); nano::block_hash hash (1); 
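// Both work_generate and work_validate are expected to reject an unknown work version string with nano::error_rpc::bad_work_version. The ptree assembled below serializes to roughly { "action": "work_generate", "hash": "<hash>", "version": "work_invalid" } (JSON shape shown for illustration only).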
boost::property_tree::ptree request; request.put ("action", "work_generate"); request.put ("hash", hash.to_string ()); request.put ("version", "work_invalid"); { auto response (wait_response (system, rpc, request)); ASSERT_EQ (1, response.count ("error")); ASSERT_EQ (std::error_code (nano::error_rpc::bad_work_version).message (), response.get<std::string> ("error")); } request.put ("action", "work_validate"); { auto response (wait_response (system, rpc, request)); ASSERT_EQ (1, response.count ("error")); ASSERT_EQ (std::error_code (nano::error_rpc::bad_work_version).message (), response.get<std::string> ("error")); } } TEST (rpc, block_count) { { nano::system system; auto node1 = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node1); boost::property_tree::ptree request1; request1.put ("action", "block_count"); { auto response1 (wait_response (system, rpc, request1)); ASSERT_EQ ("1", response1.get<std::string> ("count")); ASSERT_EQ ("0", response1.get<std::string> ("unchecked")); ASSERT_EQ ("1", response1.get<std::string> ("cemented")); } } // Should be able to get all counts even when enable_control is false. { nano::system system; auto node1 = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node1); boost::property_tree::ptree request1; request1.put ("action", "block_count"); { auto response1 (wait_response (system, rpc, request1)); ASSERT_EQ ("1", response1.get<std::string> ("count")); ASSERT_EQ ("0", response1.get<std::string> ("unchecked")); ASSERT_EQ ("1", response1.get<std::string> ("cemented")); } } } TEST (rpc, block_count_pruning) { nano::system system; auto & node0 = *system.add_node (); nano::node_config node_config (nano::get_available_port (), system.logging); node_config.enable_voting = false; // Remove after allowing pruned voting nano::node_flags node_flags; node_flags.enable_pruning = true; auto node1 = add_ipc_enabled_node (system, node_config, node_flags); auto latest (node1->latest (nano::dev::genesis_key.pub)); auto send1 (std::make_shared<nano::send_block> (latest, nano::dev::genesis_key.pub, nano::dev::genesis_amount - nano::Gxrb_ratio, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node1->work_generate_blocking (latest))); node1->process_active (send1); auto receive1 (std::make_shared<nano::receive_block> (send1->hash (), send1->hash (), nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node1->work_generate_blocking (send1->hash ()))); node1->process_active (receive1); node1->block_processor.flush (); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); ASSERT_TIMELY (5s, node1->ledger.cache.cemented_count == 3 && node1->confirmation_height_processor.current ().is_zero () && node1->confirmation_height_processor.awaiting_processing_size () == 0); // Pruning action { auto transaction (node1->store.tx_begin_write ()); ASSERT_EQ (1, node1->ledger.pruning_action (transaction, send1->hash (), 1)); } auto [rpc, rpc_ctx] = add_rpc (system, node1); boost::property_tree::ptree request1; request1.put ("action", "block_count"); { auto response1 (wait_response (system, rpc, request1)); ASSERT_EQ ("3", response1.get<std::string> ("count")); ASSERT_EQ ("0", response1.get<std::string> ("unchecked")); ASSERT_EQ ("3", response1.get<std::string> ("cemented")); ASSERT_EQ ("2", response1.get<std::string> ("full")); ASSERT_EQ ("1", response1.get<std::string> ("pruned")); } } TEST (rpc, frontier_count) { nano::system system; auto node1 = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node1); 
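// A freshly started node holds only the genesis account, so frontier_count is expected to report exactly one frontier.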
boost::property_tree::ptree request1; request1.put ("action", "frontier_count"); auto response1 (wait_response (system, rpc, request1)); ASSERT_EQ ("1", response1.get<std::string> ("count")); } TEST (rpc, account_count) { nano::system system; auto node1 = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node1); boost::property_tree::ptree request1; request1.put ("action", "account_count"); auto response1 (wait_response (system, rpc, request1)); ASSERT_EQ ("1", response1.get<std::string> ("count")); } TEST (rpc, available_supply) { nano::system system; auto node1 = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node1); boost::property_tree::ptree request1; request1.put ("action", "available_supply"); auto response1 (wait_response (system, rpc, request1)); ASSERT_EQ ("0", response1.get<std::string> ("available")); rpc_ctx->io_scope->reset (); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); nano::keypair key; auto block (system.wallet (0)->send_action (nano::dev::genesis_key.pub, key.pub, 1)); rpc_ctx->io_scope->renew (); auto response2 (wait_response (system, rpc, request1)); ASSERT_EQ ("1", response2.get<std::string> ("available")); rpc_ctx->io_scope->reset (); auto block2 (system.wallet (0)->send_action (nano::dev::genesis_key.pub, 0, 100)); // Sending to burning 0 account rpc_ctx->io_scope->renew (); auto response3 (wait_response (system, rpc, request1, 10s)); ASSERT_EQ ("1", response3.get<std::string> ("available")); } TEST (rpc, mrai_to_raw) { nano::system system; auto node1 = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node1); boost::property_tree::ptree request1; request1.put ("action", "mrai_to_raw"); request1.put ("amount", "1"); auto response1 (wait_response (system, rpc, request1)); ASSERT_EQ (nano::Mxrb_ratio.convert_to<std::string> (), response1.get<std::string> ("amount")); } TEST (rpc, mrai_from_raw) { nano::system system; auto node1 = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node1); boost::property_tree::ptree request1; request1.put ("action", "mrai_from_raw"); request1.put ("amount", nano::Mxrb_ratio.convert_to<std::string> ()); auto response1 (wait_response (system, rpc, request1)); ASSERT_EQ ("1", response1.get<std::string> ("amount")); } TEST (rpc, krai_to_raw) { nano::system system; auto node1 = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node1); boost::property_tree::ptree request1; request1.put ("action", "krai_to_raw"); request1.put ("amount", "1"); auto response1 (wait_response (system, rpc, request1)); ASSERT_EQ (nano::kxrb_ratio.convert_to<std::string> (), response1.get<std::string> ("amount")); } TEST (rpc, krai_from_raw) { nano::system system; auto node1 = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node1); boost::property_tree::ptree request1; request1.put ("action", "krai_from_raw"); request1.put ("amount", nano::kxrb_ratio.convert_to<std::string> ()); auto response1 (wait_response (system, rpc, request1)); ASSERT_EQ ("1", response1.get<std::string> ("amount")); } TEST (rpc, nano_to_raw) { nano::system system; auto node1 = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node1); boost::property_tree::ptree request1; request1.put ("action", "nano_to_raw"); request1.put ("amount", "1"); auto response1 (wait_response (system, rpc, request1)); ASSERT_EQ (nano::xrb_ratio.convert_to<std::string> (), response1.get<std::string> ("amount")); } TEST (rpc, nano_from_raw) { nano::system 
system; auto node1 = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node1); boost::property_tree::ptree request1; request1.put ("action", "nano_from_raw"); request1.put ("amount", nano::xrb_ratio.convert_to<std::string> ()); auto response1 (wait_response (system, rpc, request1)); ASSERT_EQ ("1", response1.get<std::string> ("amount")); } TEST (rpc, account_representative) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("account", nano::dev::genesis->account ().to_account ()); request.put ("action", "account_representative"); auto response (wait_response (system, rpc, request)); std::string account_text1 (response.get<std::string> ("representative")); ASSERT_EQ (account_text1, nano::dev::genesis->account ().to_account ()); } TEST (rpc, account_representative_set) { nano::system system; auto node = add_ipc_enabled_node (system); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; nano::keypair rep; request.put ("account", nano::dev::genesis->account ().to_account ()); request.put ("representative", rep.pub.to_account ()); request.put ("wallet", node->wallets.items.begin ()->first.to_string ()); request.put ("action", "account_representative_set"); auto response (wait_response (system, rpc, request)); std::string block_text1 (response.get<std::string> ("block")); nano::block_hash hash; ASSERT_FALSE (hash.decode_hex (block_text1)); ASSERT_FALSE (hash.is_zero ()); auto transaction (node->store.tx_begin_read ()); ASSERT_TRUE (node->store.block.exists (transaction, hash)); ASSERT_EQ (rep.pub, node->store.block.get (transaction, hash)->representative ()); } TEST (rpc, account_representative_set_work_disabled) { nano::system system; nano::node_config node_config (nano::get_available_port (), system.logging); node_config.work_threads = 0; auto node = add_ipc_enabled_node (system, node_config); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; nano::keypair rep; request.put ("account", nano::dev::genesis->account ().to_account ()); request.put ("representative", rep.pub.to_account ()); request.put ("wallet", node->wallets.items.begin ()->first.to_string ()); request.put ("action", "account_representative_set"); { auto response (wait_response (system, rpc, request, 10s)); ASSERT_EQ (std::error_code (nano::error_common::disabled_work_generation).message (), response.get<std::string> ("error")); } } TEST (rpc, account_representative_set_epoch_2) { nano::system system; auto node = add_ipc_enabled_node (system); // Upgrade the genesis account to epoch 2 ASSERT_NE (nullptr, system.upgrade_genesis_epoch (*node, nano::epoch::epoch_1)); ASSERT_NE (nullptr, system.upgrade_genesis_epoch (*node, nano::epoch::epoch_2)); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv, false); auto target_difficulty = nano::work_threshold (nano::work_version::work_1, nano::block_details (nano::epoch::epoch_2, false, false, false)); ASSERT_LT (node->network_params.network.publish_thresholds.entry, target_difficulty); auto min_difficulty = node->network_params.network.publish_thresholds.entry; auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; std::string wallet; node->wallets.items.begin ()->first.encode_hex (wallet); request.put ("wallet", wallet); request.put 
("action", "account_representative_set"); request.put ("account", nano::dev::genesis_key.pub.to_account ()); request.put ("representative", nano::keypair ().pub.to_account ()); // Test that the correct error is given if there is insufficient work auto insufficient = system.work_generate_limited (nano::dev::genesis->hash (), min_difficulty, target_difficulty); request.put ("work", nano::to_string_hex (insufficient)); { auto response (wait_response (system, rpc, request)); std::error_code ec (nano::error_common::invalid_work); ASSERT_EQ (1, response.count ("error")); ASSERT_EQ (response.get<std::string> ("error"), ec.message ()); } } TEST (rpc, bootstrap) { nano::system system0; auto node = add_ipc_enabled_node (system0); nano::system system1 (1); auto node1 = system1.nodes[0]; auto latest (node1->latest (nano::dev::genesis_key.pub)); nano::send_block send (latest, nano::dev::genesis->account (), 100, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node1->work_generate_blocking (latest)); { auto transaction (node1->store.tx_begin_write ()); ASSERT_EQ (nano::process_result::progress, node1->ledger.process (transaction, send).code); } auto [rpc, rpc_ctx] = add_rpc (system0, node); boost::property_tree::ptree request; request.put ("action", "bootstrap"); request.put ("address", "::ffff:127.0.0.1"); request.put ("port", node1->network.endpoint ().port ()); test_response response (request, rpc->config.port, system0.io_ctx); while (response.status == 0) { system0.poll (); } system1.deadline_set (10s); while (node->latest (nano::dev::genesis->account ()) != node1->latest (nano::dev::genesis->account ())) { ASSERT_NO_ERROR (system0.poll ()); ASSERT_NO_ERROR (system1.poll ()); } } TEST (rpc, account_remove) { nano::system system0; auto node = add_ipc_enabled_node (system0); auto key1 (system0.wallet (0)->deterministic_insert ()); ASSERT_TRUE (system0.wallet (0)->exists (key1)); auto [rpc, rpc_ctx] = add_rpc (system0, node); boost::property_tree::ptree request; request.put ("action", "account_remove"); request.put ("wallet", node->wallets.items.begin ()->first.to_string ()); request.put ("account", key1.to_account ()); auto response (wait_response (system0, rpc, request)); ASSERT_FALSE (system0.wallet (0)->exists (key1)); } TEST (rpc, representatives) { nano::system system0; auto node = add_ipc_enabled_node (system0); auto [rpc, rpc_ctx] = add_rpc (system0, node); boost::property_tree::ptree request; request.put ("action", "representatives"); auto response (wait_response (system0, rpc, request)); auto & representatives_node (response.get_child ("representatives")); std::vector<nano::account> representatives; for (auto i (representatives_node.begin ()), n (representatives_node.end ()); i != n; ++i) { nano::account account; ASSERT_FALSE (account.decode_account (i->first)); representatives.push_back (account); } ASSERT_EQ (1, representatives.size ()); ASSERT_EQ (nano::dev::genesis->account (), representatives[0]); } // wallet_seed is only available over IPC's unsafe encoding, and when running on test network TEST (rpc, wallet_seed) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node); nano::raw_key seed; { auto transaction (node->wallets.tx_begin_read ()); system.wallet (0)->store.seed (seed, transaction); } boost::property_tree::ptree request; request.put ("action", "wallet_seed"); request.put ("wallet", node->wallets.items.begin ()->first.to_string ()); auto response (wait_response (system, rpc, request)); { std::string seed_text 
(response.get<std::string> ("seed")); ASSERT_EQ (seed.to_string (), seed_text); } } TEST (rpc, wallet_change_seed) { nano::system system0; auto node = add_ipc_enabled_node (system0); auto [rpc, rpc_ctx] = add_rpc (system0, node); nano::raw_key seed; nano::random_pool::generate_block (seed.bytes.data (), seed.bytes.size ()); { auto transaction (node->wallets.tx_begin_read ()); nano::raw_key seed0; nano::random_pool::generate_block (seed0.bytes.data (), seed0.bytes.size ()); system0.wallet (0)->store.seed (seed0, transaction); ASSERT_NE (seed, seed0); } auto prv = nano::deterministic_key (seed, 0); auto pub (nano::pub_key (prv)); boost::property_tree::ptree request; request.put ("action", "wallet_change_seed"); request.put ("wallet", node->wallets.items.begin ()->first.to_string ()); request.put ("seed", seed.to_string ()); auto response (wait_response (system0, rpc, request)); { auto transaction (node->wallets.tx_begin_read ()); nano::raw_key seed0; system0.wallet (0)->store.seed (seed0, transaction); ASSERT_EQ (seed, seed0); } auto account_text (response.get<std::string> ("last_restored_account")); nano::account account; ASSERT_FALSE (account.decode_account (account_text)); ASSERT_TRUE (system0.wallet (0)->exists (account)); ASSERT_EQ (pub, account); ASSERT_EQ ("1", response.get<std::string> ("restored_count")); } TEST (rpc, wallet_frontiers) { nano::system system0; auto node = add_ipc_enabled_node (system0); system0.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); auto [rpc, rpc_ctx] = add_rpc (system0, node); boost::property_tree::ptree request; request.put ("action", "wallet_frontiers"); request.put ("wallet", node->wallets.items.begin ()->first.to_string ()); auto response (wait_response (system0, rpc, request)); auto & frontiers_node (response.get_child ("frontiers")); std::vector<nano::account> frontiers; for (auto i (frontiers_node.begin ()), n (frontiers_node.end ()); i != n; ++i) { frontiers.push_back (nano::account (i->second.get<std::string> (""))); } ASSERT_EQ (1, frontiers.size ()); ASSERT_EQ (node->latest (nano::dev::genesis->account ()), frontiers[0]); } TEST (rpc, work_validate) { nano::system system; auto node1 = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node1); nano::block_hash hash (1); uint64_t work1 (*node1->work_generate_blocking (hash)); boost::property_tree::ptree request; request.put ("action", "work_validate"); request.put ("hash", hash.to_string ()); request.put ("work", nano::to_string_hex (work1)); { auto response (wait_response (system, rpc, request)); ASSERT_EQ (0, response.count ("valid")); ASSERT_TRUE (response.get<bool> ("valid_all")); ASSERT_TRUE (response.get<bool> ("valid_receive")); std::string difficulty_text (response.get<std::string> ("difficulty")); uint64_t difficulty; ASSERT_FALSE (nano::from_string_hex (difficulty_text, difficulty)); ASSERT_GE (difficulty, node1->default_difficulty (nano::work_version::work_1)); double multiplier (response.get<double> ("multiplier")); ASSERT_NEAR (multiplier, nano::difficulty::to_multiplier (difficulty, node1->default_difficulty (nano::work_version::work_1)), 1e-6); } uint64_t work2 (0); request.put ("work", nano::to_string_hex (work2)); { auto response (wait_response (system, rpc, request)); ASSERT_EQ (0, response.count ("valid")); ASSERT_FALSE (response.get<bool> ("valid_all")); ASSERT_FALSE (response.get<bool> ("valid_receive")); std::string difficulty_text (response.get<std::string> ("difficulty")); uint64_t difficulty; ASSERT_FALSE (nano::from_string_hex (difficulty_text, 
difficulty)); ASSERT_GE (node1->default_difficulty (nano::work_version::work_1), difficulty); double multiplier (response.get<double> ("multiplier")); ASSERT_NEAR (multiplier, nano::difficulty::to_multiplier (difficulty, node1->default_difficulty (nano::work_version::work_1)), 1e-6); } auto result_difficulty (nano::work_difficulty (nano::work_version::work_1, hash, work1)); ASSERT_GE (result_difficulty, node1->default_difficulty (nano::work_version::work_1)); request.put ("work", nano::to_string_hex (work1)); request.put ("difficulty", nano::to_string_hex (result_difficulty)); { auto response (wait_response (system, rpc, request)); ASSERT_TRUE (response.get<bool> ("valid")); ASSERT_TRUE (response.get<bool> ("valid_all")); ASSERT_TRUE (response.get<bool> ("valid_receive")); } uint64_t difficulty4 (0xfff0000000000000); request.put ("work", nano::to_string_hex (work1)); request.put ("difficulty", nano::to_string_hex (difficulty4)); { auto response (wait_response (system, rpc, request)); ASSERT_EQ (result_difficulty >= difficulty4, response.get<bool> ("valid")); ASSERT_EQ (result_difficulty >= node1->default_difficulty (nano::work_version::work_1), response.get<bool> ("valid_all")); ASSERT_EQ (result_difficulty >= node1->network_params.network.publish_thresholds.epoch_2_receive, response.get<bool> ("valid_all")); } uint64_t work3 (*node1->work_generate_blocking (hash, difficulty4)); request.put ("work", nano::to_string_hex (work3)); { auto response (wait_response (system, rpc, request)); ASSERT_TRUE (response.get<bool> ("valid")); ASSERT_TRUE (response.get<bool> ("valid_all")); ASSERT_TRUE (response.get<bool> ("valid_receive")); } } TEST (rpc, work_validate_epoch_2) { nano::system system; auto node = add_ipc_enabled_node (system); auto epoch1 = system.upgrade_genesis_epoch (*node, nano::epoch::epoch_1); ASSERT_NE (nullptr, epoch1); ASSERT_EQ (node->network_params.network.publish_thresholds.epoch_2, node->network_params.network.publish_thresholds.base); auto work = system.work_generate_limited (epoch1->hash (), node->network_params.network.publish_thresholds.epoch_1, node->network_params.network.publish_thresholds.base); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "work_validate"); request.put ("hash", epoch1->hash ().to_string ()); request.put ("work", nano::to_string_hex (work)); { auto response (wait_response (system, rpc, request)); ASSERT_EQ (0, response.count ("valid")); ASSERT_FALSE (response.get<bool> ("valid_all")); ASSERT_TRUE (response.get<bool> ("valid_receive")); std::string difficulty_text (response.get<std::string> ("difficulty")); uint64_t difficulty{ 0 }; ASSERT_FALSE (nano::from_string_hex (difficulty_text, difficulty)); double multiplier (response.get<double> ("multiplier")); ASSERT_NEAR (multiplier, nano::difficulty::to_multiplier (difficulty, node->network_params.network.publish_thresholds.epoch_2), 1e-6); }; // After upgrading, the higher difficulty is used to validate and calculate the multiplier rpc_ctx->io_scope->reset (); ASSERT_NE (nullptr, system.upgrade_genesis_epoch (*node, nano::epoch::epoch_2)); rpc_ctx->io_scope->renew (); { auto response (wait_response (system, rpc, request)); ASSERT_EQ (0, response.count ("valid")); ASSERT_FALSE (response.get<bool> ("valid_all")); ASSERT_TRUE (response.get<bool> ("valid_receive")); std::string difficulty_text (response.get<std::string> ("difficulty")); uint64_t difficulty{ 0 }; ASSERT_FALSE (nano::from_string_hex (difficulty_text, difficulty)); double multiplier 
(response.get<double> ("multiplier")); ASSERT_NEAR (multiplier, nano::difficulty::to_multiplier (difficulty, node->default_difficulty (nano::work_version::work_1)), 1e-6); }; } TEST (rpc, successors) { nano::system system; auto node = add_ipc_enabled_node (system); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); nano::keypair key; auto genesis (node->latest (nano::dev::genesis_key.pub)); ASSERT_FALSE (genesis.is_zero ()); auto block (system.wallet (0)->send_action (nano::dev::genesis_key.pub, key.pub, 1)); ASSERT_NE (nullptr, block); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "successors"); request.put ("block", genesis.to_string ()); request.put ("count", std::to_string (std::numeric_limits<uint64_t>::max ())); auto response (wait_response (system, rpc, request)); auto & blocks_node (response.get_child ("blocks")); std::vector<nano::block_hash> blocks; for (auto i (blocks_node.begin ()), n (blocks_node.end ()); i != n; ++i) { blocks.push_back (nano::block_hash (i->second.get<std::string> (""))); } ASSERT_EQ (2, blocks.size ()); ASSERT_EQ (genesis, blocks[0]); ASSERT_EQ (block->hash (), blocks[1]); // RPC chain "reverse" option request.put ("action", "chain"); request.put ("reverse", "true"); auto response2 (wait_response (system, rpc, request, 10s)); ASSERT_EQ (response, response2); } TEST (rpc, bootstrap_any) { nano::system system0; auto node = add_ipc_enabled_node (system0); nano::system system1 (1); auto latest (system1.nodes[0]->latest (nano::dev::genesis_key.pub)); nano::send_block send (latest, nano::dev::genesis->account (), 100, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *system1.nodes[0]->work_generate_blocking (latest)); { auto transaction (system1.nodes[0]->store.tx_begin_write ()); ASSERT_EQ (nano::process_result::progress, system1.nodes[0]->ledger.process (transaction, send).code); } auto [rpc, rpc_ctx] = add_rpc (system0, node); boost::property_tree::ptree request; request.put ("action", "bootstrap_any"); auto response (wait_response (system0, rpc, request)); std::string success (response.get<std::string> ("success")); ASSERT_TRUE (success.empty ()); } TEST (rpc, republish) { nano::system system; nano::keypair key; nano::genesis genesis; auto node1 = add_ipc_enabled_node (system); system.add_node (); auto latest (node1->latest (nano::dev::genesis_key.pub)); nano::send_block send (latest, key.pub, 100, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node1->work_generate_blocking (latest)); node1->process (send); nano::open_block open (send.hash (), key.pub, key.pub, key.prv, key.pub, *node1->work_generate_blocking (key.pub)); ASSERT_EQ (nano::process_result::progress, node1->process (open).code); auto [rpc, rpc_ctx] = add_rpc (system, node1); boost::property_tree::ptree request; request.put ("action", "republish"); request.put ("hash", send.hash ().to_string ()); auto response (wait_response (system, rpc, request)); ASSERT_TIMELY (10s, system.nodes[1]->balance (nano::dev::genesis_key.pub) != nano::dev::genesis_amount); auto & blocks_node (response.get_child ("blocks")); std::vector<nano::block_hash> blocks; for (auto i (blocks_node.begin ()), n (blocks_node.end ()); i != n; ++i) { blocks.push_back (nano::block_hash (i->second.get<std::string> (""))); } ASSERT_EQ (1, blocks.size ()); ASSERT_EQ (send.hash (), blocks[0]); request.put ("hash", genesis.hash ().to_string ()); request.put ("count", 1); auto response1 (wait_response (system, rpc, request)); blocks_node = 
response1.get_child ("blocks"); blocks.clear (); for (auto i (blocks_node.begin ()), n (blocks_node.end ()); i != n; ++i) { blocks.push_back (nano::block_hash (i->second.get<std::string> (""))); } ASSERT_EQ (1, blocks.size ()); ASSERT_EQ (genesis.hash (), blocks[0]); request.put ("hash", open.hash ().to_string ()); request.put ("sources", 2); auto response2 (wait_response (system, rpc, request)); blocks_node = response2.get_child ("blocks"); blocks.clear (); for (auto i (blocks_node.begin ()), n (blocks_node.end ()); i != n; ++i) { blocks.push_back (nano::block_hash (i->second.get<std::string> (""))); } ASSERT_EQ (3, blocks.size ()); ASSERT_EQ (genesis.hash (), blocks[0]); ASSERT_EQ (send.hash (), blocks[1]); ASSERT_EQ (open.hash (), blocks[2]); } TEST (rpc, deterministic_key) { nano::system system0; auto node = add_ipc_enabled_node (system0); nano::raw_key seed; { auto transaction (system0.nodes[0]->wallets.tx_begin_read ()); system0.wallet (0)->store.seed (seed, transaction); } nano::account account0 (system0.wallet (0)->deterministic_insert ()); nano::account account1 (system0.wallet (0)->deterministic_insert ()); nano::account account2 (system0.wallet (0)->deterministic_insert ()); auto [rpc, rpc_ctx] = add_rpc (system0, node); boost::property_tree::ptree request; request.put ("action", "deterministic_key"); request.put ("seed", seed.to_string ()); request.put ("index", "0"); auto response0 (wait_response (system0, rpc, request)); std::string validate_text (response0.get<std::string> ("account")); ASSERT_EQ (account0.to_account (), validate_text); request.put ("index", "2"); auto response1 (wait_response (system0, rpc, request)); validate_text = response1.get<std::string> ("account"); ASSERT_NE (account1.to_account (), validate_text); ASSERT_EQ (account2.to_account (), validate_text); } TEST (rpc, accounts_balances) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "accounts_balances"); boost::property_tree::ptree entry; boost::property_tree::ptree peers_l; entry.put ("", nano::dev::genesis_key.pub.to_account ()); peers_l.push_back (std::make_pair ("", entry)); request.add_child ("accounts", peers_l); auto response (wait_response (system, rpc, request)); for (auto & balances : response.get_child ("balances")) { std::string account_text (balances.first); ASSERT_EQ (nano::dev::genesis_key.pub.to_account (), account_text); std::string balance_text (balances.second.get<std::string> ("balance")); ASSERT_EQ ("340282366920938463463374607431768211455", balance_text); std::string pending_text (balances.second.get<std::string> ("pending")); ASSERT_EQ ("0", pending_text); } } TEST (rpc, accounts_frontiers) { nano::system system; auto node = add_ipc_enabled_node (system); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "accounts_frontiers"); boost::property_tree::ptree entry; boost::property_tree::ptree peers_l; entry.put ("", nano::dev::genesis_key.pub.to_account ()); peers_l.push_back (std::make_pair ("", entry)); request.add_child ("accounts", peers_l); auto response (wait_response (system, rpc, request)); for (auto & frontiers : response.get_child ("frontiers")) { std::string account_text (frontiers.first); ASSERT_EQ (nano::dev::genesis_key.pub.to_account (), account_text); std::string frontier_text (frontiers.second.get<std::string> ("")); ASSERT_EQ 
(node->latest (nano::dev::genesis->account ()), nano::block_hash{ frontier_text }); } } TEST (rpc, accounts_pending) { nano::system system; auto node = add_ipc_enabled_node (system); nano::keypair key1; system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); auto block1 (system.wallet (0)->send_action (nano::dev::genesis_key.pub, key1.pub, 100)); node->scheduler.flush (); ASSERT_TIMELY (5s, !node->active.active (*block1)); ASSERT_TIMELY (5s, node->ledger.cache.cemented_count == 2 && node->confirmation_height_processor.current ().is_zero () && node->confirmation_height_processor.awaiting_processing_size () == 0); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "accounts_pending"); boost::property_tree::ptree entry; boost::property_tree::ptree peers_l; entry.put ("", key1.pub.to_account ()); peers_l.push_back (std::make_pair ("", entry)); request.add_child ("accounts", peers_l); request.put ("count", "100"); { auto response (wait_response (system, rpc, request)); for (auto & blocks : response.get_child ("blocks")) { std::string account_text (blocks.first); ASSERT_EQ (key1.pub.to_account (), account_text); nano::block_hash hash1 (blocks.second.begin ()->second.get<std::string> ("")); ASSERT_EQ (block1->hash (), hash1); } } request.put ("sorting", "true"); // Sorting test { auto response (wait_response (system, rpc, request)); for (auto & blocks : response.get_child ("blocks")) { std::string account_text (blocks.first); ASSERT_EQ (key1.pub.to_account (), account_text); nano::block_hash hash1 (blocks.second.begin ()->first); ASSERT_EQ (block1->hash (), hash1); std::string amount (blocks.second.begin ()->second.get<std::string> ("")); ASSERT_EQ ("100", amount); } } request.put ("threshold", "100"); // Threshold test { auto response (wait_response (system, rpc, request)); std::unordered_map<nano::block_hash, nano::uint128_union> blocks; for (auto & pending : response.get_child ("blocks")) { std::string account_text (pending.first); ASSERT_EQ (key1.pub.to_account (), account_text); for (auto i (pending.second.begin ()), j (pending.second.end ()); i != j; ++i) { nano::block_hash hash; hash.decode_hex (i->first); nano::uint128_union amount; amount.decode_dec (i->second.get<std::string> ("")); blocks[hash] = amount; boost::optional<std::string> source (i->second.get_optional<std::string> ("source")); ASSERT_FALSE (source.is_initialized ()); } } ASSERT_EQ (blocks[block1->hash ()], 100); } request.put ("source", "true"); { auto response (wait_response (system, rpc, request)); std::unordered_map<nano::block_hash, nano::uint128_union> amounts; std::unordered_map<nano::block_hash, nano::account> sources; for (auto & pending : response.get_child ("blocks")) { std::string account_text (pending.first); ASSERT_EQ (key1.pub.to_account (), account_text); for (auto i (pending.second.begin ()), j (pending.second.end ()); i != j; ++i) { nano::block_hash hash; hash.decode_hex (i->first); amounts[hash].decode_dec (i->second.get<std::string> ("amount")); sources[hash].decode_account (i->second.get<std::string> ("source")); } } ASSERT_EQ (amounts[block1->hash ()], 100); ASSERT_EQ (sources[block1->hash ()], nano::dev::genesis_key.pub); } check_block_response_count (system, rpc, request, 1); rpc_ctx->io_scope->reset (); reset_confirmation_height (system.nodes.front ()->store, block1->account ()); rpc_ctx->io_scope->renew (); check_block_response_count (system, rpc, request, 0); request.put ("include_only_confirmed", "false"); rpc_ctx->io_scope->renew (); 
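// With include_only_confirmed set to false the still-unconfirmed pending entry becomes visible again, so the final count check expects one block.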
check_block_response_count (system, rpc, request, 1); } TEST (rpc, blocks) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "blocks"); boost::property_tree::ptree entry; boost::property_tree::ptree peers_l; entry.put ("", node->latest (nano::dev::genesis->account ()).to_string ()); peers_l.push_back (std::make_pair ("", entry)); request.add_child ("hashes", peers_l); auto response (wait_response (system, rpc, request)); for (auto & blocks : response.get_child ("blocks")) { std::string hash_text (blocks.first); ASSERT_EQ (node->latest (nano::dev::genesis->account ()).to_string (), hash_text); std::string blocks_text (blocks.second.get<std::string> ("")); ASSERT_FALSE (blocks_text.empty ()); } } TEST (rpc, wallet_info) { nano::system system; nano::node_config node_config (nano::get_available_port (), system.logging); node_config.enable_voting = true; auto node = add_ipc_enabled_node (system, node_config); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); nano::keypair key; system.wallet (0)->insert_adhoc (key.prv); // at first, 1 block and 1 confirmed -- the genesis ASSERT_EQ (1, node->ledger.cache.block_count); ASSERT_EQ (1, node->ledger.cache.cemented_count); auto send (system.wallet (0)->send_action (nano::dev::genesis_key.pub, key.pub, nano::Gxrb_ratio)); // after the send, expect 2 blocks immediately, then 2 confirmed in a timely manner, // and finally 3 blocks and 3 confirmed after the wallet generates the receive block for this send ASSERT_EQ (2, node->ledger.cache.block_count); ASSERT_TIMELY (5s, 2 == node->ledger.cache.cemented_count); ASSERT_TIMELY (5s, 3 == node->ledger.cache.block_count && 3 == node->ledger.cache.cemented_count); // do another send to be able to expect some "pending" down below auto send2 (system.wallet (0)->send_action (nano::dev::genesis_key.pub, key.pub, 1)); ASSERT_EQ (4, node->ledger.cache.block_count); nano::account account (system.wallet (0)->deterministic_insert ()); { auto transaction (node->wallets.tx_begin_write ()); system.wallet (0)->store.erase (transaction, account); } account = system.wallet (0)->deterministic_insert (); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "wallet_info"); request.put ("wallet", node->wallets.items.begin ()->first.to_string ()); auto response (wait_response (system, rpc, request)); std::string balance_text (response.get<std::string> ("balance")); ASSERT_EQ ("340282366920938463463374607431768211454", balance_text); std::string pending_text (response.get<std::string> ("pending")); ASSERT_EQ ("1", pending_text); std::string count_text (response.get<std::string> ("accounts_count")); ASSERT_EQ ("3", count_text); std::string block_count_text (response.get<std::string> ("accounts_block_count")); ASSERT_EQ ("4", block_count_text); std::string cemented_block_count_text (response.get<std::string> ("accounts_cemented_block_count")); ASSERT_EQ ("3", cemented_block_count_text); std::string adhoc_count (response.get<std::string> ("adhoc_count")); ASSERT_EQ ("2", adhoc_count); std::string deterministic_count (response.get<std::string> ("deterministic_count")); ASSERT_EQ ("1", deterministic_count); std::string index_text (response.get<std::string> ("deterministic_index")); ASSERT_EQ ("2", index_text); } TEST (rpc, wallet_balances) { nano::system system0; auto node = add_ipc_enabled_node (system0); system0.wallet (0)->insert_adhoc 
(nano::dev::genesis_key.prv); auto [rpc, rpc_ctx] = add_rpc (system0, node); boost::property_tree::ptree request; request.put ("action", "wallet_balances"); request.put ("wallet", node->wallets.items.begin ()->first.to_string ()); auto response (wait_response (system0, rpc, request)); for (auto & balances : response.get_child ("balances")) { std::string account_text (balances.first); ASSERT_EQ (nano::dev::genesis_key.pub.to_account (), account_text); std::string balance_text (balances.second.get<std::string> ("balance")); ASSERT_EQ ("340282366920938463463374607431768211455", balance_text); std::string pending_text (balances.second.get<std::string> ("pending")); ASSERT_EQ ("0", pending_text); } nano::keypair key; rpc_ctx->io_scope->reset (); system0.wallet (0)->insert_adhoc (key.prv); auto send (system0.wallet (0)->send_action (nano::dev::genesis_key.pub, key.pub, 1)); rpc_ctx->io_scope->renew (); request.put ("threshold", "2"); auto response1 (wait_response (system0, rpc, request)); for (auto & balances : response1.get_child ("balances")) { std::string account_text (balances.first); ASSERT_EQ (nano::dev::genesis_key.pub.to_account (), account_text); std::string balance_text (balances.second.get<std::string> ("balance")); ASSERT_EQ ("340282366920938463463374607431768211454", balance_text); std::string pending_text (balances.second.get<std::string> ("pending")); ASSERT_EQ ("0", pending_text); } } TEST (rpc, pending_exists) { nano::system system; auto node = add_ipc_enabled_node (system); nano::keypair key1; system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); auto hash0 (node->latest (nano::dev::genesis->account ())); auto block1 (system.wallet (0)->send_action (nano::dev::genesis_key.pub, key1.pub, 100)); node->scheduler.flush (); ASSERT_TIMELY (5s, !node->active.active (*block1)); ASSERT_TIMELY (5s, node->ledger.cache.cemented_count == 2 && node->confirmation_height_processor.current ().is_zero () && node->confirmation_height_processor.awaiting_processing_size () == 0); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; auto pending_exists = [&system, &request, rpc = rpc] (const char * exists_a) { auto response0 (wait_response (system, rpc, request)); std::string exists_text (response0.get<std::string> ("exists")); ASSERT_EQ (exists_a, exists_text); }; request.put ("action", "pending_exists"); request.put ("hash", hash0.to_string ()); pending_exists ("0"); node->store.pending.exists (node->store.tx_begin_read (), nano::pending_key (nano::dev::genesis_key.pub, block1->hash ())); request.put ("hash", block1->hash ().to_string ()); pending_exists ("1"); pending_exists ("1"); rpc_ctx->io_scope->reset (); reset_confirmation_height (node->store, block1->account ()); rpc_ctx->io_scope->renew (); pending_exists ("0"); request.put ("include_only_confirmed", "false"); rpc_ctx->io_scope->renew (); pending_exists ("1"); } TEST (rpc, wallet_pending) { nano::system system0; auto node = add_ipc_enabled_node (system0); nano::keypair key1; system0.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); system0.wallet (0)->insert_adhoc (key1.prv); auto iterations (0); auto block1 (system0.wallet (0)->send_action (nano::dev::genesis_key.pub, key1.pub, 100)); node->scheduler.flush (); while (node->active.active (*block1) || node->ledger.cache.cemented_count < 2 || !node->confirmation_height_processor.current ().is_zero () || node->confirmation_height_processor.awaiting_processing_size () != 0) { system0.poll (); ++iterations; ASSERT_LT (iterations, 200); } auto [rpc, 
rpc_ctx] = add_rpc (system0, node); boost::property_tree::ptree request; request.put ("action", "wallet_pending"); request.put ("wallet", node->wallets.items.begin ()->first.to_string ()); request.put ("count", "100"); auto response (wait_response (system0, rpc, request)); ASSERT_EQ (1, response.get_child ("blocks").size ()); for (auto & pending : response.get_child ("blocks")) { std::string account_text (pending.first); ASSERT_EQ (key1.pub.to_account (), account_text); nano::block_hash hash1 (pending.second.begin ()->second.get<std::string> ("")); ASSERT_EQ (block1->hash (), hash1); } request.put ("threshold", "100"); // Threshold test auto response0 (wait_response (system0, rpc, request)); std::unordered_map<nano::block_hash, nano::uint128_union> blocks; ASSERT_EQ (1, response0.get_child ("blocks").size ()); for (auto & pending : response0.get_child ("blocks")) { std::string account_text (pending.first); ASSERT_EQ (key1.pub.to_account (), account_text); for (auto i (pending.second.begin ()), j (pending.second.end ()); i != j; ++i) { nano::block_hash hash; hash.decode_hex (i->first); nano::uint128_union amount; amount.decode_dec (i->second.get<std::string> ("")); blocks[hash] = amount; boost::optional<std::string> source (i->second.get_optional<std::string> ("source")); ASSERT_FALSE (source.is_initialized ()); boost::optional<uint8_t> min_version (i->second.get_optional<uint8_t> ("min_version")); ASSERT_FALSE (min_version.is_initialized ()); } } ASSERT_EQ (blocks[block1->hash ()], 100); request.put ("threshold", "101"); auto response1 (wait_response (system0, rpc, request)); auto & pending1 (response1.get_child ("blocks")); ASSERT_EQ (0, pending1.size ()); request.put ("threshold", "0"); request.put ("source", "true"); request.put ("min_version", "true"); auto response2 (wait_response (system0, rpc, request)); std::unordered_map<nano::block_hash, nano::uint128_union> amounts; std::unordered_map<nano::block_hash, nano::account> sources; ASSERT_EQ (1, response2.get_child ("blocks").size ()); for (auto & pending : response2.get_child ("blocks")) { std::string account_text (pending.first); ASSERT_EQ (key1.pub.to_account (), account_text); for (auto i (pending.second.begin ()), j (pending.second.end ()); i != j; ++i) { nano::block_hash hash; hash.decode_hex (i->first); amounts[hash].decode_dec (i->second.get<std::string> ("amount")); sources[hash].decode_account (i->second.get<std::string> ("source")); ASSERT_EQ (i->second.get<uint8_t> ("min_version"), 0); } } ASSERT_EQ (amounts[block1->hash ()], 100); ASSERT_EQ (sources[block1->hash ()], nano::dev::genesis_key.pub); check_block_response_count (system0, rpc, request, 1); rpc_ctx->io_scope->reset (); reset_confirmation_height (system0.nodes.front ()->store, block1->account ()); rpc_ctx->io_scope->renew (); check_block_response_count (system0, rpc, request, 0); request.put ("include_only_confirmed", "false"); rpc_ctx->io_scope->renew (); check_block_response_count (system0, rpc, request, 1); } TEST (rpc, receive_minimum) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "receive_minimum"); auto response (wait_response (system, rpc, request)); std::string amount (response.get<std::string> ("amount")); ASSERT_EQ (node->config.receive_minimum.to_string_dec (), amount); } TEST (rpc, receive_minimum_set) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node); 
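// receive_minimum_set should change the node's in-memory receive_minimum: the value is asserted to differ from 100 before the call and to equal 100 once the RPC returns successfully.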
boost::property_tree::ptree request; request.put ("action", "receive_minimum_set"); request.put ("amount", "100"); ASSERT_NE (node->config.receive_minimum.to_string_dec (), "100"); auto response (wait_response (system, rpc, request)); std::string success (response.get<std::string> ("success")); ASSERT_TRUE (success.empty ()); ASSERT_EQ (node->config.receive_minimum.to_string_dec (), "100"); } TEST (rpc, work_get) { nano::system system; auto node = add_ipc_enabled_node (system); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); system.wallet (0)->work_cache_blocking (nano::dev::genesis_key.pub, node->latest (nano::dev::genesis_key.pub)); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "work_get"); request.put ("wallet", node->wallets.items.begin ()->first.to_string ()); request.put ("account", nano::dev::genesis_key.pub.to_account ()); auto response (wait_response (system, rpc, request)); std::string work_text (response.get<std::string> ("work")); uint64_t work (1); auto transaction (node->wallets.tx_begin_read ()); node->wallets.items.begin ()->second->store.work_get (transaction, nano::dev::genesis->account (), work); ASSERT_EQ (nano::to_string_hex (work), work_text); } TEST (rpc, wallet_work_get) { nano::system system; auto node = add_ipc_enabled_node (system); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); system.wallet (0)->work_cache_blocking (nano::dev::genesis_key.pub, node->latest (nano::dev::genesis_key.pub)); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "wallet_work_get"); request.put ("wallet", node->wallets.items.begin ()->first.to_string ()); auto response (wait_response (system, rpc, request)); auto transaction (node->wallets.tx_begin_read ()); for (auto & works : response.get_child ("works")) { std::string account_text (works.first); ASSERT_EQ (nano::dev::genesis_key.pub.to_account (), account_text); std::string work_text (works.second.get<std::string> ("")); uint64_t work (1); node->wallets.items.begin ()->second->store.work_get (transaction, nano::dev::genesis->account (), work); ASSERT_EQ (nano::to_string_hex (work), work_text); } } TEST (rpc, work_set) { nano::system system; auto node = add_ipc_enabled_node (system); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); auto [rpc, rpc_ctx] = add_rpc (system, node); uint64_t work0 (100); boost::property_tree::ptree request; request.put ("action", "work_set"); request.put ("wallet", node->wallets.items.begin ()->first.to_string ()); request.put ("account", nano::dev::genesis_key.pub.to_account ()); request.put ("work", nano::to_string_hex (work0)); auto response (wait_response (system, rpc, request)); std::string success (response.get<std::string> ("success")); ASSERT_TRUE (success.empty ()); uint64_t work1 (1); auto transaction (node->wallets.tx_begin_read ()); node->wallets.items.begin ()->second->store.work_get (transaction, nano::dev::genesis->account (), work1); ASSERT_EQ (work1, work0); } TEST (rpc, search_pending_all) { nano::system system; auto node = add_ipc_enabled_node (system); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); auto latest (node->latest (nano::dev::genesis_key.pub)); nano::send_block block (latest, nano::dev::genesis_key.pub, nano::dev::genesis_amount - node->config.receive_minimum.number (), nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node->work_generate_blocking (latest)); { auto transaction 
(node->store.tx_begin_write ()); ASSERT_EQ (nano::process_result::progress, node->ledger.process (transaction, block).code); } auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "search_pending_all"); auto response (wait_response (system, rpc, request)); ASSERT_TIMELY (10s, node->balance (nano::dev::genesis_key.pub) == nano::dev::genesis_amount); } TEST (rpc, wallet_republish) { nano::system system; auto node1 = add_ipc_enabled_node (system); nano::genesis genesis; nano::keypair key; while (key.pub < nano::dev::genesis_key.pub) { nano::keypair key1; key.pub = key1.pub; key.prv = key1.prv; } system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); system.wallet (0)->insert_adhoc (key.prv); auto latest (node1->latest (nano::dev::genesis_key.pub)); nano::send_block send (latest, key.pub, 100, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node1->work_generate_blocking (latest)); node1->process (send); nano::open_block open (send.hash (), key.pub, key.pub, key.prv, key.pub, *node1->work_generate_blocking (key.pub)); ASSERT_EQ (nano::process_result::progress, node1->process (open).code); auto [rpc, rpc_ctx] = add_rpc (system, node1); boost::property_tree::ptree request; request.put ("action", "wallet_republish"); request.put ("wallet", node1->wallets.items.begin ()->first.to_string ()); request.put ("count", 1); auto response (wait_response (system, rpc, request)); auto & blocks_node (response.get_child ("blocks")); std::vector<nano::block_hash> blocks; for (auto i (blocks_node.begin ()), n (blocks_node.end ()); i != n; ++i) { blocks.push_back (nano::block_hash (i->second.get<std::string> (""))); } ASSERT_EQ (2, blocks.size ()); ASSERT_EQ (send.hash (), blocks[0]); ASSERT_EQ (open.hash (), blocks[1]); } TEST (rpc, delegators) { nano::system system; auto node1 = add_ipc_enabled_node (system); nano::keypair key; system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); system.wallet (0)->insert_adhoc (key.prv); auto latest (node1->latest (nano::dev::genesis_key.pub)); nano::send_block send (latest, key.pub, 100, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node1->work_generate_blocking (latest)); node1->process (send); nano::open_block open (send.hash (), nano::dev::genesis_key.pub, key.pub, key.prv, key.pub, *node1->work_generate_blocking (key.pub)); ASSERT_EQ (nano::process_result::progress, node1->process (open).code); auto [rpc, rpc_ctx] = add_rpc (system, node1); boost::property_tree::ptree request; request.put ("action", "delegators"); request.put ("account", nano::dev::genesis_key.pub.to_account ()); auto response (wait_response (system, rpc, request)); auto & delegators_node (response.get_child ("delegators")); boost::property_tree::ptree delegators; for (auto i (delegators_node.begin ()), n (delegators_node.end ()); i != n; ++i) { delegators.put ((i->first), (i->second.get<std::string> (""))); } ASSERT_EQ (2, delegators.size ()); ASSERT_EQ ("100", delegators.get<std::string> (nano::dev::genesis_key.pub.to_account ())); ASSERT_EQ ("340282366920938463463374607431768211355", delegators.get<std::string> (key.pub.to_account ())); } TEST (rpc, delegators_parameters) { nano::system system; auto node1 = add_ipc_enabled_node (system); nano::keypair key; auto latest (node1->latest (nano::dev::genesis_key.pub)); nano::send_block send (latest, key.pub, 100, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node1->work_generate_blocking (latest)); node1->process (send); nano::open_block open (send.hash (), 
nano::dev::genesis_key.pub, key.pub, key.prv, key.pub, *node1->work_generate_blocking (key.pub)); ASSERT_EQ (nano::process_result::progress, node1->process (open).code); auto [rpc, rpc_ctx] = add_rpc (system, node1); // Test with "count" = 2 boost::property_tree::ptree request; request.put ("action", "delegators"); request.put ("account", nano::dev::genesis_key.pub.to_account ()); request.put ("count", 2); auto response (wait_response (system, rpc, request)); auto & delegators_node (response.get_child ("delegators")); boost::property_tree::ptree delegators; for (auto i (delegators_node.begin ()), n (delegators_node.end ()); i != n; ++i) { delegators.put ((i->first), (i->second.get<std::string> (""))); } ASSERT_EQ (2, delegators.size ()); ASSERT_EQ ("100", delegators.get<std::string> (nano::dev::genesis_key.pub.to_account ())); ASSERT_EQ ("340282366920938463463374607431768211355", delegators.get<std::string> (key.pub.to_account ())); // Test with "count" = 1 request.put ("count", 1); auto response2 (wait_response (system, rpc, request)); auto & delegators_node2 (response2.get_child ("delegators")); boost::property_tree::ptree delegators2; for (auto i (delegators_node2.begin ()), n (delegators_node2.end ()); i != n; ++i) { delegators2.put ((i->first), (i->second.get<std::string> (""))); } ASSERT_EQ (1, delegators2.size ()); // What is first in ledger by public key? if (nano::dev::genesis_key.pub.number () < key.pub.number ()) { ASSERT_EQ ("100", delegators2.get<std::string> (nano::dev::genesis_key.pub.to_account ())); } else { ASSERT_EQ ("340282366920938463463374607431768211355", delegators2.get<std::string> (key.pub.to_account ())); } // Test with "threshold" request.put ("count", 1024); request.put ("threshold", 101); // higher than remaining genesis balance auto response3 (wait_response (system, rpc, request)); auto & delegators_node3 (response3.get_child ("delegators")); boost::property_tree::ptree delegators3; for (auto i (delegators_node3.begin ()), n (delegators_node3.end ()); i != n; ++i) { delegators3.put ((i->first), (i->second.get<std::string> (""))); } ASSERT_EQ (1, delegators3.size ()); ASSERT_EQ ("340282366920938463463374607431768211355", delegators3.get<std::string> (key.pub.to_account ())); // Test with "start" before last account request.put ("threshold", 0); auto last_account (key.pub); if (nano::dev::genesis_key.pub.number () > key.pub.number ()) { last_account = nano::dev::genesis_key.pub; } request.put ("start", nano::account (last_account.number () - 1).to_account ()); auto response4 (wait_response (system, rpc, request)); auto & delegators_node4 (response4.get_child ("delegators")); boost::property_tree::ptree delegators4; for (auto i (delegators_node4.begin ()), n (delegators_node4.end ()); i != n; ++i) { delegators4.put ((i->first), (i->second.get<std::string> (""))); } ASSERT_EQ (1, delegators4.size ()); boost::optional<std::string> balance (delegators4.get_optional<std::string> (last_account.to_account ())); ASSERT_TRUE (balance.is_initialized ()); // Test with "start" equal to last account request.put ("start", last_account.to_account ()); auto response5 (wait_response (system, rpc, request)); auto & delegators_node5 (response5.get_child ("delegators")); boost::property_tree::ptree delegators5; for (auto i (delegators_node5.begin ()), n (delegators_node5.end ()); i != n; ++i) { delegators5.put ((i->first), (i->second.get<std::string> (""))); } ASSERT_EQ (0, delegators5.size ()); } TEST (rpc, delegators_count) { nano::system system; auto node1 = 
add_ipc_enabled_node (system); nano::keypair key; system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); system.wallet (0)->insert_adhoc (key.prv); auto latest (node1->latest (nano::dev::genesis_key.pub)); nano::send_block send (latest, key.pub, 100, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node1->work_generate_blocking (latest)); node1->process (send); nano::open_block open (send.hash (), nano::dev::genesis_key.pub, key.pub, key.prv, key.pub, *node1->work_generate_blocking (key.pub)); ASSERT_EQ (nano::process_result::progress, node1->process (open).code); auto [rpc, rpc_ctx] = add_rpc (system, node1); boost::property_tree::ptree request; request.put ("action", "delegators_count"); request.put ("account", nano::dev::genesis_key.pub.to_account ()); auto response (wait_response (system, rpc, request)); std::string count (response.get<std::string> ("count")); ASSERT_EQ ("2", count); } TEST (rpc, account_info) { nano::system system; nano::keypair key; nano::genesis genesis; auto node1 = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node1); boost::property_tree::ptree request; request.put ("action", "account_info"); request.put ("account", nano::account ().to_account ()); // Test for a non existing account { auto response (wait_response (system, rpc, request)); auto error (response.get_optional<std::string> ("error")); ASSERT_TRUE (error.is_initialized ()); ASSERT_EQ (error.get (), std::error_code (nano::error_common::account_not_found).message ()); } rpc_ctx->io_scope->reset (); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); system.wallet (0)->insert_adhoc (key.prv); auto latest (node1->latest (nano::dev::genesis_key.pub)); nano::send_block send (latest, key.pub, 100, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node1->work_generate_blocking (latest)); node1->process (send); auto time (nano::seconds_since_epoch ()); { auto transaction = node1->store.tx_begin_write (); node1->store.confirmation_height.put (transaction, nano::dev::genesis_key.pub, { 1, genesis.hash () }); } rpc_ctx->io_scope->renew (); request.put ("account", nano::dev::genesis_key.pub.to_account ()); { auto response (wait_response (system, rpc, request)); std::string frontier (response.get<std::string> ("frontier")); ASSERT_EQ (send.hash ().to_string (), frontier); std::string open_block (response.get<std::string> ("open_block")); ASSERT_EQ (genesis.hash ().to_string (), open_block); std::string representative_block (response.get<std::string> ("representative_block")); ASSERT_EQ (genesis.hash ().to_string (), representative_block); std::string balance (response.get<std::string> ("balance")); ASSERT_EQ ("100", balance); std::string modified_timestamp (response.get<std::string> ("modified_timestamp")); ASSERT_LT (std::abs ((long)time - stol (modified_timestamp)), 5); std::string block_count (response.get<std::string> ("block_count")); ASSERT_EQ ("2", block_count); std::string confirmation_height (response.get<std::string> ("confirmation_height")); ASSERT_EQ ("1", confirmation_height); std::string confirmation_height_frontier (response.get<std::string> ("confirmation_height_frontier")); ASSERT_EQ (genesis.hash ().to_string (), confirmation_height_frontier); ASSERT_EQ (0, response.get<uint8_t> ("account_version")); boost::optional<std::string> weight (response.get_optional<std::string> ("weight")); ASSERT_FALSE (weight.is_initialized ()); boost::optional<std::string> pending (response.get_optional<std::string> ("pending")); ASSERT_FALSE 
(pending.is_initialized ()); boost::optional<std::string> representative (response.get_optional<std::string> ("representative")); ASSERT_FALSE (representative.is_initialized ()); } // Test for optional values request.put ("weight", "true"); request.put ("pending", "1"); request.put ("representative", "1"); { auto response (wait_response (system, rpc, request)); std::string weight2 (response.get<std::string> ("weight")); ASSERT_EQ ("100", weight2); std::string pending2 (response.get<std::string> ("pending")); ASSERT_EQ ("0", pending2); std::string representative2 (response.get<std::string> ("representative")); ASSERT_EQ (nano::dev::genesis_key.pub.to_account (), representative2); } // Test for confirmed only blocks rpc_ctx->io_scope->reset (); nano::keypair key1; { latest = node1->latest (nano::dev::genesis_key.pub); nano::send_block send1 (latest, key1.pub, 50, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node1->work_generate_blocking (latest)); node1->process (send1); nano::send_block send2 (send1.hash (), key1.pub, 25, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node1->work_generate_blocking (send1.hash ())); node1->process (send2); nano::state_block state_change (nano::dev::genesis_key.pub, send2.hash (), key1.pub, 25, 0, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node1->work_generate_blocking (send2.hash ())); node1->process (state_change); nano::open_block open (send1.hash (), nano::dev::genesis_key.pub, key1.pub, key1.prv, key1.pub, *node1->work_generate_blocking (key1.pub)); node1->process (open); } rpc_ctx->io_scope->renew (); { auto response (wait_response (system, rpc, request)); std::string balance (response.get<std::string> ("balance")); ASSERT_EQ ("25", balance); } request.put ("include_confirmed", true); { auto response (wait_response (system, rpc, request)); auto balance (response.get<std::string> ("balance")); ASSERT_EQ ("25", balance); auto confirmed_balance (response.get<std::string> ("confirmed_balance")); ASSERT_EQ ("340282366920938463463374607431768211455", confirmed_balance); auto representative (response.get<std::string> ("representative")); ASSERT_EQ (representative, key1.pub.to_account ()); auto confirmed_representative (response.get<std::string> ("confirmed_representative")); ASSERT_EQ (confirmed_representative, nano::dev::genesis_key.pub.to_account ()); auto confirmed_frontier (response.get<std::string> ("confirmed_frontier")); ASSERT_EQ (nano::dev::genesis->hash ().to_string (), confirmed_frontier); auto confirmed_height (response.get<uint64_t> ("confirmed_height")); ASSERT_EQ (1, confirmed_height); } request.put ("account", key1.pub.to_account ()); { auto response (wait_response (system, rpc, request)); std::string pending (response.get<std::string> ("pending")); ASSERT_EQ ("25", pending); std::string confirmed_pending (response.get<std::string> ("confirmed_pending")); ASSERT_EQ ("0", confirmed_pending); } request.put ("include_confirmed", false); { auto response (wait_response (system, rpc, request)); std::string pending (response.get<std::string> ("pending")); ASSERT_EQ ("25", pending); // These fields shouldn't exist auto confirmed_balance (response.get_optional<std::string> ("confirmed_balance")); ASSERT_FALSE (confirmed_balance.is_initialized ()); auto confirmed_pending (response.get_optional<std::string> ("confirmed_pending")); ASSERT_FALSE (confirmed_pending.is_initialized ()); auto confirmed_representative (response.get_optional<std::string> ("confirmed_representative")); ASSERT_FALSE 
(confirmed_representative.is_initialized ()); auto confirmed_frontier (response.get_optional<std::string> ("confirmed_frontier")); ASSERT_FALSE (confirmed_frontier.is_initialized ()); auto confirmed_height (response.get_optional<uint64_t> ("confirmed_height")); ASSERT_FALSE (confirmed_height.is_initialized ()); } } /** Make sure we can use json block literals instead of string as input */ TEST (rpc, json_block_input) { nano::system system; auto node1 = add_ipc_enabled_node (system); nano::keypair key; system.wallet (0)->insert_adhoc (key.prv); nano::state_block send (nano::dev::genesis->account (), node1->latest (nano::dev::genesis_key.pub), nano::dev::genesis->account (), nano::dev::genesis_amount - nano::Gxrb_ratio, key.pub, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, 0); auto [rpc, rpc_ctx] = add_rpc (system, node1); boost::property_tree::ptree request; request.put ("action", "sign"); request.put ("json_block", "true"); std::string wallet; node1->wallets.items.begin ()->first.encode_hex (wallet); request.put ("wallet", wallet); request.put ("account", key.pub.to_account ()); boost::property_tree::ptree json; send.serialize_json (json); request.add_child ("block", json); auto response (wait_response (system, rpc, request, 10s)); bool json_error{ false }; nano::state_block block (json_error, response.get_child ("block")); ASSERT_FALSE (json_error); ASSERT_FALSE (nano::validate_message (key.pub, send.hash (), block.block_signature ())); ASSERT_NE (block.block_signature (), send.block_signature ()); ASSERT_EQ (block.hash (), send.hash ()); } /** Make sure we can receive json block literals instead of string as output */ TEST (rpc, json_block_output) { nano::system system; auto node1 = add_ipc_enabled_node (system); nano::keypair key; auto latest (node1->latest (nano::dev::genesis_key.pub)); nano::send_block send (latest, key.pub, 100, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node1->work_generate_blocking (latest)); node1->process (send); auto [rpc, rpc_ctx] = add_rpc (system, node1); boost::property_tree::ptree request; request.put ("action", "block_info"); request.put ("json_block", "true"); request.put ("hash", send.hash ().to_string ()); auto response (wait_response (system, rpc, request)); // Make sure contents contains a valid JSON subtree instread of stringified json bool json_error{ false }; nano::send_block send_from_json (json_error, response.get_child ("contents")); ASSERT_FALSE (json_error); } TEST (rpc, blocks_info) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node); auto check_blocks = [node] (boost::property_tree::ptree & response) { for (auto & blocks : response.get_child ("blocks")) { std::string hash_text (blocks.first); ASSERT_EQ (node->latest (nano::dev::genesis->account ()).to_string (), hash_text); std::string account_text (blocks.second.get<std::string> ("block_account")); ASSERT_EQ (nano::dev::genesis_key.pub.to_account (), account_text); std::string amount_text (blocks.second.get<std::string> ("amount")); ASSERT_EQ (nano::dev::genesis_amount.convert_to<std::string> (), amount_text); std::string blocks_text (blocks.second.get<std::string> ("contents")); ASSERT_FALSE (blocks_text.empty ()); boost::optional<std::string> pending (blocks.second.get_optional<std::string> ("pending")); ASSERT_FALSE (pending.is_initialized ()); boost::optional<std::string> source (blocks.second.get_optional<std::string> ("source_account")); ASSERT_FALSE (source.is_initialized ()); std::string balance_text 
(blocks.second.get<std::string> ("balance")); ASSERT_EQ (nano::dev::genesis_amount.convert_to<std::string> (), balance_text); ASSERT_TRUE (blocks.second.get<bool> ("confirmed")); // Genesis block is confirmed by default std::string successor_text (blocks.second.get<std::string> ("successor")); ASSERT_EQ (nano::block_hash (0).to_string (), successor_text); // Genesis block doesn't have successor yet } }; boost::property_tree::ptree request; request.put ("action", "blocks_info"); boost::property_tree::ptree entry; boost::property_tree::ptree hashes; entry.put ("", node->latest (nano::dev::genesis->account ()).to_string ()); hashes.push_back (std::make_pair ("", entry)); request.add_child ("hashes", hashes); { auto response (wait_response (system, rpc, request)); check_blocks (response); } std::string random_hash = nano::block_hash ().to_string (); entry.put ("", random_hash); hashes.push_back (std::make_pair ("", entry)); request.erase ("hashes"); request.add_child ("hashes", hashes); { auto response (wait_response (system, rpc, request)); ASSERT_EQ (std::error_code (nano::error_blocks::not_found).message (), response.get<std::string> ("error")); } request.put ("include_not_found", "true"); { auto response (wait_response (system, rpc, request)); check_blocks (response); auto & blocks_not_found (response.get_child ("blocks_not_found")); ASSERT_EQ (1, blocks_not_found.size ()); ASSERT_EQ (random_hash, blocks_not_found.begin ()->second.get<std::string> ("")); } request.put ("source", "true"); request.put ("pending", "1"); { auto response (wait_response (system, rpc, request)); for (auto & blocks : response.get_child ("blocks")) { std::string source (blocks.second.get<std::string> ("source_account")); ASSERT_EQ ("0", source); std::string pending (blocks.second.get<std::string> ("pending")); ASSERT_EQ ("0", pending); } } } TEST (rpc, blocks_info_subtype) { nano::system system; auto node1 = add_ipc_enabled_node (system); nano::keypair key; system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); system.wallet (0)->insert_adhoc (key.prv); auto send (system.wallet (0)->send_action (nano::dev::genesis_key.pub, nano::dev::genesis_key.pub, nano::Gxrb_ratio)); ASSERT_NE (nullptr, send); auto receive (system.wallet (0)->receive_action (send->hash (), key.pub, nano::Gxrb_ratio, send->link ().as_account ())); ASSERT_NE (nullptr, receive); auto change (system.wallet (0)->change_action (nano::dev::genesis_key.pub, key.pub)); ASSERT_NE (nullptr, change); auto [rpc, rpc_ctx] = add_rpc (system, node1); boost::property_tree::ptree request; request.put ("action", "blocks_info"); boost::property_tree::ptree hashes; boost::property_tree::ptree entry; entry.put ("", send->hash ().to_string ()); hashes.push_back (std::make_pair ("", entry)); entry.put ("", receive->hash ().to_string ()); hashes.push_back (std::make_pair ("", entry)); entry.put ("", change->hash ().to_string ()); hashes.push_back (std::make_pair ("", entry)); request.add_child ("hashes", hashes); auto response (wait_response (system, rpc, request)); auto & blocks (response.get_child ("blocks")); ASSERT_EQ (3, blocks.size ()); auto send_subtype (blocks.get_child (send->hash ().to_string ()).get<std::string> ("subtype")); ASSERT_EQ (send_subtype, "send"); auto receive_subtype (blocks.get_child (receive->hash ().to_string ()).get<std::string> ("subtype")); ASSERT_EQ (receive_subtype, "receive"); auto change_subtype (blocks.get_child (change->hash ().to_string ()).get<std::string> ("subtype")); ASSERT_EQ (change_subtype, "change"); // Successor 
fields auto send_successor (blocks.get_child (send->hash ().to_string ()).get<std::string> ("successor")); ASSERT_EQ (send_successor, receive->hash ().to_string ()); auto receive_successor (blocks.get_child (receive->hash ().to_string ()).get<std::string> ("successor")); ASSERT_EQ (receive_successor, change->hash ().to_string ()); auto change_successor (blocks.get_child (change->hash ().to_string ()).get<std::string> ("successor")); ASSERT_EQ (change_successor, nano::block_hash (0).to_string ()); // Change block doesn't have successor yet } TEST (rpc, block_info_successor) { nano::system system; auto node1 = add_ipc_enabled_node (system); nano::keypair key; auto latest (node1->latest (nano::dev::genesis_key.pub)); nano::send_block send (latest, key.pub, 100, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node1->work_generate_blocking (latest)); node1->process (send); auto [rpc, rpc_ctx] = add_rpc (system, node1); boost::property_tree::ptree request; request.put ("action", "block_info"); request.put ("hash", latest.to_string ()); auto response (wait_response (system, rpc, request)); // Make sure send block is successor of genesis std::string successor_text (response.get<std::string> ("successor")); ASSERT_EQ (successor_text, send.hash ().to_string ()); std::string account_text (response.get<std::string> ("block_account")); ASSERT_EQ (nano::dev::genesis_key.pub.to_account (), account_text); std::string amount_text (response.get<std::string> ("amount")); ASSERT_EQ (nano::dev::genesis_amount.convert_to<std::string> (), amount_text); } TEST (rpc, block_info_pruning) { nano::system system; nano::node_config node_config0 (nano::get_available_port (), system.logging); node_config0.receive_minimum = nano::dev::genesis_amount; // Prevent auto-receive & receive1 block conflicts auto & node0 = *system.add_node (node_config0); nano::node_config node_config1 (nano::get_available_port (), system.logging); node_config1.enable_voting = false; // Remove after allowing pruned voting nano::node_flags node_flags; node_flags.enable_pruning = true; auto node1 = add_ipc_enabled_node (system, node_config1, node_flags); auto latest (node1->latest (nano::dev::genesis_key.pub)); auto send1 (std::make_shared<nano::send_block> (latest, nano::dev::genesis_key.pub, nano::dev::genesis_amount - nano::Gxrb_ratio, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node1->work_generate_blocking (latest))); node1->process_active (send1); auto receive1 (std::make_shared<nano::receive_block> (send1->hash (), send1->hash (), nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node1->work_generate_blocking (send1->hash ()))); node1->process_active (receive1); node1->block_processor.flush (); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); ASSERT_TIMELY (5s, node1->ledger.cache.cemented_count == 3 && node1->confirmation_height_processor.current ().is_zero () && node1->confirmation_height_processor.awaiting_processing_size () == 0); // Pruning action { auto transaction (node1->store.tx_begin_write ()); ASSERT_EQ (1, node1->ledger.pruning_action (transaction, send1->hash (), 1)); ASSERT_TRUE (node1->store.block.exists (transaction, receive1->hash ())); } auto [rpc, rpc_ctx] = add_rpc (system, node1); // Pruned block boost::property_tree::ptree request; request.put ("action", "block_info"); request.put ("hash", send1->hash ().to_string ()); auto response (wait_response (system, rpc, request)); ASSERT_EQ (std::error_code (nano::error_blocks::not_found).message (), response.get<std::string> ("error")); 
// Existing block with previous pruned boost::property_tree::ptree request2; request2.put ("action", "block_info"); request2.put ("json_block", "true"); request2.put ("hash", receive1->hash ().to_string ()); auto response2 (wait_response (system, rpc, request2)); std::string account_text (response2.get<std::string> ("block_account")); ASSERT_EQ (nano::dev::genesis_key.pub.to_account (), account_text); boost::optional<std::string> amount (response2.get_optional<std::string> ("amount")); ASSERT_FALSE (amount.is_initialized ()); // Cannot calculate amount bool json_error{ false }; nano::receive_block receive_from_json (json_error, response2.get_child ("contents")); ASSERT_FALSE (json_error); ASSERT_EQ (receive1->full_hash (), receive_from_json.full_hash ()); std::string balance_text (response2.get<std::string> ("balance")); ASSERT_EQ (nano::dev::genesis_amount.convert_to<std::string> (), balance_text); ASSERT_TRUE (response2.get<bool> ("confirmed")); std::string successor_text (response2.get<std::string> ("successor")); ASSERT_EQ (successor_text, nano::block_hash (0).to_string ()); // receive1 block doesn't have successor yet } TEST (rpc, pruned_exists) { nano::system system; nano::node_config node_config0 (nano::get_available_port (), system.logging); node_config0.receive_minimum = nano::dev::genesis_amount; // Prevent auto-receive & receive1 block conflicts auto & node0 = *system.add_node (node_config0); nano::node_config node_config1 (nano::get_available_port (), system.logging); node_config1.enable_voting = false; // Remove after allowing pruned voting nano::node_flags node_flags; node_flags.enable_pruning = true; auto node1 = add_ipc_enabled_node (system, node_config1, node_flags); auto latest (node1->latest (nano::dev::genesis_key.pub)); auto send1 (std::make_shared<nano::send_block> (latest, nano::dev::genesis_key.pub, nano::dev::genesis_amount - nano::Gxrb_ratio, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node1->work_generate_blocking (latest))); node1->process_active (send1); auto receive1 (std::make_shared<nano::receive_block> (send1->hash (), send1->hash (), nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node1->work_generate_blocking (send1->hash ()))); node1->process_active (receive1); node1->block_processor.flush (); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); ASSERT_TIMELY (5s, node1->ledger.cache.cemented_count == 3 && node1->confirmation_height_processor.current ().is_zero () && node1->confirmation_height_processor.awaiting_processing_size () == 0); // Pruning action { auto transaction (node1->store.tx_begin_write ()); ASSERT_EQ (1, node1->ledger.pruning_action (transaction, send1->hash (), 1)); ASSERT_TRUE (node1->store.block.exists (transaction, receive1->hash ())); } auto [rpc, rpc_ctx] = add_rpc (system, node1); // Pruned block boost::property_tree::ptree request; request.put ("action", "pruned_exists"); request.put ("hash", send1->hash ().to_string ()); auto response (wait_response (system, rpc, request)); ASSERT_TRUE (response.get<bool> ("exists")); // Existing block with previous pruned boost::property_tree::ptree request2; request2.put ("action", "pruned_exists"); request2.put ("hash", receive1->hash ().to_string ()); auto response2 (wait_response (system, rpc, request2)); ASSERT_FALSE (response2.get<bool> ("exists")); } TEST (rpc, work_peers_all) { nano::system system; auto node1 = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node1); boost::property_tree::ptree request; request.put ("action", 
"work_peer_add"); request.put ("address", "::1"); request.put ("port", "0"); auto response (wait_response (system, rpc, request)); std::string success (response.get<std::string> ("success", "")); ASSERT_TRUE (success.empty ()); boost::property_tree::ptree request1; request1.put ("action", "work_peers"); auto response1 (wait_response (system, rpc, request1)); auto & peers_node (response1.get_child ("work_peers")); std::vector<std::string> peers; for (auto i (peers_node.begin ()), n (peers_node.end ()); i != n; ++i) { peers.push_back (i->second.get<std::string> ("")); } ASSERT_EQ (1, peers.size ()); ASSERT_EQ ("::1:0", peers[0]); boost::property_tree::ptree request2; request2.put ("action", "work_peers_clear"); auto response2 (wait_response (system, rpc, request2)); success = response2.get<std::string> ("success", ""); ASSERT_TRUE (success.empty ()); auto response3 (wait_response (system, rpc, request1, 10s)); peers_node = response3.get_child ("work_peers"); ASSERT_EQ (0, peers_node.size ()); } TEST (rpc, ledger) { nano::system system; auto node = add_ipc_enabled_node (system); nano::keypair key; auto latest (node->latest (nano::dev::genesis_key.pub)); auto genesis_balance (nano::dev::genesis_amount); auto send_amount (genesis_balance - 100); genesis_balance -= send_amount; nano::send_block send (latest, key.pub, genesis_balance, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node->work_generate_blocking (latest)); node->process (send); nano::open_block open (send.hash (), nano::dev::genesis_key.pub, key.pub, key.prv, key.pub, *node->work_generate_blocking (key.pub)); ASSERT_EQ (nano::process_result::progress, node->process (open).code); auto time (nano::seconds_since_epoch ()); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "ledger"); request.put ("sorting", true); request.put ("count", "1"); { auto response (wait_response (system, rpc, request)); for (auto & account : response.get_child ("accounts")) { std::string account_text (account.first); ASSERT_EQ (key.pub.to_account (), account_text); std::string frontier (account.second.get<std::string> ("frontier")); ASSERT_EQ (open.hash ().to_string (), frontier); std::string open_block (account.second.get<std::string> ("open_block")); ASSERT_EQ (open.hash ().to_string (), open_block); std::string representative_block (account.second.get<std::string> ("representative_block")); ASSERT_EQ (open.hash ().to_string (), representative_block); std::string balance_text (account.second.get<std::string> ("balance")); ASSERT_EQ (send_amount.convert_to<std::string> (), balance_text); std::string modified_timestamp (account.second.get<std::string> ("modified_timestamp")); ASSERT_LT (std::abs ((long)time - stol (modified_timestamp)), 5); std::string block_count (account.second.get<std::string> ("block_count")); ASSERT_EQ ("1", block_count); boost::optional<std::string> weight (account.second.get_optional<std::string> ("weight")); ASSERT_FALSE (weight.is_initialized ()); boost::optional<std::string> pending (account.second.get_optional<std::string> ("pending")); ASSERT_FALSE (pending.is_initialized ()); boost::optional<std::string> representative (account.second.get_optional<std::string> ("representative")); ASSERT_FALSE (representative.is_initialized ()); } } // Test for optional values request.put ("weight", true); request.put ("pending", true); request.put ("representative", true); { auto response (wait_response (system, rpc, request)); for (auto & account : response.get_child ("accounts")) 
{ boost::optional<std::string> weight (account.second.get_optional<std::string> ("weight")); ASSERT_TRUE (weight.is_initialized ()); ASSERT_EQ ("0", weight.get ()); boost::optional<std::string> pending (account.second.get_optional<std::string> ("pending")); ASSERT_TRUE (pending.is_initialized ()); ASSERT_EQ ("0", pending.get ()); boost::optional<std::string> representative (account.second.get_optional<std::string> ("representative")); ASSERT_TRUE (representative.is_initialized ()); ASSERT_EQ (nano::dev::genesis_key.pub.to_account (), representative.get ()); } } // Test threshold request.put ("count", 2); request.put ("threshold", genesis_balance + 1); { auto response (wait_response (system, rpc, request)); auto & accounts (response.get_child ("accounts")); ASSERT_EQ (1, accounts.size ()); auto account (accounts.begin ()); ASSERT_EQ (key.pub.to_account (), account->first); std::string balance_text (account->second.get<std::string> ("balance")); ASSERT_EQ (send_amount.convert_to<std::string> (), balance_text); } auto send2_amount (50); genesis_balance -= send2_amount; nano::send_block send2 (send.hash (), key.pub, genesis_balance, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node->work_generate_blocking (send.hash ())); rpc_ctx->io_scope->reset (); node->process (send2); rpc_ctx->io_scope->renew (); // When asking for pending, pending amount is taken into account for threshold so the account must show up request.put ("count", 2); request.put ("threshold", (send_amount + send2_amount).convert_to<std::string> ()); request.put ("pending", true); { auto response (wait_response (system, rpc, request)); auto & accounts (response.get_child ("accounts")); ASSERT_EQ (1, accounts.size ()); auto account (accounts.begin ()); ASSERT_EQ (key.pub.to_account (), account->first); std::string balance_text (account->second.get<std::string> ("balance")); ASSERT_EQ (send_amount.convert_to<std::string> (), balance_text); std::string pending_text (account->second.get<std::string> ("pending")); ASSERT_EQ (std::to_string (send2_amount), pending_text); } } TEST (rpc, accounts_create) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "accounts_create"); request.put ("wallet", node->wallets.items.begin ()->first.to_string ()); request.put ("count", "8"); auto response (wait_response (system, rpc, request)); auto & accounts (response.get_child ("accounts")); for (auto i (accounts.begin ()), n (accounts.end ()); i != n; ++i) { std::string account_text (i->second.get<std::string> ("")); nano::account account; ASSERT_FALSE (account.decode_account (account_text)); ASSERT_TRUE (system.wallet (0)->exists (account)); } ASSERT_EQ (8, accounts.size ()); } TEST (rpc, block_create) { nano::system system; auto node1 = add_ipc_enabled_node (system); nano::keypair key; system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); system.wallet (0)->insert_adhoc (key.prv); auto latest (node1->latest (nano::dev::genesis_key.pub)); auto send_work = *node1->work_generate_blocking (latest); nano::send_block send (latest, key.pub, 100, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, send_work); auto open_work = *node1->work_generate_blocking (key.pub); nano::open_block open (send.hash (), nano::dev::genesis_key.pub, key.pub, key.prv, key.pub, open_work); auto [rpc, rpc_ctx] = add_rpc (system, node1); boost::property_tree::ptree request; request.put ("action", "block_create"); request.put ("type", 
"send"); request.put ("wallet", node1->wallets.items.begin ()->first.to_string ()); request.put ("account", nano::dev::genesis_key.pub.to_account ()); request.put ("previous", latest.to_string ()); request.put ("amount", "340282366920938463463374607431768211355"); request.put ("destination", key.pub.to_account ()); request.put ("work", nano::to_string_hex (send_work)); auto response (wait_response (system, rpc, request)); std::string send_hash (response.get<std::string> ("hash")); ASSERT_EQ (send.hash ().to_string (), send_hash); std::string send_difficulty (response.get<std::string> ("difficulty")); ASSERT_EQ (nano::to_string_hex (send.difficulty ()), send_difficulty); auto send_text (response.get<std::string> ("block")); boost::property_tree::ptree block_l; std::stringstream block_stream (send_text); boost::property_tree::read_json (block_stream, block_l); auto send_block (nano::deserialize_block_json (block_l)); ASSERT_EQ (send.hash (), send_block->hash ()); rpc_ctx->io_scope->reset (); node1->process (send); rpc_ctx->io_scope->renew (); boost::property_tree::ptree request1; request1.put ("action", "block_create"); request1.put ("type", "open"); std::string key_text; key.prv.encode_hex (key_text); request1.put ("key", key_text); request1.put ("representative", nano::dev::genesis_key.pub.to_account ()); request1.put ("source", send.hash ().to_string ()); request1.put ("work", nano::to_string_hex (open_work)); auto response1 (wait_response (system, rpc, request1)); std::string open_hash (response1.get<std::string> ("hash")); ASSERT_EQ (open.hash ().to_string (), open_hash); auto open_text (response1.get<std::string> ("block")); std::stringstream block_stream1 (open_text); boost::property_tree::read_json (block_stream1, block_l); auto open_block (nano::deserialize_block_json (block_l)); ASSERT_EQ (open.hash (), open_block->hash ()); rpc_ctx->io_scope->reset (); ASSERT_EQ (nano::process_result::progress, node1->process (open).code); rpc_ctx->io_scope->renew (); request1.put ("representative", key.pub.to_account ()); auto response2 (wait_response (system, rpc, request1)); std::string open2_hash (response2.get<std::string> ("hash")); ASSERT_NE (open.hash ().to_string (), open2_hash); // different blocks with wrong representative auto change_work = *node1->work_generate_blocking (open.hash ()); nano::change_block change (open.hash (), key.pub, key.prv, key.pub, change_work); request1.put ("type", "change"); request1.put ("work", nano::to_string_hex (change_work)); auto response4 (wait_response (system, rpc, request1)); std::string change_hash (response4.get<std::string> ("hash")); ASSERT_EQ (change.hash ().to_string (), change_hash); auto change_text (response4.get<std::string> ("block")); std::stringstream block_stream4 (change_text); boost::property_tree::read_json (block_stream4, block_l); auto change_block (nano::deserialize_block_json (block_l)); ASSERT_EQ (change.hash (), change_block->hash ()); rpc_ctx->io_scope->reset (); ASSERT_EQ (nano::process_result::progress, node1->process (change).code); nano::send_block send2 (send.hash (), key.pub, 0, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node1->work_generate_blocking (send.hash ())); ASSERT_EQ (nano::process_result::progress, node1->process (send2).code); rpc_ctx->io_scope->renew (); boost::property_tree::ptree request2; request2.put ("action", "block_create"); request2.put ("type", "receive"); request2.put ("wallet", node1->wallets.items.begin ()->first.to_string ()); request2.put ("account", key.pub.to_account ()); 
request2.put ("source", send2.hash ().to_string ()); request2.put ("previous", change.hash ().to_string ()); request2.put ("work", nano::to_string_hex (*node1->work_generate_blocking (change.hash ()))); auto response5 (wait_response (system, rpc, request2)); std::string receive_hash (response4.get<std::string> ("hash")); auto receive_text (response5.get<std::string> ("block")); std::stringstream block_stream5 (change_text); boost::property_tree::read_json (block_stream5, block_l); auto receive_block (nano::deserialize_block_json (block_l)); ASSERT_EQ (receive_hash, receive_block->hash ().to_string ()); node1->process_active (std::move (receive_block)); latest = node1->latest (key.pub); ASSERT_EQ (receive_hash, latest.to_string ()); } TEST (rpc, block_create_state) { nano::system system; auto node = add_ipc_enabled_node (system); nano::keypair key; nano::genesis genesis; system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "block_create"); request.put ("type", "state"); request.put ("wallet", node->wallets.items.begin ()->first.to_string ()); request.put ("account", nano::dev::genesis_key.pub.to_account ()); request.put ("previous", genesis.hash ().to_string ()); request.put ("representative", nano::dev::genesis_key.pub.to_account ()); request.put ("balance", (nano::dev::genesis_amount - nano::Gxrb_ratio).convert_to<std::string> ()); request.put ("link", key.pub.to_account ()); request.put ("work", nano::to_string_hex (*node->work_generate_blocking (genesis.hash ()))); auto response (wait_response (system, rpc, request)); std::string state_hash (response.get<std::string> ("hash")); auto state_text (response.get<std::string> ("block")); std::stringstream block_stream (state_text); boost::property_tree::ptree block_l; boost::property_tree::read_json (block_stream, block_l); auto state_block (nano::deserialize_block_json (block_l)); ASSERT_NE (nullptr, state_block); ASSERT_EQ (nano::block_type::state, state_block->type ()); ASSERT_EQ (state_hash, state_block->hash ().to_string ()); rpc_ctx->io_scope->reset (); auto process_result (node->process (*state_block)); ASSERT_EQ (nano::process_result::progress, process_result.code); } TEST (rpc, block_create_state_open) { nano::system system; auto node = add_ipc_enabled_node (system); nano::keypair key; nano::genesis genesis; system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); auto send_block (system.wallet (0)->send_action (nano::dev::genesis_key.pub, key.pub, nano::Gxrb_ratio)); ASSERT_NE (nullptr, send_block); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "block_create"); request.put ("type", "state"); request.put ("key", key.prv.to_string ()); request.put ("account", key.pub.to_account ()); request.put ("previous", 0); request.put ("representative", nano::dev::genesis_key.pub.to_account ()); request.put ("balance", nano::Gxrb_ratio.convert_to<std::string> ()); request.put ("link", send_block->hash ().to_string ()); request.put ("work", nano::to_string_hex (*node->work_generate_blocking (key.pub))); auto response (wait_response (system, rpc, request)); std::string state_hash (response.get<std::string> ("hash")); auto state_text (response.get<std::string> ("block")); std::stringstream block_stream (state_text); boost::property_tree::ptree block_l; boost::property_tree::read_json (block_stream, block_l); auto state_block (nano::deserialize_block_json (block_l)); 
ASSERT_NE (nullptr, state_block); ASSERT_EQ (nano::block_type::state, state_block->type ()); ASSERT_EQ (state_hash, state_block->hash ().to_string ()); auto difficulty (state_block->difficulty ()); ASSERT_GT (difficulty, nano::work_threshold (state_block->work_version (), nano::block_details (nano::epoch::epoch_0, false, true, false))); ASSERT_TRUE (node->latest (key.pub).is_zero ()); rpc_ctx->io_scope->reset (); auto process_result (node->process (*state_block)); ASSERT_EQ (nano::process_result::progress, process_result.code); ASSERT_EQ (state_block->sideband ().details.epoch, nano::epoch::epoch_0); ASSERT_TRUE (state_block->sideband ().details.is_receive); ASSERT_FALSE (node->latest (key.pub).is_zero ()); } // Missing "work" parameter should cause work to be generated for us. TEST (rpc, block_create_state_request_work) { // Test work generation for state blocks both with and without previous (in the latter // case, the account will be used for work generation) std::unique_ptr<nano::state_block> epoch2; { nano::system system (1); system.upgrade_genesis_epoch (*system.nodes.front (), nano::epoch::epoch_1); epoch2 = system.upgrade_genesis_epoch (*system.nodes.front (), nano::epoch::epoch_2); } std::vector<std::string> previous_test_input{ epoch2->hash ().to_string (), std::string ("0") }; for (auto previous : previous_test_input) { nano::system system; auto node = add_ipc_enabled_node (system); nano::keypair key; system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "block_create"); request.put ("type", "state"); request.put ("wallet", node->wallets.items.begin ()->first.to_string ()); request.put ("account", nano::dev::genesis_key.pub.to_account ()); request.put ("representative", nano::dev::genesis_key.pub.to_account ()); request.put ("balance", (nano::dev::genesis_amount - nano::Gxrb_ratio).convert_to<std::string> ()); request.put ("link", key.pub.to_account ()); request.put ("previous", previous); auto response (wait_response (system, rpc, request)); boost::property_tree::ptree block_l; std::stringstream block_stream (response.get<std::string> ("block")); boost::property_tree::read_json (block_stream, block_l); auto block (nano::deserialize_block_json (block_l)); ASSERT_NE (nullptr, block); ASSERT_GE (block->difficulty (), node->default_difficulty (nano::work_version::work_1)); } } TEST (rpc, block_create_open_epoch_v2) { nano::system system; auto node = add_ipc_enabled_node (system); nano::keypair key; system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); ASSERT_NE (nullptr, system.upgrade_genesis_epoch (*node, nano::epoch::epoch_1)); ASSERT_NE (nullptr, system.upgrade_genesis_epoch (*node, nano::epoch::epoch_2)); auto send_block (system.wallet (0)->send_action (nano::dev::genesis_key.pub, key.pub, nano::Gxrb_ratio)); ASSERT_NE (nullptr, send_block); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "block_create"); request.put ("type", "state"); request.put ("key", key.prv.to_string ()); request.put ("account", key.pub.to_account ()); request.put ("previous", 0); request.put ("representative", nano::dev::genesis_key.pub.to_account ()); request.put ("balance", nano::Gxrb_ratio.convert_to<std::string> ()); request.put ("link", send_block->hash ().to_string ()); auto response (wait_response (system, rpc, request)); std::string state_hash (response.get<std::string> ("hash")); auto state_text 
(response.get<std::string> ("block")); std::stringstream block_stream (state_text); boost::property_tree::ptree block_l; boost::property_tree::read_json (block_stream, block_l); auto state_block (nano::deserialize_block_json (block_l)); ASSERT_NE (nullptr, state_block); ASSERT_EQ (nano::block_type::state, state_block->type ()); ASSERT_EQ (state_hash, state_block->hash ().to_string ()); auto difficulty (state_block->difficulty ()); ASSERT_GT (difficulty, nano::work_threshold (state_block->work_version (), nano::block_details (nano::epoch::epoch_2, false, true, false))); ASSERT_TRUE (node->latest (key.pub).is_zero ()); rpc_ctx->io_scope->reset (); auto process_result (node->process (*state_block)); ASSERT_EQ (nano::process_result::progress, process_result.code); ASSERT_EQ (state_block->sideband ().details.epoch, nano::epoch::epoch_2); ASSERT_TRUE (state_block->sideband ().details.is_receive); ASSERT_FALSE (node->latest (key.pub).is_zero ()); } TEST (rpc, block_create_receive_epoch_v2) { nano::system system; auto node = add_ipc_enabled_node (system); nano::keypair key; system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); ASSERT_NE (nullptr, system.upgrade_genesis_epoch (*node, nano::epoch::epoch_1)); auto send_block (system.wallet (0)->send_action (nano::dev::genesis_key.pub, key.pub, nano::Gxrb_ratio)); ASSERT_NE (nullptr, send_block); nano::state_block open (key.pub, 0, nano::dev::genesis_key.pub, nano::Gxrb_ratio, send_block->hash (), key.prv, key.pub, *node->work_generate_blocking (key.pub)); ASSERT_EQ (nano::process_result::progress, node->process (open).code); ASSERT_NE (nullptr, system.upgrade_genesis_epoch (*node, nano::epoch::epoch_2)); auto send_block_2 (system.wallet (0)->send_action (nano::dev::genesis_key.pub, key.pub, nano::Gxrb_ratio)); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "block_create"); request.put ("type", "state"); request.put ("key", key.prv.to_string ()); request.put ("account", key.pub.to_account ()); request.put ("previous", open.hash ().to_string ()); request.put ("representative", nano::dev::genesis_key.pub.to_account ()); request.put ("balance", (2 * nano::Gxrb_ratio).convert_to<std::string> ()); request.put ("link", send_block_2->hash ().to_string ()); auto response (wait_response (system, rpc, request)); std::string state_hash (response.get<std::string> ("hash")); auto state_text (response.get<std::string> ("block")); std::stringstream block_stream (state_text); boost::property_tree::ptree block_l; boost::property_tree::read_json (block_stream, block_l); auto state_block (nano::deserialize_block_json (block_l)); ASSERT_NE (nullptr, state_block); ASSERT_EQ (nano::block_type::state, state_block->type ()); ASSERT_EQ (state_hash, state_block->hash ().to_string ()); auto difficulty (state_block->difficulty ()); ASSERT_GT (difficulty, nano::work_threshold (state_block->work_version (), nano::block_details (nano::epoch::epoch_2, false, true, false))); rpc_ctx->io_scope->reset (); auto process_result (node->process (*state_block)); ASSERT_EQ (nano::process_result::progress, process_result.code); ASSERT_EQ (state_block->sideband ().details.epoch, nano::epoch::epoch_2); ASSERT_TRUE (state_block->sideband ().details.is_receive); ASSERT_FALSE (node->latest (key.pub).is_zero ()); } TEST (rpc, block_create_send_epoch_v2) { nano::system system; auto node = add_ipc_enabled_node (system); nano::keypair key; system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); ASSERT_NE (nullptr, 
system.upgrade_genesis_epoch (*node, nano::epoch::epoch_1)); ASSERT_NE (nullptr, system.upgrade_genesis_epoch (*node, nano::epoch::epoch_2)); auto send_block (system.wallet (0)->send_action (nano::dev::genesis_key.pub, key.pub, nano::Gxrb_ratio)); ASSERT_NE (nullptr, send_block); nano::state_block open (key.pub, 0, nano::dev::genesis_key.pub, nano::Gxrb_ratio, send_block->hash (), key.prv, key.pub, *node->work_generate_blocking (key.pub)); ASSERT_EQ (nano::process_result::progress, node->process (open).code); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "block_create"); request.put ("type", "state"); request.put ("key", key.prv.to_string ()); request.put ("account", key.pub.to_account ()); request.put ("previous", open.hash ().to_string ()); request.put ("representative", nano::dev::genesis_key.pub.to_account ()); request.put ("balance", 0); request.put ("link", nano::dev::genesis_key.pub.to_string ()); auto response (wait_response (system, rpc, request)); std::string state_hash (response.get<std::string> ("hash")); auto state_text (response.get<std::string> ("block")); std::stringstream block_stream (state_text); boost::property_tree::ptree block_l; boost::property_tree::read_json (block_stream, block_l); auto state_block (nano::deserialize_block_json (block_l)); ASSERT_NE (nullptr, state_block); ASSERT_EQ (nano::block_type::state, state_block->type ()); ASSERT_EQ (state_hash, state_block->hash ().to_string ()); auto difficulty (state_block->difficulty ()); ASSERT_GT (difficulty, nano::work_threshold (state_block->work_version (), nano::block_details (nano::epoch::epoch_2, true, false, false))); rpc_ctx->io_scope->reset (); auto process_result (node->process (*state_block)); ASSERT_EQ (nano::process_result::progress, process_result.code); ASSERT_EQ (state_block->sideband ().details.epoch, nano::epoch::epoch_2); ASSERT_TRUE (state_block->sideband ().details.is_send); ASSERT_FALSE (node->latest (key.pub).is_zero ()); } TEST (rpc, block_hash) { nano::system system; auto node1 = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node1); nano::keypair key; auto latest (node1->latest (nano::dev::genesis_key.pub)); nano::send_block send (latest, key.pub, 100, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node1->work_generate_blocking (latest)); boost::property_tree::ptree request; request.put ("action", "block_hash"); std::string json; send.serialize_json (json); request.put ("block", json); auto response (wait_response (system, rpc, request)); std::string send_hash (response.get<std::string> ("hash")); ASSERT_EQ (send.hash ().to_string (), send_hash); } TEST (rpc, wallet_lock) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; std::string wallet; node->wallets.items.begin ()->first.encode_hex (wallet); { auto transaction (system.wallet (0)->wallets.tx_begin_read ()); ASSERT_TRUE (system.wallet (0)->store.valid_password (transaction)); } request.put ("wallet", wallet); request.put ("action", "wallet_lock"); auto response (wait_response (system, rpc, request)); std::string account_text1 (response.get<std::string> ("locked")); ASSERT_EQ (account_text1, "1"); auto transaction (system.wallet (0)->wallets.tx_begin_read ()); ASSERT_FALSE (system.wallet (0)->store.valid_password (transaction)); } TEST (rpc, wallet_locked) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc 
(system, node); boost::property_tree::ptree request; std::string wallet; node->wallets.items.begin ()->first.encode_hex (wallet); request.put ("wallet", wallet); request.put ("action", "wallet_locked"); auto response (wait_response (system, rpc, request)); std::string account_text1 (response.get<std::string> ("locked")); ASSERT_EQ (account_text1, "0"); } TEST (rpc, wallet_create_fail) { nano::system system; auto node = add_ipc_enabled_node (system); // lmdb_max_dbs should be removed once the wallet store is refactored to support more wallets. for (int i = 0; i < 127; i++) { node->wallets.create (nano::random_wallet_id ()); } auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "wallet_create"); auto response (wait_response (system, rpc, request)); ASSERT_EQ (std::error_code (nano::error_common::wallet_lmdb_max_dbs).message (), response.get<std::string> ("error")); } TEST (rpc, wallet_ledger) { nano::system system; auto node1 = add_ipc_enabled_node (system); nano::keypair key; system.wallet (0)->insert_adhoc (key.prv); auto latest (node1->latest (nano::dev::genesis_key.pub)); nano::send_block send (latest, key.pub, 100, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node1->work_generate_blocking (latest)); node1->process (send); nano::open_block open (send.hash (), nano::dev::genesis_key.pub, key.pub, key.prv, key.pub, *node1->work_generate_blocking (key.pub)); ASSERT_EQ (nano::process_result::progress, node1->process (open).code); auto time (nano::seconds_since_epoch ()); auto [rpc, rpc_ctx] = add_rpc (system, node1); boost::property_tree::ptree request; request.put ("action", "wallet_ledger"); request.put ("wallet", node1->wallets.items.begin ()->first.to_string ()); request.put ("sorting", "1"); request.put ("count", "1"); auto response (wait_response (system, rpc, request)); for (auto & accounts : response.get_child ("accounts")) { std::string account_text (accounts.first); ASSERT_EQ (key.pub.to_account (), account_text); std::string frontier (accounts.second.get<std::string> ("frontier")); ASSERT_EQ (open.hash ().to_string (), frontier); std::string open_block (accounts.second.get<std::string> ("open_block")); ASSERT_EQ (open.hash ().to_string (), open_block); std::string representative_block (accounts.second.get<std::string> ("representative_block")); ASSERT_EQ (open.hash ().to_string (), representative_block); std::string balance_text (accounts.second.get<std::string> ("balance")); ASSERT_EQ ("340282366920938463463374607431768211355", balance_text); std::string modified_timestamp (accounts.second.get<std::string> ("modified_timestamp")); ASSERT_LT (std::abs ((long)time - stol (modified_timestamp)), 5); std::string block_count (accounts.second.get<std::string> ("block_count")); ASSERT_EQ ("1", block_count); boost::optional<std::string> weight (accounts.second.get_optional<std::string> ("weight")); ASSERT_FALSE (weight.is_initialized ()); boost::optional<std::string> pending (accounts.second.get_optional<std::string> ("pending")); ASSERT_FALSE (pending.is_initialized ()); boost::optional<std::string> representative (accounts.second.get_optional<std::string> ("representative")); ASSERT_FALSE (representative.is_initialized ()); } // Test for optional values request.put ("weight", "true"); request.put ("pending", "1"); request.put ("representative", "false"); auto response2 (wait_response (system, rpc, request)); for (auto & accounts : response2.get_child ("accounts")) { boost::optional<std::string> weight 
(accounts.second.get_optional<std::string> ("weight")); ASSERT_TRUE (weight.is_initialized ()); ASSERT_EQ ("0", weight.get ()); boost::optional<std::string> pending (accounts.second.get_optional<std::string> ("pending")); ASSERT_TRUE (pending.is_initialized ()); ASSERT_EQ ("0", pending.get ()); boost::optional<std::string> representative (accounts.second.get_optional<std::string> ("representative")); ASSERT_FALSE (representative.is_initialized ()); } } TEST (rpc, wallet_add_watch) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; std::string wallet; node->wallets.items.begin ()->first.encode_hex (wallet); request.put ("wallet", wallet); request.put ("action", "wallet_add_watch"); boost::property_tree::ptree entry; boost::property_tree::ptree peers_l; entry.put ("", nano::dev::genesis_key.pub.to_account ()); peers_l.push_back (std::make_pair ("", entry)); request.add_child ("accounts", peers_l); auto response (wait_response (system, rpc, request)); std::string success (response.get<std::string> ("success")); ASSERT_TRUE (success.empty ()); ASSERT_TRUE (system.wallet (0)->exists (nano::dev::genesis_key.pub)); // Make sure using special wallet key as pubkey fails nano::public_key bad_key (1); entry.put ("", bad_key.to_account ()); peers_l.push_back (std::make_pair ("", entry)); request.erase ("accounts"); request.add_child ("accounts", peers_l); auto response_error (wait_response (system, rpc, request)); std::error_code ec (nano::error_common::bad_public_key); ASSERT_EQ (response_error.get<std::string> ("error"), ec.message ()); } TEST (rpc, online_reps) { nano::system system (1); auto node1 (system.nodes[0]); auto node2 = add_ipc_enabled_node (system); nano::keypair key; system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); ASSERT_EQ (node2->online_reps.online (), 0); auto send_block (system.wallet (0)->send_action (nano::dev::genesis_key.pub, key.pub, nano::Gxrb_ratio)); ASSERT_NE (nullptr, send_block); ASSERT_TIMELY (10s, !node2->online_reps.list ().empty ()); ASSERT_EQ (node2->online_reps.online (), nano::dev::genesis_amount - nano::Gxrb_ratio); auto [rpc, rpc_ctx] = add_rpc (system, node2); boost::property_tree::ptree request; request.put ("action", "representatives_online"); auto response (wait_response (system, rpc, request)); auto representatives (response.get_child ("representatives")); auto item (representatives.begin ()); ASSERT_NE (representatives.end (), item); ASSERT_EQ (nano::dev::genesis_key.pub.to_account (), item->second.get<std::string> ("")); boost::optional<std::string> weight (item->second.get_optional<std::string> ("weight")); ASSERT_FALSE (weight.is_initialized ()); ASSERT_TIMELY (5s, node2->block (send_block->hash ())); //Test weight option request.put ("weight", "true"); auto response2 (wait_response (system, rpc, request)); auto representatives2 (response2.get_child ("representatives")); auto item2 (representatives2.begin ()); ASSERT_NE (representatives2.end (), item2); ASSERT_EQ (nano::dev::genesis_key.pub.to_account (), item2->first); auto weight2 (item2->second.get<std::string> ("weight")); ASSERT_EQ (node2->weight (nano::dev::genesis_key.pub).convert_to<std::string> (), weight2); //Test accounts filter rpc_ctx->io_scope->reset (); auto new_rep (system.wallet (1)->deterministic_insert ()); auto send (system.wallet (0)->send_action (nano::dev::genesis_key.pub, new_rep, node1->config.receive_minimum.number ())); rpc_ctx->io_scope->renew (); ASSERT_NE (nullptr, 
send); ASSERT_TIMELY (10s, node2->block (send->hash ())); rpc_ctx->io_scope->reset (); auto receive (system.wallet (1)->receive_action (send->hash (), new_rep, node1->config.receive_minimum.number (), send->link ().as_account ())); rpc_ctx->io_scope->renew (); ASSERT_NE (nullptr, receive); ASSERT_TIMELY (5s, node2->block (receive->hash ())); rpc_ctx->io_scope->reset (); auto change (system.wallet (0)->change_action (nano::dev::genesis_key.pub, new_rep)); rpc_ctx->io_scope->renew (); ASSERT_NE (nullptr, change); ASSERT_TIMELY (5s, node2->block (change->hash ())); ASSERT_TIMELY (5s, node2->online_reps.list ().size () == 2); boost::property_tree::ptree child_rep; child_rep.put ("", new_rep.to_account ()); boost::property_tree::ptree filtered_accounts; filtered_accounts.push_back (std::make_pair ("", child_rep)); request.add_child ("accounts", filtered_accounts); auto response3 (wait_response (system, rpc, request, 10s)); auto representatives3 (response3.get_child ("representatives")); auto item3 (representatives3.begin ()); ASSERT_NE (representatives3.end (), item3); ASSERT_EQ (new_rep.to_account (), item3->first); ASSERT_EQ (representatives3.size (), 1); node2->stop (); } TEST (rpc, confirmation_height_currently_processing) { nano::system system; nano::node_flags node_flags; node_flags.force_use_write_database_queue = true; nano::node_config node_config (nano::get_available_port (), system.logging); node_config.frontiers_confirmation = nano::frontiers_confirmation_mode::disabled; auto node = add_ipc_enabled_node (system, node_config, node_flags); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); auto previous_genesis_chain_hash = node->latest (nano::dev::genesis_key.pub); { auto transaction = node->store.tx_begin_write (); nano::keypair key1; nano::send_block send (previous_genesis_chain_hash, key1.pub, nano::dev::genesis_amount - nano::Gxrb_ratio - 1, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *system.work.generate (previous_genesis_chain_hash)); ASSERT_EQ (nano::process_result::progress, node->ledger.process (transaction, send).code); previous_genesis_chain_hash = send.hash (); } std::shared_ptr<nano::block> frontier; { auto transaction = node->store.tx_begin_read (); frontier = node->store.block.get (transaction, previous_genesis_chain_hash); } boost::property_tree::ptree request; request.put ("action", "confirmation_height_currently_processing"); auto [rpc, rpc_ctx] = add_rpc (system, node); // Begin process for confirming the block (and setting confirmation height) { // Write guard prevents the confirmation height processor writing the blocks, so that we can inspect contents during the response auto write_guard = node->write_database_queue.wait (nano::writer::testing); node->block_confirm (frontier); ASSERT_TIMELY (10s, node->confirmation_height_processor.current () == frontier->hash ()); // Make the request { auto response (wait_response (system, rpc, request, 10s)); auto hash (response.get<std::string> ("hash")); ASSERT_EQ (frontier->hash ().to_string (), hash); } } // Wait until confirmation has been set and not processing anything ASSERT_TIMELY (10s, node->confirmation_height_processor.current ().is_zero () && node->confirmation_height_processor.awaiting_processing_size () == 0); // Make the same request, it should now return an error { auto response (wait_response (system, rpc, request, 10s)); std::error_code ec (nano::error_rpc::confirmation_height_not_processing); ASSERT_EQ (response.get<std::string> ("error"), ec.message ()); } } TEST (rpc, 
confirmation_history) { nano::system system; auto node = add_ipc_enabled_node (system); nano::keypair key; system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); ASSERT_TRUE (node->active.list_recently_cemented ().empty ()); auto block (system.wallet (0)->send_action (nano::dev::genesis_key.pub, key.pub, nano::Gxrb_ratio)); ASSERT_TIMELY (10s, !node->active.list_recently_cemented ().empty ()); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "confirmation_history"); auto response (wait_response (system, rpc, request)); auto representatives (response.get_child ("confirmations")); auto item (representatives.begin ()); ASSERT_NE (representatives.end (), item); auto hash (item->second.get<std::string> ("hash")); auto tally (item->second.get<std::string> ("tally")); auto final_tally (item->second.get<std::string> ("final")); ASSERT_EQ (1, item->second.count ("duration")); ASSERT_EQ (1, item->second.count ("time")); ASSERT_EQ (1, item->second.count ("request_count")); ASSERT_EQ (1, item->second.count ("voters")); ASSERT_GE (1U, item->second.get<unsigned> ("blocks")); ASSERT_EQ (block->hash ().to_string (), hash); nano::amount tally_num; tally_num.decode_dec (tally); debug_assert (tally_num == nano::dev::genesis_amount || tally_num == (nano::dev::genesis_amount - nano::Gxrb_ratio)); system.stop (); } TEST (rpc, confirmation_history_hash) { nano::system system; auto node = add_ipc_enabled_node (system); nano::keypair key; system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); ASSERT_TRUE (node->active.list_recently_cemented ().empty ()); auto send1 (system.wallet (0)->send_action (nano::dev::genesis_key.pub, key.pub, nano::Gxrb_ratio)); auto send2 (system.wallet (0)->send_action (nano::dev::genesis_key.pub, key.pub, nano::Gxrb_ratio)); auto send3 (system.wallet (0)->send_action (nano::dev::genesis_key.pub, key.pub, nano::Gxrb_ratio)); ASSERT_TIMELY (10s, node->active.list_recently_cemented ().size () == 3); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "confirmation_history"); request.put ("hash", send2->hash ().to_string ()); auto response (wait_response (system, rpc, request)); auto representatives (response.get_child ("confirmations")); ASSERT_EQ (representatives.size (), 1); auto item (representatives.begin ()); ASSERT_NE (representatives.end (), item); auto hash (item->second.get<std::string> ("hash")); auto tally (item->second.get<std::string> ("tally")); ASSERT_FALSE (item->second.get<std::string> ("duration", "").empty ()); ASSERT_FALSE (item->second.get<std::string> ("time", "").empty ()); ASSERT_EQ (send2->hash ().to_string (), hash); nano::amount tally_num; tally_num.decode_dec (tally); debug_assert (tally_num == nano::dev::genesis_amount || tally_num == (nano::dev::genesis_amount - nano::Gxrb_ratio) || tally_num == (nano::dev::genesis_amount - 2 * nano::Gxrb_ratio) || tally_num == (nano::dev::genesis_amount - 3 * nano::Gxrb_ratio)); system.stop (); } TEST (rpc, block_confirm) { nano::system system; auto node = add_ipc_enabled_node (system); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); nano::genesis genesis; auto send1 (std::make_shared<nano::state_block> (nano::dev::genesis_key.pub, genesis.hash (), nano::dev::genesis_key.pub, nano::dev::genesis_amount - nano::Gxrb_ratio, nano::dev::genesis_key.pub, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *node->work_generate_blocking (genesis.hash ()))); { auto transaction 
(node->store.tx_begin_write ());
		ASSERT_EQ (nano::process_result::progress, node->ledger.process (transaction, *send1).code);
	}
	auto [rpc, rpc_ctx] = add_rpc (system, node);
	boost::property_tree::ptree request;
	request.put ("action", "block_confirm");
	request.put ("hash", send1->hash ().to_string ());
	auto response (wait_response (system, rpc, request));
	ASSERT_EQ ("1", response.get<std::string> ("started"));
}

TEST (rpc, block_confirm_absent)
{
	nano::system system;
	auto node = add_ipc_enabled_node (system);
	system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv);
	auto [rpc, rpc_ctx] = add_rpc (system, node);
	boost::property_tree::ptree request;
	request.put ("action", "block_confirm");
	request.put ("hash", "0");
	auto response (wait_response (system, rpc, request));
	ASSERT_EQ (std::error_code (nano::error_blocks::not_found).message (), response.get<std::string> ("error"));
}

TEST (rpc, block_confirm_confirmed)
{
	nano::system system (1);
	auto path (nano::unique_path ());
	nano::node_config config;
	config.peering_port = nano::get_available_port ();
	config.callback_address = "localhost";
	config.callback_port = nano::get_available_port ();
	config.callback_target = "/";
	config.logging.init (path);
	auto node = add_ipc_enabled_node (system, config);
	nano::genesis genesis;
	{
		auto transaction (node->store.tx_begin_read ());
		ASSERT_TRUE (node->ledger.block_confirmed (transaction, genesis.hash ()));
	}
	ASSERT_EQ (0, node->stats.count (nano::stat::type::error, nano::stat::detail::http_callback, nano::stat::dir::out));
	auto [rpc, rpc_ctx] = add_rpc (system, node);
	boost::property_tree::ptree request;
	request.put ("action", "block_confirm");
	request.put ("hash", genesis.hash ().to_string ());
	auto response (wait_response (system, rpc, request));
	ASSERT_EQ ("1", response.get<std::string> ("started"));
	// Check confirmation history
	auto confirmed (node->active.list_recently_cemented ());
	ASSERT_EQ (1, confirmed.size ());
	ASSERT_EQ (nano::dev::genesis->hash (), confirmed.begin ()->winner->hash ());
	// Check callback
	ASSERT_TIMELY (10s, node->stats.count (nano::stat::type::error, nano::stat::detail::http_callback, nano::stat::dir::out) != 0);
	// Callback result is error because callback target port isn't listening
	ASSERT_EQ (1, node->stats.count (nano::stat::type::error, nano::stat::detail::http_callback, nano::stat::dir::out));
	node->stop ();
}

TEST (rpc, node_id)
{
	nano::system system;
	auto node = add_ipc_enabled_node (system);
	auto [rpc, rpc_ctx] = add_rpc (system, node);
	boost::property_tree::ptree request;
	request.put ("action", "node_id");
	auto response (wait_response (system, rpc, request));
	ASSERT_EQ (node->node_id.prv.to_string (), response.get<std::string> ("private"));
	ASSERT_EQ (node->node_id.pub.to_account (), response.get<std::string> ("as_account"));
	ASSERT_EQ (node->node_id.pub.to_node_id (), response.get<std::string> ("node_id"));
}

TEST (rpc, stats_clear)
{
	nano::system system;
	auto node = add_ipc_enabled_node (system);
	auto [rpc, rpc_ctx] = add_rpc (system, node);
	nano::keypair key;
	node->stats.inc (nano::stat::type::ledger, nano::stat::dir::in);
	ASSERT_EQ (1, node->stats.count (nano::stat::type::ledger, nano::stat::dir::in));
	boost::property_tree::ptree request;
	request.put ("action", "stats_clear");
	auto response (wait_response (system, rpc, request));
	std::string success (response.get<std::string> ("success"));
	ASSERT_TRUE (success.empty ());
	ASSERT_EQ (0, node->stats.count (nano::stat::type::ledger, nano::stat::dir::in));
	ASSERT_LE (node->stats.last_reset ().count (), 5);
}

TEST
(rpc, unchecked) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node); nano::keypair key; auto open (std::make_shared<nano::state_block> (key.pub, 0, key.pub, 1, key.pub, key.prv, key.pub, *system.work.generate (key.pub))); auto open2 (std::make_shared<nano::state_block> (key.pub, 0, key.pub, 2, key.pub, key.prv, key.pub, *system.work.generate (key.pub))); node->process_active (open); node->process_active (open2); node->block_processor.flush (); boost::property_tree::ptree request; request.put ("action", "unchecked"); request.put ("count", 2); { auto response (wait_response (system, rpc, request)); auto & blocks (response.get_child ("blocks")); ASSERT_EQ (2, blocks.size ()); ASSERT_EQ (1, blocks.count (open->hash ().to_string ())); ASSERT_EQ (1, blocks.count (open2->hash ().to_string ())); } request.put ("json_block", true); { auto response (wait_response (system, rpc, request)); auto & blocks (response.get_child ("blocks")); ASSERT_EQ (2, blocks.size ()); auto & open_block (blocks.get_child (open->hash ().to_string ())); ASSERT_EQ ("state", open_block.get<std::string> ("type")); } } TEST (rpc, unchecked_get) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node); nano::keypair key; auto open (std::make_shared<nano::state_block> (key.pub, 0, key.pub, 1, key.pub, key.prv, key.pub, *system.work.generate (key.pub))); node->process_active (open); node->block_processor.flush (); boost::property_tree::ptree request; request.put ("action", "unchecked_get"); request.put ("hash", open->hash ().to_string ()); { auto response (wait_response (system, rpc, request)); ASSERT_EQ (1, response.count ("contents")); auto timestamp (response.get<uint64_t> ("modified_timestamp")); ASSERT_LE (timestamp, nano::seconds_since_epoch ()); } request.put ("json_block", true); { auto response (wait_response (system, rpc, request)); auto & contents (response.get_child ("contents")); ASSERT_EQ ("state", contents.get<std::string> ("type")); auto timestamp (response.get<uint64_t> ("modified_timestamp")); ASSERT_LE (timestamp, nano::seconds_since_epoch ()); } } TEST (rpc, unchecked_clear) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node); nano::keypair key; auto open (std::make_shared<nano::state_block> (key.pub, 0, key.pub, 1, key.pub, key.prv, key.pub, *system.work.generate (key.pub))); node->process_active (open); node->block_processor.flush (); boost::property_tree::ptree request; { ASSERT_EQ (node->store.unchecked.count (node->store.tx_begin_read ()), 1); } request.put ("action", "unchecked_clear"); auto response (wait_response (system, rpc, request)); ASSERT_TIMELY (10s, node->store.unchecked.count (node->store.tx_begin_read ()) == 0); } TEST (rpc, unopened) { nano::system system; auto node = add_ipc_enabled_node (system); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); nano::account account1 (1), account2 (account1.number () + 1); auto genesis (node->latest (nano::dev::genesis_key.pub)); ASSERT_FALSE (genesis.is_zero ()); auto send (system.wallet (0)->send_action (nano::dev::genesis_key.pub, account1, 1)); ASSERT_NE (nullptr, send); auto send2 (system.wallet (0)->send_action (nano::dev::genesis_key.pub, account2, 10)); ASSERT_NE (nullptr, send2); auto [rpc, rpc_ctx] = add_rpc (system, node); { boost::property_tree::ptree request; request.put ("action", "unopened"); auto response (wait_response (system, rpc, request)); auto & 
accounts (response.get_child ("accounts"));
		ASSERT_EQ (2, accounts.size ());
		ASSERT_EQ ("1", accounts.get<std::string> (account1.to_account ()));
		ASSERT_EQ ("10", accounts.get<std::string> (account2.to_account ()));
	}
	{
		// starting at second account should get a single result
		boost::property_tree::ptree request;
		request.put ("action", "unopened");
		request.put ("account", account2.to_account ());
		auto response (wait_response (system, rpc, request));
		auto & accounts (response.get_child ("accounts"));
		ASSERT_EQ (1, accounts.size ());
		ASSERT_EQ ("10", accounts.get<std::string> (account2.to_account ()));
	}
	{
		// starting at third account should get no results
		boost::property_tree::ptree request;
		request.put ("action", "unopened");
		request.put ("account", nano::account (account2.number () + 1).to_account ());
		auto response (wait_response (system, rpc, request));
		auto & accounts (response.get_child ("accounts"));
		ASSERT_EQ (0, accounts.size ());
	}
	{
		// using count=1 should get a single result
		boost::property_tree::ptree request;
		request.put ("action", "unopened");
		request.put ("count", "1");
		auto response (wait_response (system, rpc, request));
		auto & accounts (response.get_child ("accounts"));
		ASSERT_EQ (1, accounts.size ());
		ASSERT_EQ ("1", accounts.get<std::string> (account1.to_account ()));
	}
	{
		// using threshold at 5 should get a single result
		boost::property_tree::ptree request;
		request.put ("action", "unopened");
		request.put ("threshold", 5);
		auto response (wait_response (system, rpc, request));
		auto & accounts (response.get_child ("accounts"));
		ASSERT_EQ (1, accounts.size ());
		ASSERT_EQ ("10", accounts.get<std::string> (account2.to_account ()));
	}
}

TEST (rpc, unopened_burn)
{
	nano::system system;
	auto node = add_ipc_enabled_node (system);
	system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv);
	auto genesis (node->latest (nano::dev::genesis_key.pub));
	ASSERT_FALSE (genesis.is_zero ());
	auto send (system.wallet (0)->send_action (nano::dev::genesis_key.pub, nano::dev::constants.burn_account, 1));
	ASSERT_NE (nullptr, send);
	auto [rpc, rpc_ctx] = add_rpc (system, node);
	boost::property_tree::ptree request;
	request.put ("action", "unopened");
	auto response (wait_response (system, rpc, request));
	auto & accounts (response.get_child ("accounts"));
	ASSERT_EQ (0, accounts.size ());
}

TEST (rpc, unopened_no_accounts)
{
	nano::system system;
	auto node = add_ipc_enabled_node (system);
	auto [rpc, rpc_ctx] = add_rpc (system, node);
	boost::property_tree::ptree request;
	request.put ("action", "unopened");
	auto response (wait_response (system, rpc, request));
	auto & accounts (response.get_child ("accounts"));
	ASSERT_EQ (0, accounts.size ());
}

TEST (rpc, uptime)
{
	nano::system system;
	auto node = add_ipc_enabled_node (system);
	auto [rpc, rpc_ctx] = add_rpc (system, node);
	boost::property_tree::ptree request;
	request.put ("action", "uptime");
	std::this_thread::sleep_for (std::chrono::seconds (1));
	auto response (wait_response (system, rpc, request));
	ASSERT_LE (1, response.get<int> ("seconds"));
}

TEST (rpc, wallet_history)
{
	nano::system system;
	nano::node_config node_config (nano::get_available_port (), system.logging);
	node_config.enable_voting = false;
	auto node = add_ipc_enabled_node (system, node_config);
	system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv);
	auto timestamp1 (nano::seconds_since_epoch ());
	auto send (system.wallet (0)->send_action (nano::dev::genesis_key.pub, nano::dev::genesis_key.pub, node->config.receive_minimum.number ()));
	ASSERT_NE (nullptr, send);
	auto timestamp2
(nano::seconds_since_epoch ()); auto receive (system.wallet (0)->receive_action (send->hash (), nano::dev::genesis_key.pub, node->config.receive_minimum.number (), send->link ().as_account ())); ASSERT_NE (nullptr, receive); nano::keypair key; auto timestamp3 (nano::seconds_since_epoch ()); auto send2 (system.wallet (0)->send_action (nano::dev::genesis_key.pub, key.pub, node->config.receive_minimum.number ())); ASSERT_NE (nullptr, send2); system.deadline_set (10s); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "wallet_history"); request.put ("wallet", node->wallets.items.begin ()->first.to_string ()); auto response (wait_response (system, rpc, request)); std::vector<std::tuple<std::string, std::string, std::string, std::string, std::string, std::string>> history_l; auto & history_node (response.get_child ("history")); for (auto i (history_node.begin ()), n (history_node.end ()); i != n; ++i) { history_l.push_back (std::make_tuple (i->second.get<std::string> ("type"), i->second.get<std::string> ("account"), i->second.get<std::string> ("amount"), i->second.get<std::string> ("hash"), i->second.get<std::string> ("block_account"), i->second.get<std::string> ("local_timestamp"))); } ASSERT_EQ (4, history_l.size ()); ASSERT_EQ ("send", std::get<0> (history_l[0])); ASSERT_EQ (key.pub.to_account (), std::get<1> (history_l[0])); ASSERT_EQ (node->config.receive_minimum.to_string_dec (), std::get<2> (history_l[0])); ASSERT_EQ (send2->hash ().to_string (), std::get<3> (history_l[0])); ASSERT_EQ (nano::dev::genesis_key.pub.to_account (), std::get<4> (history_l[0])); ASSERT_EQ (std::to_string (timestamp3), std::get<5> (history_l[0])); ASSERT_EQ ("receive", std::get<0> (history_l[1])); ASSERT_EQ (nano::dev::genesis_key.pub.to_account (), std::get<1> (history_l[1])); ASSERT_EQ (node->config.receive_minimum.to_string_dec (), std::get<2> (history_l[1])); ASSERT_EQ (receive->hash ().to_string (), std::get<3> (history_l[1])); ASSERT_EQ (nano::dev::genesis_key.pub.to_account (), std::get<4> (history_l[1])); ASSERT_EQ (std::to_string (timestamp2), std::get<5> (history_l[1])); ASSERT_EQ ("send", std::get<0> (history_l[2])); ASSERT_EQ (nano::dev::genesis_key.pub.to_account (), std::get<1> (history_l[2])); ASSERT_EQ (node->config.receive_minimum.to_string_dec (), std::get<2> (history_l[2])); ASSERT_EQ (send->hash ().to_string (), std::get<3> (history_l[2])); ASSERT_EQ (nano::dev::genesis_key.pub.to_account (), std::get<4> (history_l[2])); ASSERT_EQ (std::to_string (timestamp1), std::get<5> (history_l[2])); // Genesis block ASSERT_EQ ("receive", std::get<0> (history_l[3])); ASSERT_EQ (nano::dev::genesis_key.pub.to_account (), std::get<1> (history_l[3])); ASSERT_EQ (nano::dev::genesis_amount.convert_to<std::string> (), std::get<2> (history_l[3])); ASSERT_EQ (nano::dev::genesis->hash ().to_string (), std::get<3> (history_l[3])); ASSERT_EQ (nano::dev::genesis_key.pub.to_account (), std::get<4> (history_l[3])); } TEST (rpc, sign_hash) { nano::system system; auto node1 = add_ipc_enabled_node (system); nano::keypair key; nano::state_block send (nano::dev::genesis->account (), node1->latest (nano::dev::genesis_key.pub), nano::dev::genesis->account (), nano::dev::genesis_amount - nano::Gxrb_ratio, key.pub, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, 0); auto [rpc, rpc_ctx] = add_rpc (system, node1); boost::property_tree::ptree request; request.put ("action", "sign"); request.put ("hash", send.hash ().to_string ()); request.put ("key", key.prv.to_string 
()); auto response (wait_response (system, rpc, request, 10s)); std::error_code ec (nano::error_rpc::sign_hash_disabled); ASSERT_EQ (response.get<std::string> ("error"), ec.message ()); rpc_ctx->node_rpc_config->enable_sign_hash = true; auto response2 (wait_response (system, rpc, request, 10s)); nano::signature signature; std::string signature_text (response2.get<std::string> ("signature")); ASSERT_FALSE (signature.decode_hex (signature_text)); ASSERT_FALSE (nano::validate_message (key.pub, send.hash (), signature)); } TEST (rpc, sign_block) { nano::system system; auto node1 = add_ipc_enabled_node (system); nano::keypair key; system.wallet (0)->insert_adhoc (key.prv); nano::state_block send (nano::dev::genesis->account (), node1->latest (nano::dev::genesis_key.pub), nano::dev::genesis->account (), nano::dev::genesis_amount - nano::Gxrb_ratio, key.pub, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, 0); auto [rpc, rpc_ctx] = add_rpc (system, node1); boost::property_tree::ptree request; request.put ("action", "sign"); std::string wallet; node1->wallets.items.begin ()->first.encode_hex (wallet); request.put ("wallet", wallet); request.put ("account", key.pub.to_account ()); std::string json; send.serialize_json (json); request.put ("block", json); auto response (wait_response (system, rpc, request, 10s)); auto contents (response.get<std::string> ("block")); boost::property_tree::ptree block_l; std::stringstream block_stream (contents); boost::property_tree::read_json (block_stream, block_l); auto block (nano::deserialize_block_json (block_l)); ASSERT_FALSE (nano::validate_message (key.pub, send.hash (), block->block_signature ())); ASSERT_NE (block->block_signature (), send.block_signature ()); ASSERT_EQ (block->hash (), send.hash ()); } TEST (rpc, memory_stats) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node); // Preliminary test adding to the vote uniquer and checking json output is correct nano::keypair key; auto block (std::make_shared<nano::state_block> (0, 0, 0, 0, 0, key.prv, key.pub, 0)); std::vector<nano::block_hash> hashes; hashes.push_back (block->hash ()); auto vote (std::make_shared<nano::vote> (key.pub, key.prv, 0, hashes)); node->vote_uniquer.unique (vote); boost::property_tree::ptree request; request.put ("action", "stats"); request.put ("type", "objects"); { auto response (wait_response (system, rpc, request)); ASSERT_EQ (response.get_child ("node").get_child ("vote_uniquer").get_child ("votes").get<std::string> ("count"), "1"); } request.put ("type", "database"); { auto response (wait_response (system, rpc, request)); ASSERT_TRUE (!response.empty ()); } } TEST (rpc, block_confirmed) { nano::system system; auto node = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "block_info"); request.put ("hash", "bad_hash1337"); auto response (wait_response (system, rpc, request)); ASSERT_EQ (std::error_code (nano::error_blocks::invalid_block_hash).message (), response.get<std::string> ("error")); request.put ("hash", "0"); auto response1 (wait_response (system, rpc, request)); ASSERT_EQ (std::error_code (nano::error_blocks::not_found).message (), response1.get<std::string> ("error")); rpc_ctx->io_scope->reset (); nano::keypair key; // Open an account directly in the ledger { auto transaction = node->store.tx_begin_write (); nano::block_hash latest (node->latest (nano::dev::genesis_key.pub)); nano::send_block send1 (latest, 
key.pub, 300, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *system.work.generate (latest));
		ASSERT_EQ (nano::process_result::progress, node->ledger.process (transaction, send1).code);
		nano::open_block open1 (send1.hash (), nano::dev::genesis->account (), key.pub, key.prv, key.pub, *system.work.generate (key.pub));
		ASSERT_EQ (nano::process_result::progress, node->ledger.process (transaction, open1).code);
	}
	rpc_ctx->io_scope->renew ();

	// This should not be confirmed
	nano::block_hash latest (node->latest (nano::dev::genesis_key.pub));
	request.put ("hash", latest.to_string ());
	auto response2 (wait_response (system, rpc, request));
	ASSERT_FALSE (response2.get<bool> ("confirmed"));

	// Create and process a new send block
	auto send = std::make_shared<nano::send_block> (latest, key.pub, 10, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *system.work.generate (latest));
	node->process_active (send);
	node->block_processor.flush ();
	node->block_confirm (send);
	auto election = node->active.election (send->qualified_root ());
	ASSERT_NE (nullptr, election);
	election->force_confirm ();

	// Wait until the confirmation height has been set
	ASSERT_TIMELY (10s, node->ledger.block_confirmed (node->store.tx_begin_read (), send->hash ()) && !node->confirmation_height_processor.is_processing_block (send->hash ()));

	// Requesting confirmation for this should now succeed
	request.put ("hash", send->hash ().to_string ());
	auto response3 (wait_response (system, rpc, request));
	ASSERT_TRUE (response3.get<bool> ("confirmed"));
}

TEST (rpc, database_txn_tracker)
{
	if (nano::rocksdb_config::using_rocksdb_in_tests ())
	{
		// Don't test this in rocksdb mode
		return;
	}
	// First try when database tracking is disabled
	{
		nano::system system;
		auto node = add_ipc_enabled_node (system);
		auto [rpc, rpc_ctx] = add_rpc (system, node);
		boost::property_tree::ptree request;
		request.put ("action", "database_txn_tracker");
		{
			auto response (wait_response (system, rpc, request));
			std::error_code ec (nano::error_common::tracking_not_enabled);
			ASSERT_EQ (response.get<std::string> ("error"), ec.message ());
		}
	}

	// Now try enabling it but with invalid amounts
	nano::system system;
	nano::node_config node_config (nano::get_available_port (), system.logging);
	node_config.diagnostics_config.txn_tracking.enable = true;
	auto node = add_ipc_enabled_node (system, node_config);
	auto [rpc, rpc_ctx] = add_rpc (system, node);
	boost::property_tree::ptree request;
	auto check_not_correct_amount = [&system, &request, rpc = rpc] () {
		auto response (wait_response (system, rpc, request));
		std::error_code ec (nano::error_common::invalid_amount);
		ASSERT_EQ (response.get<std::string> ("error"), ec.message ());
	};
	request.put ("action", "database_txn_tracker");
	request.put ("min_read_time", "not a time");
	check_not_correct_amount ();

	// Read is valid now, but write isn't
	request.put ("min_read_time", "1000");
	request.put ("min_write_time", "bad time");
	check_not_correct_amount ();

	// Now try where times are large unattainable numbers
	request.put ("min_read_time", "1000000");
	request.put ("min_write_time", "1000000");
	std::promise<void> keep_txn_alive_promise;
	std::promise<void> txn_created_promise;
	std::thread thread ([&store = node->store, &keep_txn_alive_promise, &txn_created_promise] () {
		// Use rpc_process_container as a placeholder as this thread is only instantiated by the daemon so won't be used
		nano::thread_role::set (nano::thread_role::name::rpc_process_container);

		// Create a read transaction to test
		auto read_tx = store.tx_begin_read ();

		// Sleep so that the read transaction has been alive for at least 1 second. A write lock is not used in this test as it can cause a deadlock with other writes done in the background
		std::this_thread::sleep_for (1s);
		txn_created_promise.set_value ();
		keep_txn_alive_promise.get_future ().wait ();
	});
	txn_created_promise.get_future ().wait ();

	// Adjust minimum read time so that it can detect the read transaction being opened
	request.put ("min_read_time", "1000");

	// It can take a long time to generate stack traces
	auto response (wait_response (system, rpc, request, 60s));
	keep_txn_alive_promise.set_value ();
	std::vector<std::tuple<std::string, std::string, std::string, std::vector<std::tuple<std::string, std::string, std::string, std::string>>>> json_l;
	auto & json_node (response.get_child ("txn_tracking"));
	for (auto & stat : json_node)
	{
		auto & stack_trace = stat.second.get_child ("stacktrace");
		std::vector<std::tuple<std::string, std::string, std::string, std::string>> frames_json_l;
		for (auto & frame : stack_trace)
		{
			frames_json_l.emplace_back (frame.second.get<std::string> ("name"), frame.second.get<std::string> ("address"), frame.second.get<std::string> ("source_file"), frame.second.get<std::string> ("source_line"));
		}
		json_l.emplace_back (stat.second.get<std::string> ("thread"), stat.second.get<std::string> ("time_held_open"), stat.second.get<std::string> ("write"), std::move (frames_json_l));
	}
	ASSERT_EQ (1, json_l.size ());
	auto thread_name = nano::thread_role::get_string (nano::thread_role::name::rpc_process_container);
	// Should only have a read transaction
	ASSERT_EQ (thread_name, std::get<0> (json_l.front ()));
	ASSERT_LE (1000u, boost::lexical_cast<unsigned> (std::get<1> (json_l.front ())));
	ASSERT_EQ ("false", std::get<2> (json_l.front ()));
	// Due to results being different for different compilers/build options we cannot reliably check the contents.
	// The best we can do is just check that there are entries.
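	// std::get<3> holds the stacktrace frames collected above, so the assertion below only checks that at least one frame was reported.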
	ASSERT_TRUE (!std::get<3> (json_l.front ()).empty ());
	thread.join ();
}

TEST (rpc, active_difficulty)
{
	nano::system system;
	auto node = add_ipc_enabled_node (system);
	auto [rpc, rpc_ctx] = add_rpc (system, node);
	ASSERT_EQ (node->default_difficulty (nano::work_version::work_1), node->network_params.network.publish_thresholds.epoch_2);
	boost::property_tree::ptree request;
	request.put ("action", "active_difficulty");
	auto expected_multiplier{ 1.0 };
	{
		auto response (wait_response (system, rpc, request));
		auto network_minimum_text (response.get<std::string> ("network_minimum"));
		uint64_t network_minimum;
		ASSERT_FALSE (nano::from_string_hex (network_minimum_text, network_minimum));
		ASSERT_EQ (node->default_difficulty (nano::work_version::work_1), network_minimum);
		auto network_receive_minimum_text (response.get<std::string> ("network_receive_minimum"));
		uint64_t network_receive_minimum;
		ASSERT_FALSE (nano::from_string_hex (network_receive_minimum_text, network_receive_minimum));
		ASSERT_EQ (node->default_receive_difficulty (nano::work_version::work_1), network_receive_minimum);
		auto multiplier (response.get<double> ("multiplier"));
		ASSERT_NEAR (expected_multiplier, multiplier, 1e-6);
		auto network_current_text (response.get<std::string> ("network_current"));
		uint64_t network_current;
		ASSERT_FALSE (nano::from_string_hex (network_current_text, network_current));
		ASSERT_EQ (nano::difficulty::from_multiplier (expected_multiplier, node->default_difficulty (nano::work_version::work_1)), network_current);
		auto network_receive_current_text (response.get<std::string> ("network_receive_current"));
		uint64_t network_receive_current;
		ASSERT_FALSE (nano::from_string_hex (network_receive_current_text, network_receive_current));
		auto network_receive_current_multiplier (nano::difficulty::to_multiplier (network_receive_current, network_receive_minimum));
		auto network_receive_current_normalized_multiplier (nano::normalized_multiplier (network_receive_current_multiplier, network_receive_minimum));
		ASSERT_NEAR (network_receive_current_normalized_multiplier, multiplier, 1e-6);
		ASSERT_EQ (response.not_found (), response.find ("difficulty_trend"));
	}
	// Test include_trend optional
	request.put ("include_trend", true);
	{
		auto response (wait_response (system, rpc, request));
		auto trend_opt (response.get_child_optional ("difficulty_trend"));
		ASSERT_TRUE (trend_opt.is_initialized ());
		auto & trend (trend_opt.get ());
		ASSERT_EQ (1, trend.size ());
	}
}

// This is mainly to check for threading issues with TSAN
TEST (rpc, simultaneous_calls)
{
	// This tests simultaneous calls to the same node in different threads
	nano::system system;
	auto node = add_ipc_enabled_node (system);
	scoped_io_thread_name_change scoped_thread_name_io;
	nano::thread_runner runner (system.io_ctx, node->config.io_threads);
	nano::node_rpc_config node_rpc_config;
	nano::ipc::ipc_server ipc_server (*node, node_rpc_config);
	nano::rpc_config rpc_config (nano::get_available_port (), true);
	rpc_config.rpc_process.ipc_port = node->config.ipc_config.transport_tcp.port;
	rpc_config.rpc_process.num_ipc_connections = 8;
	nano::ipc_rpc_processor ipc_rpc_processor (system.io_ctx, rpc_config);
	nano::rpc rpc (system.io_ctx, rpc_config, ipc_rpc_processor);
	rpc.start ();
	boost::property_tree::ptree request;
	request.put ("action", "account_block_count");
	request.put ("account", nano::dev::genesis_key.pub.to_account ());
	constexpr auto num = 100;
	std::array<std::unique_ptr<test_response>, num> test_responses;
	for (int i = 0; i < num; ++i)
	{
		test_responses[i] =
std::make_unique<test_response> (request, system.io_ctx); } std::promise<void> promise; std::atomic<int> count{ num }; for (int i = 0; i < num; ++i) { std::thread ([&test_responses, &promise, &count, i, port = rpc.config.port] () { test_responses[i]->run (port); if (--count == 0) { promise.set_value (); } }) .detach (); } promise.get_future ().wait (); ASSERT_TIMELY (60s, std::all_of (test_responses.begin (), test_responses.end (), [] (const auto & test_response) { return test_response->status != 0; })); for (int i = 0; i < num; ++i) { ASSERT_EQ (200, test_responses[i]->status); std::string block_count_text (test_responses[i]->json.get<std::string> ("block_count")); ASSERT_EQ ("1", block_count_text); } rpc.stop (); system.stop (); ipc_server.stop (); system.io_ctx.stop (); runner.join (); } // This tests that the inprocess RPC (i.e without using IPC) works correctly TEST (rpc, in_process) { nano::system system; auto node = add_ipc_enabled_node (system); scoped_io_thread_name_change scoped_thread_name_io; nano::rpc_config rpc_config (nano::get_available_port (), true); rpc_config.rpc_process.ipc_port = node->config.ipc_config.transport_tcp.port; nano::node_rpc_config node_rpc_config; nano::ipc::ipc_server ipc_server (*node, node_rpc_config); nano::inprocess_rpc_handler inprocess_rpc_handler (*node, ipc_server, node_rpc_config); auto rpc (std::make_shared<nano::rpc> (system.io_ctx, rpc_config, inprocess_rpc_handler)); rpc->start (); boost::property_tree::ptree request; request.put ("action", "account_balance"); request.put ("account", nano::dev::genesis_key.pub.to_account ()); auto response (wait_response (system, rpc, request)); std::string balance_text (response.get<std::string> ("balance")); ASSERT_EQ ("340282366920938463463374607431768211455", balance_text); std::string pending_text (response.get<std::string> ("pending")); ASSERT_EQ ("0", pending_text); } TEST (rpc_config, serialization) { nano::rpc_config config1; config1.address = boost::asio::ip::address_v6::any ().to_string (); config1.port = 10; config1.enable_control = true; config1.max_json_depth = 10; config1.rpc_process.io_threads = 2; config1.rpc_process.ipc_address = boost::asio::ip::address_v6::any ().to_string (); config1.rpc_process.ipc_port = 2000; config1.rpc_process.num_ipc_connections = 99; nano::jsonconfig tree; config1.serialize_json (tree); nano::rpc_config config2; ASSERT_NE (config2.address, config1.address); ASSERT_NE (config2.port, config1.port); ASSERT_NE (config2.enable_control, config1.enable_control); ASSERT_NE (config2.max_json_depth, config1.max_json_depth); ASSERT_NE (config2.rpc_process.io_threads, config1.rpc_process.io_threads); ASSERT_NE (config2.rpc_process.ipc_address, config1.rpc_process.ipc_address); ASSERT_NE (config2.rpc_process.ipc_port, config1.rpc_process.ipc_port); ASSERT_NE (config2.rpc_process.num_ipc_connections, config1.rpc_process.num_ipc_connections); bool upgraded{ false }; config2.deserialize_json (upgraded, tree); ASSERT_EQ (config2.address, config1.address); ASSERT_EQ (config2.port, config1.port); ASSERT_EQ (config2.enable_control, config1.enable_control); ASSERT_EQ (config2.max_json_depth, config1.max_json_depth); ASSERT_EQ (config2.rpc_process.io_threads, config1.rpc_process.io_threads); ASSERT_EQ (config2.rpc_process.ipc_address, config1.rpc_process.ipc_address); ASSERT_EQ (config2.rpc_process.ipc_port, config1.rpc_process.ipc_port); ASSERT_EQ (config2.rpc_process.num_ipc_connections, config1.rpc_process.num_ipc_connections); } TEST (rpc, deprecated_account_format) { nano::system 
system;
	auto node = add_ipc_enabled_node (system);
	auto [rpc, rpc_ctx] = add_rpc (system, node);
	boost::property_tree::ptree request;
	request.put ("action", "account_info");
	request.put ("account", nano::dev::genesis_key.pub.to_account ());
	auto response (wait_response (system, rpc, request));
	boost::optional<std::string> deprecated_account_format (response.get_optional<std::string> ("deprecated_account_format"));
	ASSERT_FALSE (deprecated_account_format.is_initialized ());
	std::string account_text (nano::dev::genesis_key.pub.to_account ());
	account_text[4] = '-';
	request.put ("account", account_text);
	auto response2 (wait_response (system, rpc, request));
	std::string frontier (response2.get<std::string> ("frontier"));
	ASSERT_EQ (nano::dev::genesis->hash ().to_string (), frontier);
	boost::optional<std::string> deprecated_account_format2 (response2.get_optional<std::string> ("deprecated_account_format"));
	ASSERT_TRUE (deprecated_account_format2.is_initialized ());
}

TEST (rpc, epoch_upgrade)
{
	nano::system system;
	auto node = add_ipc_enabled_node (system);
	nano::keypair key1, key2, key3;
	nano::keypair epoch_signer (nano::dev::genesis_key);
	auto send1 (std::make_shared<nano::state_block> (nano::dev::genesis_key.pub, nano::dev::genesis->hash (), nano::dev::genesis_key.pub, nano::dev::genesis_amount - 1, key1.pub, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *system.work.generate (nano::dev::genesis->hash ()))); // to opened account
	ASSERT_EQ (nano::process_result::progress, node->process (*send1).code);
	auto send2 (std::make_shared<nano::state_block> (nano::dev::genesis_key.pub, send1->hash (), nano::dev::genesis_key.pub, nano::dev::genesis_amount - 2, key2.pub, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *system.work.generate (send1->hash ()))); // to unopened account (pending)
	ASSERT_EQ (nano::process_result::progress, node->process (*send2).code);
	auto send3 (std::make_shared<nano::state_block> (nano::dev::genesis_key.pub, send2->hash (), nano::dev::genesis_key.pub, nano::dev::genesis_amount - 3, 0, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *system.work.generate (send2->hash ()))); // to burn (0)
	ASSERT_EQ (nano::process_result::progress, node->process (*send3).code);
	nano::account max_account (std::numeric_limits<nano::uint256_t>::max ());
	auto send4 (std::make_shared<nano::state_block> (nano::dev::genesis_key.pub, send3->hash (), nano::dev::genesis_key.pub, nano::dev::genesis_amount - 4, max_account, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *system.work.generate (send3->hash ()))); // to max account
	ASSERT_EQ (nano::process_result::progress, node->process (*send4).code);
	auto open (std::make_shared<nano::state_block> (key1.pub, 0, key1.pub, 1, send1->hash (), key1.prv, key1.pub, *system.work.generate (key1.pub)));
	ASSERT_EQ (nano::process_result::progress, node->process (*open).code);
	// Check accounts epochs
	{
		auto transaction (node->store.tx_begin_read ());
		ASSERT_EQ (2, node->store.account.count (transaction));
		for (auto i (node->store.account.begin (transaction)); i != node->store.account.end (); ++i)
		{
			nano::account_info info (i->second);
			ASSERT_EQ (info.epoch (), nano::epoch::epoch_0);
		}
	}
	auto [rpc, rpc_ctx] = add_rpc (system, node);
	boost::property_tree::ptree request;
	request.put ("action", "epoch_upgrade");
	request.put ("epoch", 1);
	request.put ("key", epoch_signer.prv.to_string ());
	auto response (wait_response (system, rpc, request));
	ASSERT_EQ ("1", response.get<std::string> ("started"));
	auto response_fail (wait_response
(system, rpc, request)); ASSERT_EQ ("0", response_fail.get<std::string> ("started")); ASSERT_TIMELY (10s, 4 == node->store.account.count (node->store.tx_begin_read ())); // Check upgrade { auto transaction (node->store.tx_begin_read ()); ASSERT_EQ (4, node->store.account.count (transaction)); for (auto i (node->store.account.begin (transaction)); i != node->store.account.end (); ++i) { nano::account_info info (i->second); ASSERT_EQ (info.epoch (), nano::epoch::epoch_1); } ASSERT_TRUE (node->store.account.exists (transaction, key1.pub)); ASSERT_TRUE (node->store.account.exists (transaction, key2.pub)); ASSERT_TRUE (node->store.account.exists (transaction, std::numeric_limits<nano::uint256_t>::max ())); ASSERT_FALSE (node->store.account.exists (transaction, 0)); } rpc_ctx->io_scope->reset (); // Epoch 2 upgrade auto genesis_latest (node->latest (nano::dev::genesis_key.pub)); auto send5 (std::make_shared<nano::state_block> (nano::dev::genesis_key.pub, genesis_latest, nano::dev::genesis_key.pub, nano::dev::genesis_amount - 5, 0, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *system.work.generate (genesis_latest))); // to burn (0) ASSERT_EQ (nano::process_result::progress, node->process (*send5).code); auto send6 (std::make_shared<nano::state_block> (nano::dev::genesis_key.pub, send5->hash (), nano::dev::genesis_key.pub, nano::dev::genesis_amount - 6, key1.pub, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *system.work.generate (send5->hash ()))); // to key1 (again) ASSERT_EQ (nano::process_result::progress, node->process (*send6).code); auto key1_latest (node->latest (key1.pub)); auto send7 (std::make_shared<nano::state_block> (key1.pub, key1_latest, key1.pub, 0, key3.pub, key1.prv, key1.pub, *system.work.generate (key1_latest))); // to key3 ASSERT_EQ (nano::process_result::progress, node->process (*send7).code); { // Check pending entry auto transaction (node->store.tx_begin_read ()); nano::pending_info info; ASSERT_FALSE (node->store.pending.get (transaction, nano::pending_key (key3.pub, send7->hash ()), info)); ASSERT_EQ (nano::epoch::epoch_1, info.epoch); } rpc_ctx->io_scope->renew (); request.put ("epoch", 2); auto response2 (wait_response (system, rpc, request)); ASSERT_EQ ("1", response2.get<std::string> ("started")); ASSERT_TIMELY (10s, 5 == node->store.account.count (node->store.tx_begin_read ())); // Check upgrade { auto transaction (node->store.tx_begin_read ()); ASSERT_EQ (5, node->store.account.count (transaction)); for (auto i (node->store.account.begin (transaction)); i != node->store.account.end (); ++i) { nano::account_info info (i->second); ASSERT_EQ (info.epoch (), nano::epoch::epoch_2); } ASSERT_TRUE (node->store.account.exists (transaction, key1.pub)); ASSERT_TRUE (node->store.account.exists (transaction, key2.pub)); ASSERT_TRUE (node->store.account.exists (transaction, key3.pub)); ASSERT_TRUE (node->store.account.exists (transaction, std::numeric_limits<nano::uint256_t>::max ())); ASSERT_FALSE (node->store.account.exists (transaction, 0)); } } TEST (rpc, epoch_upgrade_multithreaded) { nano::system system; nano::node_config node_config (nano::get_available_port (), system.logging); node_config.work_threads = 4; auto node = add_ipc_enabled_node (system, node_config); nano::keypair key1, key2, key3; nano::keypair epoch_signer (nano::dev::genesis_key); auto send1 (std::make_shared<nano::state_block> (nano::dev::genesis_key.pub, nano::dev::genesis->hash (), nano::dev::genesis_key.pub, nano::dev::genesis_amount - 1, key1.pub, nano::dev::genesis_key.prv, 
nano::dev::genesis_key.pub, *system.work.generate (nano::dev::genesis->hash ()))); // to opened account ASSERT_EQ (nano::process_result::progress, node->process (*send1).code); auto send2 (std::make_shared<nano::state_block> (nano::dev::genesis_key.pub, send1->hash (), nano::dev::genesis_key.pub, nano::dev::genesis_amount - 2, key2.pub, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *system.work.generate (send1->hash ()))); // to unopened account (pending) ASSERT_EQ (nano::process_result::progress, node->process (*send2).code); auto send3 (std::make_shared<nano::state_block> (nano::dev::genesis_key.pub, send2->hash (), nano::dev::genesis_key.pub, nano::dev::genesis_amount - 3, 0, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *system.work.generate (send2->hash ()))); // to burn (0) ASSERT_EQ (nano::process_result::progress, node->process (*send3).code); nano::account max_account (std::numeric_limits<nano::uint256_t>::max ()); auto send4 (std::make_shared<nano::state_block> (nano::dev::genesis_key.pub, send3->hash (), nano::dev::genesis_key.pub, nano::dev::genesis_amount - 4, max_account, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *system.work.generate (send3->hash ()))); // to max account ASSERT_EQ (nano::process_result::progress, node->process (*send4).code); auto open (std::make_shared<nano::state_block> (key1.pub, 0, key1.pub, 1, send1->hash (), key1.prv, key1.pub, *system.work.generate (key1.pub))); ASSERT_EQ (nano::process_result::progress, node->process (*open).code); // Check accounts epochs { auto transaction (node->store.tx_begin_read ()); ASSERT_EQ (2, node->store.account.count (transaction)); for (auto i (node->store.account.begin (transaction)); i != node->store.account.end (); ++i) { nano::account_info info (i->second); ASSERT_EQ (info.epoch (), nano::epoch::epoch_0); } } auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "epoch_upgrade"); request.put ("threads", 2); request.put ("epoch", 1); request.put ("key", epoch_signer.prv.to_string ()); auto response (wait_response (system, rpc, request)); ASSERT_EQ ("1", response.get<std::string> ("started")); ASSERT_TIMELY (5s, 4 == node->store.account.count (node->store.tx_begin_read ())); // Check upgrade { auto transaction (node->store.tx_begin_read ()); ASSERT_EQ (4, node->store.account.count (transaction)); for (auto i (node->store.account.begin (transaction)); i != node->store.account.end (); ++i) { nano::account_info info (i->second); ASSERT_EQ (info.epoch (), nano::epoch::epoch_1); } ASSERT_TRUE (node->store.account.exists (transaction, key1.pub)); ASSERT_TRUE (node->store.account.exists (transaction, key2.pub)); ASSERT_TRUE (node->store.account.exists (transaction, std::numeric_limits<nano::uint256_t>::max ())); ASSERT_FALSE (node->store.account.exists (transaction, 0)); } rpc_ctx->io_scope->reset (); // Epoch 2 upgrade auto genesis_latest (node->latest (nano::dev::genesis_key.pub)); auto send5 (std::make_shared<nano::state_block> (nano::dev::genesis_key.pub, genesis_latest, nano::dev::genesis_key.pub, nano::dev::genesis_amount - 5, 0, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *system.work.generate (genesis_latest))); // to burn (0) ASSERT_EQ (nano::process_result::progress, node->process (*send5).code); auto send6 (std::make_shared<nano::state_block> (nano::dev::genesis_key.pub, send5->hash (), nano::dev::genesis_key.pub, nano::dev::genesis_amount - 6, key1.pub, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, 
*system.work.generate (send5->hash ()))); // to key1 (again) ASSERT_EQ (nano::process_result::progress, node->process (*send6).code); auto key1_latest (node->latest (key1.pub)); auto send7 (std::make_shared<nano::state_block> (key1.pub, key1_latest, key1.pub, 0, key3.pub, key1.prv, key1.pub, *system.work.generate (key1_latest))); // to key3 ASSERT_EQ (nano::process_result::progress, node->process (*send7).code); { // Check pending entry auto transaction (node->store.tx_begin_read ()); nano::pending_info info; ASSERT_FALSE (node->store.pending.get (transaction, nano::pending_key (key3.pub, send7->hash ()), info)); ASSERT_EQ (nano::epoch::epoch_1, info.epoch); } rpc_ctx->io_scope->renew (); request.put ("epoch", 2); auto response2 (wait_response (system, rpc, request)); ASSERT_EQ ("1", response2.get<std::string> ("started")); ASSERT_TIMELY (5s, 5 == node->store.account.count (node->store.tx_begin_read ())); // Check upgrade { auto transaction (node->store.tx_begin_read ()); ASSERT_EQ (5, node->store.account.count (transaction)); for (auto i (node->store.account.begin (transaction)); i != node->store.account.end (); ++i) { nano::account_info info (i->second); ASSERT_EQ (info.epoch (), nano::epoch::epoch_2); } ASSERT_TRUE (node->store.account.exists (transaction, key1.pub)); ASSERT_TRUE (node->store.account.exists (transaction, key2.pub)); ASSERT_TRUE (node->store.account.exists (transaction, key3.pub)); ASSERT_TRUE (node->store.account.exists (transaction, std::numeric_limits<nano::uint256_t>::max ())); ASSERT_FALSE (node->store.account.exists (transaction, 0)); } } TEST (rpc, account_lazy_start) { nano::system system; nano::node_flags node_flags; node_flags.disable_legacy_bootstrap = true; auto node1 = system.add_node (node_flags); nano::keypair key; // Generating test chain auto send1 (std::make_shared<nano::state_block> (nano::dev::genesis_key.pub, nano::dev::genesis->hash (), nano::dev::genesis_key.pub, nano::dev::genesis_amount - nano::Gxrb_ratio, key.pub, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *system.work.generate (nano::dev::genesis->hash ()))); ASSERT_EQ (nano::process_result::progress, node1->process (*send1).code); auto open (std::make_shared<nano::open_block> (send1->hash (), key.pub, key.pub, key.prv, key.pub, *system.work.generate (key.pub))); ASSERT_EQ (nano::process_result::progress, node1->process (*open).code); // Start lazy bootstrap with account nano::node_config node_config (nano::get_available_port (), system.logging); node_config.ipc_config.transport_tcp.enabled = true; node_config.ipc_config.transport_tcp.port = nano::get_available_port (); auto node2 = system.add_node (node_config, node_flags); node2->network.udp_channels.insert (node1->network.endpoint (), node1->network_params.protocol.protocol_version); auto [rpc, rpc_ctx] = add_rpc (system, node2); boost::property_tree::ptree request; request.put ("action", "account_info"); request.put ("account", key.pub.to_account ()); auto response (wait_response (system, rpc, request)); boost::optional<std::string> account_error (response.get_optional<std::string> ("error")); ASSERT_TRUE (account_error.is_initialized ()); // Check processed blocks ASSERT_TIMELY (10s, !node2->bootstrap_initiator.in_progress ()); node2->block_processor.flush (); ASSERT_TRUE (node2->ledger.block_or_pruned_exists (send1->hash ())); ASSERT_TRUE (node2->ledger.block_or_pruned_exists (open->hash ())); } TEST (rpc, receive) { nano::system system; auto node = add_ipc_enabled_node (system); auto wallet = system.wallet (0); std::string 
wallet_text; node->wallets.items.begin ()->first.encode_hex (wallet_text); wallet->insert_adhoc (nano::dev::genesis_key.prv); nano::keypair key1; wallet->insert_adhoc (key1.prv); auto send1 (wallet->send_action (nano::dev::genesis_key.pub, key1.pub, node->config.receive_minimum.number (), *node->work_generate_blocking (nano::dev::genesis->hash ()))); ASSERT_TIMELY (5s, node->balance (nano::dev::genesis_key.pub) != nano::dev::genesis_amount); ASSERT_TIMELY (10s, !node->store.account.exists (node->store.tx_begin_read (), key1.pub)); // Send below minimum receive amount auto send2 (wallet->send_action (nano::dev::genesis_key.pub, key1.pub, node->config.receive_minimum.number () - 1, *node->work_generate_blocking (send1->hash ()))); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "receive"); request.put ("wallet", wallet_text); request.put ("account", key1.pub.to_account ()); request.put ("block", send2->hash ().to_string ()); { auto response (wait_response (system, rpc, request)); auto receive_text (response.get<std::string> ("block")); nano::account_info info; ASSERT_FALSE (node->store.account.get (node->store.tx_begin_read (), key1.pub, info)); ASSERT_EQ (info.head, nano::block_hash{ receive_text }); } // Trying to receive the same block should fail with unreceivable { auto response (wait_response (system, rpc, request)); ASSERT_EQ (std::error_code (nano::error_process::unreceivable).message (), response.get<std::string> ("error")); } // Trying to receive a non-existing block should fail request.put ("block", nano::block_hash (send2->hash ().number () + 1).to_string ()); { auto response (wait_response (system, rpc, request)); ASSERT_EQ (std::error_code (nano::error_blocks::not_found).message (), response.get<std::string> ("error")); } } TEST (rpc, receive_unopened) { nano::system system; auto node = add_ipc_enabled_node (system); auto wallet = system.wallet (0); std::string wallet_text; node->wallets.items.begin ()->first.encode_hex (wallet_text); wallet->insert_adhoc (nano::dev::genesis_key.prv); // Test receiving for unopened account nano::keypair key1; auto send1 (wallet->send_action (nano::dev::genesis_key.pub, key1.pub, node->config.receive_minimum.number () - 1, *node->work_generate_blocking (nano::dev::genesis->hash ()))); ASSERT_TIMELY (5s, !node->balance (nano::dev::genesis_key.pub) != nano::dev::genesis_amount); ASSERT_FALSE (node->store.account.exists (node->store.tx_begin_read (), key1.pub)); ASSERT_TRUE (node->store.block.exists (node->store.tx_begin_read (), send1->hash ())); wallet->insert_adhoc (key1.prv); // should not auto receive, amount sent was lower than minimum auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "receive"); request.put ("wallet", wallet_text); request.put ("account", key1.pub.to_account ()); request.put ("block", send1->hash ().to_string ()); { auto response (wait_response (system, rpc, request)); auto receive_text (response.get<std::string> ("block")); nano::account_info info; ASSERT_FALSE (node->store.account.get (node->store.tx_begin_read (), key1.pub, info)); ASSERT_EQ (info.head, info.open_block); ASSERT_EQ (info.head.to_string (), receive_text); ASSERT_EQ (info.representative, nano::dev::genesis_key.pub); } rpc_ctx->io_scope->reset (); // Test receiving for an unopened with a different wallet representative nano::keypair key2; auto prev_amount (node->balance (nano::dev::genesis_key.pub)); auto send2 (wallet->send_action 
(nano::dev::genesis_key.pub, key2.pub, node->config.receive_minimum.number () - 1, *node->work_generate_blocking (send1->hash ()))); ASSERT_TIMELY (5s, !node->balance (nano::dev::genesis_key.pub) != prev_amount); ASSERT_FALSE (node->store.account.exists (node->store.tx_begin_read (), key2.pub)); ASSERT_TRUE (node->store.block.exists (node->store.tx_begin_read (), send2->hash ())); nano::public_key rep; wallet->store.representative_set (node->wallets.tx_begin_write (), rep); wallet->insert_adhoc (key2.prv); // should not auto receive, amount sent was lower than minimum rpc_ctx->io_scope->renew (); request.put ("account", key2.pub.to_account ()); request.put ("block", send2->hash ().to_string ()); { auto response (wait_response (system, rpc, request)); auto receive_text (response.get<std::string> ("block")); nano::account_info info; ASSERT_FALSE (node->store.account.get (node->store.tx_begin_read (), key2.pub, info)); ASSERT_EQ (info.head, info.open_block); ASSERT_EQ (info.head.to_string (), receive_text); ASSERT_EQ (info.representative, rep); } } TEST (rpc, receive_work_disabled) { nano::system system; nano::node_config config (nano::get_available_port (), system.logging); auto & worker_node = *system.add_node (config); config.peering_port = nano::get_available_port (); config.work_threads = 0; auto node = add_ipc_enabled_node (system, config); auto wallet = system.wallet (1); std::string wallet_text; node->wallets.items.begin ()->first.encode_hex (wallet_text); wallet->insert_adhoc (nano::dev::genesis_key.prv); nano::keypair key1; nano::genesis genesis; ASSERT_TRUE (worker_node.work_generation_enabled ()); auto send1 (wallet->send_action (nano::dev::genesis_key.pub, key1.pub, node->config.receive_minimum.number () - 1, *worker_node.work_generate_blocking (genesis.hash ()), false)); ASSERT_TRUE (send1 != nullptr); ASSERT_TIMELY (5s, node->balance (nano::dev::genesis_key.pub) != nano::dev::genesis_amount); ASSERT_FALSE (node->store.account.exists (node->store.tx_begin_read (), key1.pub)); ASSERT_TRUE (node->store.block.exists (node->store.tx_begin_read (), send1->hash ())); wallet->insert_adhoc (key1.prv); auto [rpc, rpc_ctx] = add_rpc (system, node); boost::property_tree::ptree request; request.put ("action", "receive"); request.put ("wallet", wallet_text); request.put ("account", key1.pub.to_account ()); request.put ("block", send1->hash ().to_string ()); { auto response (wait_response (system, rpc, request)); ASSERT_EQ (std::error_code (nano::error_common::disabled_work_generation).message (), response.get<std::string> ("error")); } } TEST (rpc, receive_pruned) { nano::system system; auto & node1 = *system.add_node (); nano::node_config node_config (nano::get_available_port (), system.logging); node_config.enable_voting = false; // Remove after allowing pruned voting nano::node_flags node_flags; node_flags.enable_pruning = true; auto node2 = add_ipc_enabled_node (system, node_config, node_flags); auto wallet1 = system.wallet (0); auto wallet2 = system.wallet (1); std::string wallet_text; node2->wallets.items.begin ()->first.encode_hex (wallet_text); wallet1->insert_adhoc (nano::dev::genesis_key.prv); nano::keypair key1; wallet2->insert_adhoc (key1.prv); auto send1 (wallet1->send_action (nano::dev::genesis_key.pub, key1.pub, node2->config.receive_minimum.number (), *node2->work_generate_blocking (nano::dev::genesis->hash ()))); ASSERT_TIMELY (5s, node2->balance (nano::dev::genesis_key.pub) != nano::dev::genesis_amount); ASSERT_TIMELY (10s, !node2->store.account.exists 
(node2->store.tx_begin_read (), key1.pub));
	// Send below minimum receive amount
	auto send2 (wallet1->send_action (nano::dev::genesis_key.pub, key1.pub, node2->config.receive_minimum.number () - 1, *node2->work_generate_blocking (send1->hash ())));
	// Extra send frontier
	auto send3 (wallet1->send_action (nano::dev::genesis_key.pub, key1.pub, node2->config.receive_minimum.number (), *node2->work_generate_blocking (send1->hash ())));
	// Pruning
	ASSERT_TIMELY (5s, node2->ledger.cache.cemented_count == 6 && node2->confirmation_height_processor.current ().is_zero () && node2->confirmation_height_processor.awaiting_processing_size () == 0);
	{
		auto transaction (node2->store.tx_begin_write ());
		ASSERT_EQ (2, node2->ledger.pruning_action (transaction, send2->hash (), 1));
	}
	ASSERT_EQ (2, node2->ledger.cache.pruned_count);
	ASSERT_TRUE (node2->ledger.block_or_pruned_exists (send1->hash ()));
	ASSERT_FALSE (node2->store.block.exists (node2->store.tx_begin_read (), send1->hash ()));
	ASSERT_TRUE (node2->ledger.block_or_pruned_exists (send2->hash ()));
	ASSERT_FALSE (node2->store.block.exists (node2->store.tx_begin_read (), send2->hash ()));
	ASSERT_TRUE (node2->ledger.block_or_pruned_exists (send3->hash ()));
	auto [rpc, rpc_ctx] = add_rpc (system, node2);
	boost::property_tree::ptree request;
	request.put ("action", "receive");
	request.put ("wallet", wallet_text);
	request.put ("account", key1.pub.to_account ());
	request.put ("block", send2->hash ().to_string ());
	{
		auto response (wait_response (system, rpc, request));
		auto receive_text (response.get<std::string> ("block"));
		nano::account_info info;
		ASSERT_FALSE (node2->store.account.get (node2->store.tx_begin_read (), key1.pub, info));
		ASSERT_EQ (info.head, nano::block_hash{ receive_text });
	}
	// Trying to receive the same block should fail with unreceivable
	{
		auto response (wait_response (system, rpc, request));
		ASSERT_EQ (std::error_code (nano::error_process::unreceivable).message (), response.get<std::string> ("error"));
	}
	// Trying to receive a non-existing block should fail
	request.put ("block", nano::block_hash (send2->hash ().number () + 1).to_string ());
	{
		auto response (wait_response (system, rpc, request));
		ASSERT_EQ (std::error_code (nano::error_blocks::not_found).message (), response.get<std::string> ("error"));
	}
}

TEST (rpc, telemetry_single)
{
	nano::system system (1);
	auto node1 = add_ipc_enabled_node (system);
	auto [rpc, rpc_ctx] = add_rpc (system, node1);

	// Wait until peers are stored as they are done in the background
	auto peers_stored = false;
	ASSERT_TIMELY (10s, node1->store.peer.count (node1->store.tx_begin_read ()) != 0);

	// Missing port
	boost::property_tree::ptree request;
	auto node = system.nodes.front ();
	request.put ("action", "telemetry");
	request.put ("address", "not_a_valid_address");
	{
		auto response (wait_response (system, rpc, request, 10s));
		ASSERT_EQ (std::error_code (nano::error_rpc::requires_port_and_address).message (), response.get<std::string> ("error"));
	}

	// Missing address
	request.erase ("address");
	request.put ("port", 65);
	{
		auto response (wait_response (system, rpc, request, 10s));
		ASSERT_EQ (std::error_code (nano::error_rpc::requires_port_and_address).message (), response.get<std::string> ("error"));
	}

	// Try with invalid address
	request.put ("address", "not_a_valid_address");
	request.put ("port", 65);
	{
		auto response (wait_response (system, rpc, request, 10s));
		ASSERT_EQ (std::error_code (nano::error_common::invalid_ip_address).message (), response.get<std::string> ("error"));
	}

	// Then invalid port
	request.put
("address", (boost::format ("%1%") % node->network.endpoint ().address ()).str ()); request.put ("port", "invalid port"); { auto response (wait_response (system, rpc, request, 10s)); ASSERT_EQ (std::error_code (nano::error_common::invalid_port).message (), response.get<std::string> ("error")); } // Use correctly formed address and port request.put ("port", node->network.endpoint ().port ()); { auto response (wait_response (system, rpc, request, 10s)); nano::jsonconfig config (response); nano::telemetry_data telemetry_data; auto const should_ignore_identification_metrics = false; ASSERT_FALSE (telemetry_data.deserialize_json (config, should_ignore_identification_metrics)); nano::compare_default_telemetry_response_data (telemetry_data, node->network_params, node->config.bandwidth_limit, node->default_difficulty (nano::work_version::work_1), node->node_id); } } TEST (rpc, telemetry_all) { nano::system system (1); auto node1 = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node1); // Wait until peers are stored as they are done in the background ASSERT_TIMELY (10s, node1->store.peer.count (node1->store.tx_begin_read ()) != 0); // First need to set up the cached data std::atomic<bool> done{ false }; auto node = system.nodes.front (); node1->telemetry->get_metrics_single_peer_async (node1->network.find_channel (node->network.endpoint ()), [&done] (nano::telemetry_data_response const & telemetry_data_response_a) { ASSERT_FALSE (telemetry_data_response_a.error); done = true; }); ASSERT_TIMELY (10s, done); boost::property_tree::ptree request; request.put ("action", "telemetry"); { auto response (wait_response (system, rpc, request, 10s)); nano::jsonconfig config (response); nano::telemetry_data telemetry_data; auto const should_ignore_identification_metrics = true; ASSERT_FALSE (telemetry_data.deserialize_json (config, should_ignore_identification_metrics)); nano::compare_default_telemetry_response_data_excluding_signature (telemetry_data, node->network_params, node->config.bandwidth_limit, node->default_difficulty (nano::work_version::work_1)); ASSERT_FALSE (response.get_optional<std::string> ("node_id").is_initialized ()); ASSERT_FALSE (response.get_optional<std::string> ("signature").is_initialized ()); } request.put ("raw", "true"); auto response (wait_response (system, rpc, request, 10s)); // This may fail if the response has taken longer than the cache cutoff time. 
auto & all_metrics = response.get_child ("metrics"); auto & metrics = all_metrics.front ().second; ASSERT_EQ (1, all_metrics.size ()); nano::jsonconfig config (metrics); nano::telemetry_data data; auto const should_ignore_identification_metrics = false; ASSERT_FALSE (data.deserialize_json (config, should_ignore_identification_metrics)); nano::compare_default_telemetry_response_data (data, node->network_params, node->config.bandwidth_limit, node->default_difficulty (nano::work_version::work_1), node->node_id); ASSERT_EQ (node->network.endpoint ().address ().to_string (), metrics.get<std::string> ("address")); ASSERT_EQ (node->network.endpoint ().port (), metrics.get<uint16_t> ("port")); ASSERT_TRUE (node1->network.find_node_id (data.node_id)); } // Also tests all forms of ipv4/ipv6 TEST (rpc, telemetry_self) { nano::system system; auto node1 = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node1); // Just to have peer count at 1 node1->network.udp_channels.insert (nano::endpoint (boost::asio::ip::make_address_v6 ("::1"), nano::get_available_port ()), 0); boost::property_tree::ptree request; request.put ("action", "telemetry"); request.put ("address", "::1"); request.put ("port", node1->network.endpoint ().port ()); auto const should_ignore_identification_metrics = false; { auto response (wait_response (system, rpc, request, 10s)); nano::telemetry_data data; nano::jsonconfig config (response); ASSERT_FALSE (data.deserialize_json (config, should_ignore_identification_metrics)); nano::compare_default_telemetry_response_data (data, node1->network_params, node1->config.bandwidth_limit, node1->default_difficulty (nano::work_version::work_1), node1->node_id); } request.put ("address", "[::1]"); { auto response (wait_response (system, rpc, request, 10s)); nano::telemetry_data data; nano::jsonconfig config (response); ASSERT_FALSE (data.deserialize_json (config, should_ignore_identification_metrics)); nano::compare_default_telemetry_response_data (data, node1->network_params, node1->config.bandwidth_limit, node1->default_difficulty (nano::work_version::work_1), node1->node_id); } request.put ("address", "127.0.0.1"); { auto response (wait_response (system, rpc, request, 10s)); nano::telemetry_data data; nano::jsonconfig config (response); ASSERT_FALSE (data.deserialize_json (config, should_ignore_identification_metrics)); nano::compare_default_telemetry_response_data (data, node1->network_params, node1->config.bandwidth_limit, node1->default_difficulty (nano::work_version::work_1), node1->node_id); } // Incorrect port should fail request.put ("port", "0"); { auto response (wait_response (system, rpc, request, 10s)); ASSERT_EQ (std::error_code (nano::error_rpc::peer_not_found).message (), response.get<std::string> ("error")); } } TEST (rpc, confirmation_active) { nano::system system; nano::node_config node_config; node_config.ipc_config.transport_tcp.enabled = true; node_config.ipc_config.transport_tcp.port = nano::get_available_port (); nano::node_flags node_flags; node_flags.disable_request_loop = true; auto node1 (system.add_node (node_config, node_flags)); auto [rpc, rpc_ctx] = add_rpc (system, node1); nano::genesis genesis; auto send1 (std::make_shared<nano::send_block> (genesis.hash (), nano::public_key (), nano::dev::genesis_amount - 100, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *system.work.generate (genesis.hash ()))); auto send2 (std::make_shared<nano::send_block> (send1->hash (), nano::public_key (), nano::dev::genesis_amount - 200, 
nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *system.work.generate (send1->hash ()))); node1->process_active (send1); node1->process_active (send2); nano::blocks_confirm (*node1, { send1, send2 }); ASSERT_EQ (2, node1->active.size ()); auto election (node1->active.election (send1->qualified_root ())); ASSERT_NE (nullptr, election); election->force_confirm (); boost::property_tree::ptree request; request.put ("action", "confirmation_active"); { auto response (wait_response (system, rpc, request)); auto & confirmations (response.get_child ("confirmations")); ASSERT_EQ (1, confirmations.size ()); ASSERT_EQ (send2->qualified_root ().to_string (), confirmations.front ().second.get<std::string> ("")); ASSERT_EQ (1, response.get<unsigned> ("unconfirmed")); ASSERT_EQ (1, response.get<unsigned> ("confirmed")); } } TEST (rpc, confirmation_info) { nano::system system; auto node1 = add_ipc_enabled_node (system); auto [rpc, rpc_ctx] = add_rpc (system, node1); nano::genesis genesis; auto send (std::make_shared<nano::send_block> (genesis.hash (), nano::public_key (), nano::dev::genesis_amount - 100, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *system.work.generate (genesis.hash ()))); node1->process_active (send); node1->block_processor.flush (); node1->scheduler.flush (); ASSERT_FALSE (node1->active.empty ()); boost::property_tree::ptree request; request.put ("action", "confirmation_info"); request.put ("root", send->qualified_root ().to_string ()); request.put ("representatives", "true"); request.put ("json_block", "true"); { auto response (wait_response (system, rpc, request)); ASSERT_EQ (1, response.count ("announcements")); ASSERT_EQ (1, response.get<unsigned> ("voters")); ASSERT_EQ (send->hash ().to_string (), response.get<std::string> ("last_winner")); auto & blocks (response.get_child ("blocks")); ASSERT_EQ (1, blocks.size ()); auto & representatives (blocks.front ().second.get_child ("representatives")); ASSERT_EQ (1, representatives.size ()); ASSERT_EQ (0, response.get<unsigned> ("total_tally")); } }
1
16,809
Looks like this used to be UB before, with an empty `blocks` and then calling `.second` on the `.front()`. Did it ever crash, though?
nanocurrency-nano-node
cpp
@@ -121,7 +121,7 @@ function parseRunOn(runOn) { } function generateTopologyTests(testSuites, testContext, filter) { - testSuites.forEach(testSuite => { + for (const testSuite of testSuites) { // TODO: remove this when SPEC-1255 is completed let runOn = testSuite.runOn; if (!testSuite.runOn) {
1
'use strict'; const path = require('path'); const fs = require('fs'); const chai = require('chai'); const expect = chai.expect; const { EJSON } = require('bson'); const { isRecord } = require('../../../src/utils'); const TestRunnerContext = require('./context').TestRunnerContext; const resolveConnectionString = require('./utils').resolveConnectionString; const { shouldRunServerlessTest } = require('../../tools/utils'); // Promise.try alternative https://stackoverflow.com/questions/60624081/promise-try-without-bluebird/60624164?noredirect=1#comment107255389_60624164 function promiseTry(callback) { return new Promise((resolve, reject) => { try { resolve(callback()); } catch (e) { reject(e); } }); } chai.use(require('chai-subset')); chai.use(require('./matcher').default); function escape(string) { return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); } function translateClientOptions(options) { Object.keys(options).forEach(key => { if (['j', 'journal', 'fsync', 'wtimeout', 'wtimeoutms'].indexOf(key) >= 0) { throw new Error( `Unhandled write concern key needs to be added to options.writeConcern: ${key}` ); } if (key === 'w') { options.writeConcern = { w: options.w }; delete options[key]; } else if (key === 'readConcernLevel') { options.readConcern = { level: options.readConcernLevel }; delete options[key]; } else if (key === 'autoEncryptOpts') { options.autoEncryption = Object.assign({}, options.autoEncryptOpts); if (options.autoEncryptOpts.keyVaultNamespace == null) { options.autoEncryption.keyVaultNamespace = 'keyvault.datakeys'; } if (options.autoEncryptOpts.kmsProviders) { const kmsProviders = EJSON.parse(process.env.CSFLE_KMS_PROVIDERS || 'NOT_PROVIDED'); if (options.autoEncryptOpts.kmsProviders.local) { kmsProviders.local = options.autoEncryptOpts.kmsProviders.local; } if (options.autoEncryptOpts.kmsProviders.awsTemporary) { kmsProviders.aws = { accessKeyId: process.env.CSFLE_AWS_TEMP_ACCESS_KEY_ID, secretAccessKey: process.env.CSFLE_AWS_TEMP_SECRET_ACCESS_KEY, sessionToken: process.env.CSFLE_AWS_TEMP_SESSION_TOKEN }; } if (options.autoEncryptOpts.kmsProviders.awsTemporaryNoSessionToken) { kmsProviders.aws = { accessKeyId: process.env.CSFLE_AWS_TEMP_ACCESS_KEY_ID, secretAccessKey: process.env.CSFLE_AWS_TEMP_SECRET_ACCESS_KEY }; } options.autoEncryption.kmsProviders = kmsProviders; } delete options.autoEncryptOpts; } }); return options; } function gatherTestSuites(specPath) { return fs .readdirSync(specPath) .filter(x => x.indexOf('.json') !== -1) .map(x => Object.assign(EJSON.parse(fs.readFileSync(path.join(specPath, x)), { relaxed: true }), { name: path.basename(x, '.json') }) ); } function parseTopologies(topologies) { if (topologies == null) { return ['replicaset', 'sharded', 'single']; } return topologies; } function parseRunOn(runOn) { return runOn.map(config => { const topology = parseTopologies(config.topology); const version = []; if (config.minServerVersion) { version.push(`>= ${config.minServerVersion}`); } if (config.maxServerVersion) { version.push(`<= ${config.maxServerVersion}`); } const mongodb = version.join(' '); return { topology, mongodb, authEnabled: !!config.authEnabled, serverless: config.serverless }; }); } function generateTopologyTests(testSuites, testContext, filter) { testSuites.forEach(testSuite => { // TODO: remove this when SPEC-1255 is completed let runOn = testSuite.runOn; if (!testSuite.runOn) { runOn = [{ minServerVersion: testSuite.minServerVersion }]; if (testSuite.maxServerVersion) { runOn.push({ maxServerVersion: testSuite.maxServerVersion }); } } 
const environmentRequirementList = parseRunOn(runOn); environmentRequirementList.forEach(requires => { const suiteName = `${testSuite.name} - ${requires.topology.join()}`; describe(suiteName, { metadata: { requires }, test: function () { beforeEach(() => prepareDatabaseForSuite(testSuite, testContext)); afterEach(() => testContext.cleanupAfterSuite()); testSuite.tests.forEach(spec => { const maybeIt = shouldRunSpecTest(this.configuration, requires, spec, filter) ? it : it.skip; maybeIt(spec.description, function () { let testPromise = Promise.resolve(); if (spec.failPoint) { testPromise = testPromise.then(() => testContext.enableFailPoint(spec.failPoint)); } // run the actual test testPromise = testPromise.then(() => runTestSuiteTest(this.configuration, spec, testContext) ); if (spec.failPoint) { testPromise = testPromise.then(() => testContext.disableFailPoint(spec.failPoint)); } return testPromise.then(() => validateOutcome(spec, testContext)); }); }); } }); }); }); } function shouldRunSpecTest(configuration, requires, spec, filter) { if (requires.authEnabled && process.env.AUTH !== 'auth') { // TODO(NODE-3488): We do not have a way to determine if auth is enabled in our mocha metadata // We need to do a admin.command({getCmdLineOpts: 1}) if it errors (code=13) auth is on return false; } if ( requires.serverless && !shouldRunServerlessTest(requires.serverless, !!process.env.SERVERLESS) ) { return false; } if ( spec.operations.some( op => op.name === 'waitForEvent' && op.arguments.event === 'PoolReadyEvent' ) ) { // TODO(NODE-2994): Connection storms work will add new events to connection pool return false; } if (spec.skipReason || (filter && typeof filter === 'function' && !filter(spec, configuration))) { return false; } return true; } // Test runner helpers function prepareDatabaseForSuite(suite, context) { context.dbName = suite.database_name || 'spec_db'; context.collectionName = suite.collection_name || 'spec_collection'; const db = context.sharedClient.db(context.dbName); if (context.skipPrepareDatabase) return Promise.resolve(); // Note: killAllSession is not supported on serverless, see CLOUDP-84298 const setupPromise = context.serverless ? 
Promise.resolve() : db .admin() .command({ killAllSessions: [] }) .catch(err => { if ( err.message.match(/no such (cmd|command)/) || err.message.match(/Failed to kill on some hosts/) || err.code === 11601 ) { return; } throw err; }); if (context.collectionName == null || context.dbName === 'admin') { return setupPromise; } const coll = db.collection(context.collectionName); return setupPromise .then(() => coll.drop({ writeConcern: { w: 'majority' } })) .catch(err => { if (!err.message.match(/ns not found/)) throw err; }) .then(() => { if (suite.key_vault_data) { const dataKeysCollection = context.sharedClient.db('keyvault').collection('datakeys'); return dataKeysCollection .drop({ writeConcern: { w: 'majority' } }) .catch(err => { if (!err.message.match(/ns not found/)) { throw err; } }) .then(() => { if (suite.key_vault_data.length) { return dataKeysCollection.insertMany(suite.key_vault_data, { writeConcern: { w: 'majority' } }); } }); } }) .then(() => { const options = { writeConcern: { w: 'majority' } }; if (suite.json_schema) { options.validator = { $jsonSchema: suite.json_schema }; } return db.createCollection(context.collectionName, options); }) .then(() => { if (suite.data && Array.isArray(suite.data) && suite.data.length > 0) { return coll.insertMany(suite.data, { writeConcern: { w: 'majority' } }); } }) .then(() => { return context.runForAllClients(client => { return client .db(context.dbName) .collection(context.collectionName) .distinct('x') .catch(() => {}); }); }); } function parseSessionOptions(options) { const result = Object.assign({}, options); if (result.defaultTransactionOptions && result.defaultTransactionOptions.readPreference) { result.defaultTransactionOptions.readPreference = normalizeReadPreference( result.defaultTransactionOptions.readPreference.mode ); } return result; } const IGNORED_COMMANDS = new Set(['ismaster', 'configureFailPoint', 'endSessions']); const SDAM_EVENTS = new Set([ 'serverOpening', 'serverClosed', 'serverDescriptionChanged', 'topologyOpening', 'topologyClosed', 'topologyDescriptionChanged', 'serverHeartbeatStarted', 'serverHeartbeatSucceeded', 'serverHeartbeatFailed' ]); const CMAP_EVENTS = new Set([ 'connectionPoolCreated', 'connectionPoolClosed', 'connectionCreated', 'connectionReady', 'connectionClosed', 'connectionCheckOutStarted', 'connectionCheckOutFailed', 'connectionCheckedOut', 'connectionCheckedIn', 'connectionPoolCleared' ]); let displayCommands = false; function runTestSuiteTest(configuration, spec, context) { context.commandEvents = []; const clientOptions = translateClientOptions( Object.assign( { heartbeatFrequencyMS: 100, minHeartbeatFrequencyMS: 100, monitorCommands: true }, spec.clientOptions ) ); const url = resolveConnectionString(configuration, spec, context); const client = configuration.newClient(url, clientOptions); CMAP_EVENTS.forEach(eventName => client.on(eventName, event => context.cmapEvents.push(event))); SDAM_EVENTS.forEach(eventName => client.on(eventName, event => context.sdamEvents.push(event))); let skippedInitialPing = false; client.on('commandStarted', event => { if (IGNORED_COMMANDS.has(event.commandName)) { return; } // If credentials were provided, then the Topology sends an initial `ping` command // that we want to skip if (event.commandName === 'ping' && client.topology.s.credentials && !skippedInitialPing) { skippedInitialPing = true; return; } context.commandEvents.push(event); // very useful for debugging if (displayCommands) { // console.dir(event, { depth: 5 }); } }); return 
client.connect().then(client => { context.testClient = client; const sessionOptions = Object.assign({}, spec.transactionOptions); spec.sessionOptions = spec.sessionOptions || {}; const database = client.db(context.dbName); let session0, session1; let savedSessionData; if (context.useSessions) { try { session0 = client.startSession( Object.assign({}, sessionOptions, parseSessionOptions(spec.sessionOptions.session0)) ); session1 = client.startSession( Object.assign({}, sessionOptions, parseSessionOptions(spec.sessionOptions.session1)) ); savedSessionData = { session0: JSON.parse(EJSON.stringify(session0.id)), session1: JSON.parse(EJSON.stringify(session1.id)) }; } catch (err) { // ignore } } // enable to see useful APM debug information at the time of actual test run // displayCommands = true; const operationContext = { client, database, collectionName: context.collectionName, session0, session1, testRunner: context }; let testPromise = Promise.resolve(); return testPromise .then(() => testOperations(spec, operationContext)) .catch(err => { // If the driver throws an exception / returns an error while executing this series // of operations, store the error message. throw err; }) .then(() => { const promises = []; if (session0) promises.push(session0.endSession()); if (session1) promises.push(session1.endSession()); return Promise.all(promises); }) .then(() => validateExpectations(context.commandEvents, spec, savedSessionData)); }); } function validateOutcome(testData, testContext) { if (testData.outcome && testData.outcome.collection) { const outcomeCollection = testData.outcome.collection.name || testContext.collectionName; // use the client without transactions to verify return testContext.sharedClient .db(testContext.dbName) .collection(outcomeCollection) .find({}, { readPreference: 'primary', readConcern: { level: 'local' } }) .sort({ _id: 1 }) .toArray() .then(docs => { expect(docs).to.matchMongoSpec(testData.outcome.collection.data); }); } return Promise.resolve(); } function validateExpectations(commandEvents, spec, savedSessionData) { if (!spec.expectations || !Array.isArray(spec.expectations) || spec.expectations.length === 0) { return; } const actualEvents = normalizeCommandShapes(commandEvents); const rawExpectedEvents = spec.expectations.map(x => x.command_started_event); const expectedEvents = normalizeCommandShapes(rawExpectedEvents); expect(actualEvents).to.have.length(expectedEvents.length); expectedEvents.forEach((expected, idx) => { const actual = actualEvents[idx]; if (expected.commandName != null) { expect(actual.commandName).to.equal(expected.commandName); } if (expected.databaseName != null) { expect(actual.databaseName).to.equal(expected.databaseName); } const actualCommand = actual.command; const expectedCommand = expected.command; if (expectedCommand.sort) { // TODO: This is a workaround that works because all sorts in the specs // are objects with one key; ideally we'd want to adjust the spec definitions // to indicate whether order matters for any given key and set general // expectations accordingly (see NODE-3235) expect(Object.keys(expectedCommand.sort)).to.have.lengthOf(1); expect(actualCommand.sort).to.be.instanceOf(Map); expect(actualCommand.sort.size).to.equal(1); const expectedKey = Object.keys(expectedCommand.sort)[0]; expect(actualCommand.sort).to.have.all.keys(expectedKey); actualCommand.sort = { [expectedKey]: actualCommand.sort.get(expectedKey) }; } expect(actualCommand).withSessionData(savedSessionData).to.matchMongoSpec(expectedCommand); }); } 
function normalizeCommandShapes(commands) { return commands.map(def => { const output = JSON.parse( EJSON.stringify( { command: def.command, commandName: def.command_name || def.commandName || Object.keys(def.command)[0], databaseName: def.database_name ? def.database_name : def.databaseName }, { relaxed: true } ) ); // TODO: this is a workaround to preserve sort Map type until NODE-3235 is completed if (def.command.sort) { output.command.sort = def.command.sort; } return output; }); } function extractCrudResult(result, operation) { if (Array.isArray(result) || !isRecord(result)) { return result; } if (result.value) { // some of our findAndModify results return more than just an id, so we need to pluck const resultKeys = Object.keys(operation.result); if (resultKeys.length === 1 && resultKeys[0] === '_id') { return { _id: result.value._id }; } return result.value; } return operation.result; } function isTransactionCommand(command) { return ['startTransaction', 'commitTransaction', 'abortTransaction'].indexOf(command) !== -1; } function isTestRunnerCommand(context, commandName) { const testRunnerContext = context.testRunner; let methods = new Set(); let object = testRunnerContext; while (object !== Object.prototype) { Object.getOwnPropertyNames(object) .filter(prop => typeof object[prop] === 'function' && prop !== 'constructor') .map(prop => methods.add(prop)); object = Object.getPrototypeOf(object); } return methods.has(commandName); } function extractBulkRequests(requests) { return requests.map(request => ({ [request.name]: request.arguments })); } function translateOperationName(operationName) { if (operationName === 'runCommand') return 'command'; if (operationName === 'listDatabaseNames') return 'listDatabases'; if (operationName === 'listCollectionNames') return 'listCollections'; return operationName; } function normalizeReadPreference(mode) { return mode.charAt(0).toLowerCase() + mode.substr(1); } function resolveOperationArgs(operationName, operationArgs, context) { const result = []; function pluck(fromObject, toArray, fields) { for (const field of fields) { if (fromObject[field]) toArray.push(fromObject[field]); } } // TODO: migrate all operations here if (operationName === 'distinct') { pluck(operationArgs, result, ['fieldName', 'filter']); if (result.length === 1) result.push({}); } else { return; } // compile the options const options = {}; if (operationArgs.options) { Object.assign(options, operationArgs.options); if (options.readPreference) { options.readPreference = normalizeReadPreference(options.readPreference.mode); } } if (operationArgs.session) { if (isTransactionCommand(operationName)) return; options.session = context[operationArgs.session]; } result.push(options); // determine if there is a callback to add if (operationArgs.callback) { result.push(() => testOperations(operationArgs.callback, context, { swallowOperationErrors: false }) ); } return result; } const CURSOR_COMMANDS = new Set(['find', 'aggregate', 'listIndexes', 'listCollections']); const ADMIN_COMMANDS = new Set(['listDatabases']); function maybeSession(operation, context) { return ( operation && operation.arguments && operation.arguments.session && context[operation.arguments.session] ); } const kOperations = new Map([ [ 'recordPrimary', (operation, testRunner, context /*, options */) => { testRunner.recordPrimary(context.client); } ], [ 'waitForPrimaryChange', (operation, testRunner, context /*, options */) => { return testRunner.waitForPrimaryChange(context.client); } ], [ 'runOnThread', (operation, 
testRunner, context, options) => { const args = operation.arguments; const threadName = args.name; const subOperation = args.operation; return testRunner.runOnThread( threadName, testOperation(subOperation, context[subOperation.object], context, options) ); } ], [ 'createIndex', (operation, collection, context /*, options */) => { const fieldOrSpec = operation.arguments.keys; const options = { session: maybeSession(operation, context) }; if (operation.arguments.name) options.name = operation.arguments.name; return collection.createIndex(fieldOrSpec, options); } ], [ 'createCollection', (operation, db, context /*, options */) => { const collectionName = operation.arguments.collection; const session = maybeSession(operation, context); return db.createCollection(collectionName, { session }); } ], [ 'dropCollection', (operation, db, context /*, options */) => { const collectionName = operation.arguments.collection; const session = maybeSession(operation, context); return db.dropCollection(collectionName, { session }); } ], [ 'dropIndex', (operation, collection /*, context, options */) => { const indexName = operation.arguments.name; const session = maybeSession(operation, context); return collection.dropIndex(indexName, { session }); } ], [ 'mapReduce', (operation, collection, context /*, options */) => { const args = operation.arguments; const map = args.map; const reduce = args.reduce; const options = { session: maybeSession(operation, context) }; if (args.out) options.out = args.out; return collection.mapReduce(map, reduce, options); } ] ]); /** * @param {object} operation the operation definition from the spec test * @param {object} obj the object to call the operation on * @param {object} context a context object containing sessions used for the test * @param {object} [options] Optional settings * @param {boolean} [options.swallowOperationErrors] Generally we want to observe operation errors, validate them against our expectations, and then swallow them. In cases like `withTransaction` we want to use the same `testOperations` to build the lambda, and in those cases it is not desireable to swallow the errors, since we need to test this behavior. 
*/ function testOperation(operation, obj, context, options) { options = options || { swallowOperationErrors: true }; const opOptions = {}; let args = []; const operationName = translateOperationName(operation.name); let opPromise; if (kOperations.has(operationName)) { opPromise = kOperations.get(operationName)(operation, obj, context, options); } else { if (operation.arguments) { args = resolveOperationArgs(operationName, operation.arguments, context); if (args == null) { args = []; Object.keys(operation.arguments).forEach(key => { if (key === 'callback') { args.push(() => testOperations(operation.arguments.callback, context, { swallowOperationErrors: false }) ); return; } if (['filter', 'fieldName', 'document', 'documents', 'pipeline'].indexOf(key) !== -1) { return args.unshift(operation.arguments[key]); } if ((key === 'map' || key === 'reduce') && operationName === 'mapReduce') { return args.unshift(operation.arguments[key]); } if (key === 'command') return args.unshift(operation.arguments[key]); if (key === 'requests') return args.unshift(extractBulkRequests(operation.arguments[key])); if (key === 'update' || key === 'replacement') return args.push(operation.arguments[key]); if (key === 'session') { if (isTransactionCommand(operationName)) return; opOptions.session = context[operation.arguments.session]; return; } if (key === 'returnDocument') { opOptions.returnDocument = operation.arguments[key].toLowerCase(); return; } if (key === 'options') { Object.assign(opOptions, operation.arguments[key]); if (opOptions.readPreference) { opOptions.readPreference = normalizeReadPreference(opOptions.readPreference.mode); } return; } if (key === 'readPreference') { opOptions[key] = normalizeReadPreference(operation.arguments[key].mode); return; } opOptions[key] = operation.arguments[key]; }); } } if ( args.length === 0 && !isTransactionCommand(operationName) && !isTestRunnerCommand(context, operationName) ) { args.push({}); } if (Object.keys(opOptions).length > 0) { // NOTE: this is awful, but in order to provide options for some methods we need to add empty // query objects. 
if (operationName === 'distinct') { args.push({}); } args.push(opOptions); } if (ADMIN_COMMANDS.has(operationName)) { obj = obj.db().admin(); } if (operation.name === 'listDatabaseNames' || operation.name === 'listCollectionNames') { opOptions.nameOnly = true; } if (CURSOR_COMMANDS.has(operationName)) { // `find` creates a cursor, so we need to call `toArray` on it const cursor = obj[operationName].apply(obj, args); opPromise = cursor.toArray(); } else { // wrap this in a `promiseTry` because some operations might throw opPromise = promiseTry(() => obj[operationName].apply(obj, args)); } } if (operation.error) { opPromise = opPromise.then( () => { throw new Error('expected an error!'); }, () => {} ); } if (operation.result) { const result = operation.result; if ( result.errorContains != null || result.errorCodeName || result.errorLabelsContain || result.errorLabelsOmit ) { return opPromise.then( () => { throw new Error('expected an error!'); }, err => { const errorContains = result.errorContains; const errorCodeName = result.errorCodeName; const errorLabelsContain = result.errorLabelsContain; const errorLabelsOmit = result.errorLabelsOmit; if (errorLabelsContain) { expect(err).to.have.property('errorLabels'); expect(err.errorLabels).to.include.members(errorLabelsContain); } if (errorLabelsOmit) { if (err.errorLabels && Array.isArray(err.errorLabels) && err.errorLabels.length !== 0) { expect(errorLabelsOmit).to.not.include.members(err.errorLabels); } } if (operation.result.errorContains) { expect(err.message).to.match(new RegExp(escape(errorContains), 'i')); } if (errorCodeName) { expect(err.codeName).to.equal(errorCodeName); } if (!options.swallowOperationErrors) { throw err; } } ); } return opPromise.then(opResult => { const actual = extractCrudResult(opResult, operation); expect(actual).to.matchMongoSpec(operation.result); }); } return opPromise; } function convertCollectionOptions(options) { const result = {}; Object.keys(options).forEach(key => { if (key === 'readPreference') { result[key] = normalizeReadPreference(options[key].mode); } else { result[key] = options[key]; } }); return result; } function testOperations(testData, operationContext, options) { options = options || { swallowOperationErrors: true }; return testData.operations.reduce((combined, operation) => { return combined.then(() => { const object = operation.object || 'collection'; if (object === 'collection') { const db = operationContext.database; const collectionName = operationContext.collectionName; const collectionOptions = operation.collectionOptions || {}; operationContext[object] = db.collection( collectionName, convertCollectionOptions(collectionOptions) ); } return testOperation(operation, operationContext[object], operationContext, options); }); }, Promise.resolve()); } module.exports = { TestRunnerContext, gatherTestSuites, generateTopologyTests, parseRunOn };
1
21,211
Sorry about the code churn here; reworking this to use normal loops and async/await made debugging far more straightforward. For example, if configuring the fail point fails, you find out when stepping over that line instead of having to look for the ultimate catch clause that handled it.
mongodb-node-mongodb-native
js
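The commit message above argues that replacing `forEach` plus promise chains with plain loops and async/await surfaces failures at the line that caused them. The following is a minimal sketch of that contrast, not code from the runner itself: `enableFailPoint` and `runSpec` are hypothetical stand-ins for the real test-runner helpers.

'use strict';

// Hypothetical helper: rejects when the spec's fail point is marked broken.
async function enableFailPoint(spec) {
  if (spec.failPoint && spec.failPoint.broken) {
    throw new Error('failed to configure fail point');
  }
}

// Hypothetical helper standing in for running one spec test.
async function runSpec(spec) {
  return `ran ${spec.description}`;
}

// Old style: the rejection only shows up in the final .catch, far from the
// line that actually failed, which makes step-debugging awkward.
function runChained(spec) {
  return Promise.resolve()
    .then(() => enableFailPoint(spec))
    .then(() => runSpec(spec))
    .catch(err => {
      console.error('something in the chain failed:', err.message);
    });
}

// New style: stepping over `await enableFailPoint(...)` stops at the failing
// line, and a plain for...of loop actually awaits each iteration
// (Array.prototype.forEach would not wait for async callbacks).
async function runSequential(specs) {
  for (const spec of specs) {
    await enableFailPoint(spec);
    console.log(await runSpec(spec));
  }
}

runChained({ description: 'chained', failPoint: { broken: true } });
runSequential([
  { description: 'works' },
  { description: 'fails', failPoint: { broken: true } }
]).catch(err => console.error('run failed:', err.message));

The sketch only illustrates the debugging argument in the message; the actual patch in this record makes the analogous change by converting `testSuites.forEach(...)` into a `for (const testSuite of testSuites)` loop.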
@@ -56,7 +56,9 @@ const test = (suite, buildConfig = config.defaultBuildConfig, options) => { braveArgs.push('--test-launcher-jobs=' + options.test_launcher_jobs) } + const args = util.buildArgsToString(config.buildArgs()) // Build the tests + util.run('gn', ['gen', config.outputDir, '--args="' + args + '"'], config.defaultOptions) util.run('ninja', ['-C', config.outputDir, suite], config.defaultOptions) if (config.targetOS === 'ios') {
1
const path = require('path') const config = require('../lib/config') const util = require('../lib/util') const getTestBinary = (suite) => { return (process.platform === 'win32') ? `${suite}.exe` : suite } const getTestsToRun = (config, suite) => { testsToRun = [suite] if (suite === 'brave_unit_tests') { if (config.targetOS !== 'android') { testsToRun.push('brave_installer_unittests') } else { testsToRun.push('bin/run_brave_public_test_apk') } } return testsToRun } const test = (suite, buildConfig = config.defaultBuildConfig, options) => { config.buildConfig = buildConfig config.update(options) const braveArgs = [ '--enable-logging=stderr' ] // Android doesn't support --v if (config.targetOS !== 'android') { braveArgs.push('--v=' + options.v) if (options.vmodule) { braveArgs.push('--vmodule=' + options.vmodule) } } if (options.filter) { braveArgs.push('--gtest_filter=' + options.filter) } if (options.output) { braveArgs.push('--gtest_output=xml:' + options.output) } if (options.disable_brave_extension) { braveArgs.push('--disable-brave-extension') } if (options.single_process) { braveArgs.push('--single_process') } if (options.test_launcher_jobs) { braveArgs.push('--test-launcher-jobs=' + options.test_launcher_jobs) } // Build the tests util.run('ninja', ['-C', config.outputDir, suite], config.defaultOptions) if (config.targetOS === 'ios') { util.run(path.join(config.outputDir, "iossim"), [ path.join(config.outputDir, `${suite}.app`), path.join(config.outputDir, `${suite}.app/PlugIns/${suite}_module.xctest`) ], config.defaultOptions) } else { // Fix the tests util.run('ninja', ['-C', config.outputDir, "fix_brave_test_install_name"], config.defaultOptions) util.run('ninja', ['-C', config.outputDir, "fix_brave_test_install_name_adblock"], config.defaultOptions) util.run('ninja', ['-C', config.outputDir, "fix_brave_test_install_name_speedreader"], config.defaultOptions) // Run the tests getTestsToRun(config, suite).forEach((testSuite) => { if (options.output) { braveArgs.splice(braveArgs.indexOf('--gtest_output=xml:' + options.output, 1)) braveArgs.push(`--gtest_output=xml:${testSuite}.xml`) } util.run(path.join(config.outputDir, getTestBinary(testSuite)), braveArgs, config.defaultOptions) }) } } module.exports = test
1
6,672
this is wrong and should be reverted
brave-brave-browser
js