diff --git a/go/app/cli.go b/go/app/cli.go index 095705b7e..93f46b64f 100644 --- a/go/app/cli.go +++ b/go/app/cli.go @@ -609,7 +609,7 @@ func Cli(command string, strict bool, instance string, destination string, owner case registerCliCommand("stop-slave", "Replication, general", `Issue a STOP SLAVE on an instance`): { instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) - _, err := inst.StopSlave(instanceKey) + _, err := inst.StopReplication(instanceKey) if err != nil { log.Fatale(err) } @@ -618,7 +618,7 @@ func Cli(command string, strict bool, instance string, destination string, owner case registerCliCommand("start-slave", "Replication, general", `Issue a START SLAVE on an instance`): { instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) - _, err := inst.StartSlave(instanceKey) + _, err := inst.StartReplication(instanceKey) if err != nil { log.Fatale(err) } @@ -627,7 +627,7 @@ func Cli(command string, strict bool, instance string, destination string, owner case registerCliCommand("restart-slave", "Replication, general", `STOP and START SLAVE on an instance`): { instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) - _, err := inst.RestartSlave(instanceKey) + _, err := inst.RestartReplication(instanceKey) if err != nil { log.Fatale(err) } @@ -636,7 +636,7 @@ func Cli(command string, strict bool, instance string, destination string, owner case registerCliCommand("reset-slave", "Replication, general", `Issues a RESET SLAVE command; use with care`): { instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) - _, err := inst.ResetSlaveOperation(instanceKey) + _, err := inst.ResetReplicationOperation(instanceKey) if err != nil { log.Fatale(err) } @@ -732,7 +732,7 @@ func Cli(command string, strict bool, instance string, destination string, owner if instanceKey == nil { log.Fatalf("Unresolved instance") } - statements, err := inst.GetSlaveRestartPreserveStatements(instanceKey, *config.RuntimeCLIFlags.Statement) + statements, err := inst.GetReplicationRestartPreserveStatements(instanceKey, *config.RuntimeCLIFlags.Statement) if err != nil { log.Fatale(err) } diff --git a/go/http/api.go b/go/http/api.go index bc53b19bb..d6e11a2f3 100644 --- a/go/http/api.go +++ b/go/http/api.go @@ -603,8 +603,8 @@ func (this *HttpAPI) MakeCoMaster(params martini.Params, r render.Render, req *h Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("Instance made co-master: %+v", instance.Key), Details: instance}) } -// ResetSlave makes a replica forget about its master, effectively breaking the replication -func (this *HttpAPI) ResetSlave(params martini.Params, r render.Render, req *http.Request, user auth.User) { +// ResetReplication makes a replica forget about its master, effectively breaking the replication +func (this *HttpAPI) ResetReplication(params martini.Params, r render.Render, req *http.Request, user auth.User) { if !isAuthorizedForAction(req, user) { Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) return @@ -615,7 +615,7 @@ func (this *HttpAPI) ResetSlave(params martini.Params, r render.Render, req *htt Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) return } - instance, err := inst.ResetSlaveOperation(&instanceKey) + instance, err := inst.ResetReplicationOperation(&instanceKey) if err != nil { Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) return @@ -1256,8 +1256,8 @@ func (this *HttpAPI) SkipQuery(params martini.Params, r render.Render, req *http Respond(r, &APIResponse{Code: OK, Message: 
fmt.Sprintf("Query skipped on %+v", instance.Key), Details: instance}) } -// StartSlave starts replication on given instance -func (this *HttpAPI) StartSlave(params martini.Params, r render.Render, req *http.Request, user auth.User) { +// StartReplication starts replication on given instance +func (this *HttpAPI) StartReplication(params martini.Params, r render.Render, req *http.Request, user auth.User) { if !isAuthorizedForAction(req, user) { Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) return @@ -1268,7 +1268,7 @@ func (this *HttpAPI) StartSlave(params martini.Params, r render.Render, req *htt Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) return } - instance, err := inst.StartSlave(&instanceKey) + instance, err := inst.StartReplication(&instanceKey) if err != nil { Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) return @@ -1277,8 +1277,8 @@ func (this *HttpAPI) StartSlave(params martini.Params, r render.Render, req *htt Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("Replica started: %+v", instance.Key), Details: instance}) } -// RestartSlave stops & starts replication on given instance -func (this *HttpAPI) RestartSlave(params martini.Params, r render.Render, req *http.Request, user auth.User) { +// RestartReplication stops & starts replication on given instance +func (this *HttpAPI) RestartReplication(params martini.Params, r render.Render, req *http.Request, user auth.User) { if !isAuthorizedForAction(req, user) { Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) return @@ -1289,7 +1289,7 @@ func (this *HttpAPI) RestartSlave(params martini.Params, r render.Render, req *h Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) return } - instance, err := inst.RestartSlave(&instanceKey) + instance, err := inst.RestartReplication(&instanceKey) if err != nil { Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) return @@ -1298,8 +1298,8 @@ func (this *HttpAPI) RestartSlave(params martini.Params, r render.Render, req *h Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("Replica restarted: %+v", instance.Key), Details: instance}) } -// StopSlave stops replication on given instance -func (this *HttpAPI) StopSlave(params martini.Params, r render.Render, req *http.Request, user auth.User) { +// StopReplication stops replication on given instance +func (this *HttpAPI) StopReplication(params martini.Params, r render.Render, req *http.Request, user auth.User) { if !isAuthorizedForAction(req, user) { Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) return @@ -1310,7 +1310,7 @@ func (this *HttpAPI) StopSlave(params martini.Params, r render.Render, req *http Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) return } - instance, err := inst.StopSlave(&instanceKey) + instance, err := inst.StopReplication(&instanceKey) if err != nil { Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) return @@ -1319,8 +1319,8 @@ func (this *HttpAPI) StopSlave(params martini.Params, r render.Render, req *http Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("Replica stopped: %+v", instance.Key), Details: instance}) } -// StopSlaveNicely stops replication on given instance, such that sql thead is aligned with IO thread -func (this *HttpAPI) StopSlaveNicely(params martini.Params, r render.Render, req *http.Request, user auth.User) { +// StopReplicationNicely stops replication on given instance, such that sql thead is aligned with IO thread +func (this *HttpAPI) StopReplicationNicely(params 
martini.Params, r render.Render, req *http.Request, user auth.User) { if !isAuthorizedForAction(req, user) { Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) return @@ -1331,7 +1331,7 @@ func (this *HttpAPI) StopSlaveNicely(params martini.Params, r render.Render, req Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) return } - instance, err := inst.StopSlaveNicely(&instanceKey, 0) + instance, err := inst.StopReplicationNicely(&instanceKey, 0) if err != nil { Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) return @@ -1393,10 +1393,10 @@ func (this *HttpAPI) PurgeBinaryLogs(params martini.Params, r render.Render, req Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("Binary logs flushed on: %+v", instance.Key), Details: instance}) } -// RestartSlaveStatements receives a query to execute that requires a replication restart to apply. +// RestartReplicationStatements receives a query to execute that requires a replication restart to apply. // As an example, this may be `set global rpl_semi_sync_slave_enabled=1`. orchestrator will check // replication status on given host and will wrap with appropriate stop/start statements, if need be. -func (this *HttpAPI) RestartSlaveStatements(params martini.Params, r render.Render, req *http.Request, user auth.User) { +func (this *HttpAPI) RestartReplicationStatements(params martini.Params, r render.Render, req *http.Request, user auth.User) { if !isAuthorizedForAction(req, user) { Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) return @@ -1409,7 +1409,7 @@ func (this *HttpAPI) RestartSlaveStatements(params martini.Params, r render.Rend } query := req.URL.Query().Get("q") - statements, err := inst.GetSlaveRestartPreserveStatements(&instanceKey, query) + statements, err := inst.GetReplicationRestartPreserveStatements(&instanceKey, query) if err != nil { Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) @@ -3634,18 +3634,18 @@ func (this *HttpAPI) RegisterRequests(m *martini.ClassicMartini) { this.registerAPIRequest(m, "gtid-errant-reset-master/:host/:port", this.ErrantGTIDResetMaster) this.registerAPIRequest(m, "gtid-errant-inject-empty/:host/:port", this.ErrantGTIDInjectEmpty) this.registerAPIRequest(m, "skip-query/:host/:port", this.SkipQuery) - this.registerAPIRequest(m, "start-slave/:host/:port", this.StartSlave) - this.registerAPIRequest(m, "restart-slave/:host/:port", this.RestartSlave) - this.registerAPIRequest(m, "stop-slave/:host/:port", this.StopSlave) - this.registerAPIRequest(m, "stop-slave-nice/:host/:port", this.StopSlaveNicely) - this.registerAPIRequest(m, "reset-slave/:host/:port", this.ResetSlave) + this.registerAPIRequest(m, "start-slave/:host/:port", this.StartReplication) + this.registerAPIRequest(m, "restart-slave/:host/:port", this.RestartReplication) + this.registerAPIRequest(m, "stop-slave/:host/:port", this.StopReplication) + this.registerAPIRequest(m, "stop-slave-nice/:host/:port", this.StopReplicationNicely) + this.registerAPIRequest(m, "reset-slave/:host/:port", this.ResetReplication) this.registerAPIRequest(m, "detach-slave/:host/:port", this.DetachReplicaMasterHost) this.registerAPIRequest(m, "reattach-slave/:host/:port", this.ReattachReplicaMasterHost) this.registerAPIRequest(m, "detach-slave-master-host/:host/:port", this.DetachReplicaMasterHost) this.registerAPIRequest(m, "reattach-slave-master-host/:host/:port", this.ReattachReplicaMasterHost) this.registerAPIRequest(m, "flush-binary-logs/:host/:port", this.FlushBinaryLogs) this.registerAPIRequest(m, 
"purge-binary-logs/:host/:port/:logFile", this.PurgeBinaryLogs) - this.registerAPIRequest(m, "restart-slave-statements/:host/:port", this.RestartSlaveStatements) + this.registerAPIRequest(m, "restart-slave-statements/:host/:port", this.RestartReplicationStatements) this.registerAPIRequest(m, "enable-semi-sync-master/:host/:port", this.EnableSemiSyncMaster) this.registerAPIRequest(m, "disable-semi-sync-master/:host/:port", this.DisableSemiSyncMaster) this.registerAPIRequest(m, "enable-semi-sync-replica/:host/:port", this.EnableSemiSyncReplica) diff --git a/go/inst/analysis.go b/go/inst/analysis.go index 02e33951d..cdcdb02e6 100644 --- a/go/inst/analysis.go +++ b/go/inst/analysis.go @@ -17,6 +17,7 @@ package inst import ( + "encoding/json" "fmt" "strings" @@ -130,7 +131,8 @@ type ReplicationAnalysis struct { CountReplicasFailingToConnectToMaster uint CountDowntimedReplicas uint ReplicationDepth uint - SlaveHosts InstanceKeyMap + Replicas InstanceKeyMap + SlaveHosts InstanceKeyMap // for backwards compatibility. Equals `Replicas` IsFailingToConnectToMaster bool Analysis AnalysisCode Description string @@ -177,10 +179,21 @@ type ReplicationAnalysisChangelog struct { Changelog []string } +func (this *ReplicationAnalysis) MarshalJSON() ([]byte, error) { + i := struct { + ReplicationAnalysis + }{} + i.ReplicationAnalysis = *this + // backwards compatibility + i.SlaveHosts = i.Replicas + + return json.Marshal(i) +} + // ReadReplicaHostsFromString parses and reads replica keys from comma delimited string func (this *ReplicationAnalysis) ReadReplicaHostsFromString(replicaHostsString string) error { - this.SlaveHosts = *NewInstanceKeyMap() - return this.SlaveHosts.ReadCommaDelimitedList(replicaHostsString) + this.Replicas = *NewInstanceKeyMap() + return this.Replicas.ReadCommaDelimitedList(replicaHostsString) } // AnalysisString returns a human friendly description of all analysis issues diff --git a/go/inst/analysis_dao.go b/go/inst/analysis_dao.go index 5cadc449b..cf8b3876e 100644 --- a/go/inst/analysis_dao.go +++ b/go/inst/analysis_dao.go @@ -434,8 +434,8 @@ func GetReplicationAnalysis(clusterName string, hints *ReplicationAnalysisHints) a.IsBinlogServer = m.GetBool("is_binlog_server") a.ClusterDetails.ReadRecoveryInfo() - a.SlaveHosts = *NewInstanceKeyMap() - a.SlaveHosts.ReadCommaDelimitedList(m.GetString("slave_hosts")) + a.Replicas = *NewInstanceKeyMap() + a.Replicas.ReadCommaDelimitedList(m.GetString("slave_hosts")) countValidOracleGTIDReplicas := m.GetUint("count_valid_oracle_gtid_replicas") a.OracleGTIDImmediateTopology = countValidOracleGTIDReplicas == a.CountValidReplicas && a.CountValidReplicas > 0 diff --git a/go/inst/instance.go b/go/inst/instance.go index ca98aa5be..2087b9645 100644 --- a/go/inst/instance.go +++ b/go/inst/instance.go @@ -45,7 +45,7 @@ type Instance struct { Binlog_format string BinlogRowImage string LogBinEnabled bool - LogSlaveUpdatesEnabled bool + LogSlaveUpdatesEnabled bool // for API backwards compatibility. Equals `LogReplicationUpdatesEnabled` LogReplicationUpdatesEnabled bool SelfBinlogCoordinates BinlogCoordinates MasterKey InstanceKey @@ -53,9 +53,9 @@ type Instance struct { AncestryUUID string IsDetachedMaster bool - Slave_SQL_Running bool + Slave_SQL_Running bool // for API backwards compatibility. Equals `ReplicationSQLThreadRuning` ReplicationSQLThreadRuning bool - Slave_IO_Running bool + Slave_IO_Running bool // for API backwards compatibility. 
Equals `ReplicationIOThreadRuning` ReplicationIOThreadRuning bool ReplicationSQLThreadState ReplicationThreadState ReplicationIOThreadState ReplicationThreadState @@ -80,9 +80,9 @@ type Instance struct { masterExecutedGtidSet string // Not exported - SlaveLagSeconds sql.NullInt64 + SlaveLagSeconds sql.NullInt64 // for API backwards compatibility. Equals `ReplicationLagSeconds` ReplicationLagSeconds sql.NullInt64 - SlaveHosts InstanceKeyMap + SlaveHosts InstanceKeyMap // for API backwards compatibility. Equals `Replicas` Replicas InstanceKeyMap ClusterName string SuggestedClusterAlias string @@ -134,8 +134,8 @@ type Instance struct { // NewInstance creates a new, empty instance func NewInstance() *Instance { return &Instance{ - SlaveHosts: make(map[InstanceKey]bool), - Problems: []string{}, + Replicas: make(map[InstanceKey]bool), + Problems: []string{}, } } @@ -145,12 +145,13 @@ func (this *Instance) MarshalJSON() ([]byte, error) { }{} i.Instance = *this // change terminology. Users of the orchestrator API can switch to new terminology and avoid using old terminology - i.Replicas = i.SlaveHosts - i.ReplicationLagSeconds = this.SlaveLagSeconds - i.ReplicationSQLThreadRuning = this.Slave_SQL_Running - i.ReplicationIOThreadRuning = this.Slave_IO_Running - i.LogReplicationUpdatesEnabled = this.LogSlaveUpdatesEnabled - // + // flip + i.SlaveHosts = i.Replicas + i.SlaveLagSeconds = this.ReplicationLagSeconds + i.LogSlaveUpdatesEnabled = this.LogReplicationUpdatesEnabled + i.Slave_SQL_Running = this.ReplicationSQLThreadRuning + i.Slave_IO_Running = this.ReplicationIOThreadRuning + return json.Marshal(i) } @@ -354,7 +355,7 @@ func (this *Instance) NextGTID() (string, error) { // AddReplicaKey adds a replica to the list of this instance's replicas. func (this *Instance) AddReplicaKey(replicaKey *InstanceKey) { - this.SlaveHosts.AddKey(*replicaKey) + this.Replicas.AddKey(*replicaKey) } // GetNextBinaryLog returns the successive, if any, binary log file to the one given @@ -395,7 +396,7 @@ func (this *Instance) CanReplicateFrom(other *Instance) (bool, error) { return false, fmt.Errorf("instance does not have binary logs enabled: %+v", other.Key) } if other.IsReplica() { - if !other.LogSlaveUpdatesEnabled { + if !other.LogReplicationUpdatesEnabled { return false, fmt.Errorf("instance does not have log_slave_updates enabled: %+v", other.Key) } // OK for a master to not have log_slave_updates @@ -404,7 +405,7 @@ func (this *Instance) CanReplicateFrom(other *Instance) (bool, error) { if this.IsSmallerMajorVersion(other) && !this.IsBinlogServer() { return false, fmt.Errorf("instance %+v has version %s, which is lower than %s on %+v ", this.Key, this.Version, other.Version, other.Key) } - if this.LogBinEnabled && this.LogSlaveUpdatesEnabled { + if this.LogBinEnabled && this.LogReplicationUpdatesEnabled { if this.IsSmallerBinlogFormat(other) { return false, fmt.Errorf("Cannot replicate from %+v binlog format on %+v to %+v on %+v", other.Binlog_format, other.Key, this.Binlog_format, this.Key) } @@ -451,7 +452,7 @@ func (this *Instance) CanMove() (bool, error) { return false, fmt.Errorf("%+v: instance is not replicating", this.Key) } if !this.SecondsBehindMaster.Valid { - return false, fmt.Errorf("%+v: cannot determine slave lag", this.Key) + return false, fmt.Errorf("%+v: cannot determine replication lag", this.Key) } if !this.HasReasonableMaintenanceReplicationLag() { return false, fmt.Errorf("%+v: lags too much", this.Key) @@ -515,10 +516,10 @@ func (this *Instance) LagStatusString() string { if 
this.IsReplica() && !this.SecondsBehindMaster.Valid { return "null" } - if this.IsReplica() && this.SlaveLagSeconds.Int64 > int64(config.Config.ReasonableMaintenanceReplicationLagSeconds) { - return fmt.Sprintf("%+vs", this.SlaveLagSeconds.Int64) + if this.IsReplica() && this.ReplicationLagSeconds.Int64 > int64(config.Config.ReasonableMaintenanceReplicationLagSeconds) { + return fmt.Sprintf("%+vs", this.ReplicationLagSeconds.Int64) } - return fmt.Sprintf("%+vs", this.SlaveLagSeconds.Int64) + return fmt.Sprintf("%+vs", this.ReplicationLagSeconds.Int64) } func (this *Instance) descriptionTokens() (tokens []string) { @@ -537,7 +538,7 @@ func (this *Instance) descriptionTokens() (tokens []string) { } { extraTokens := []string{} - if this.LogBinEnabled && this.LogSlaveUpdatesEnabled { + if this.LogBinEnabled && this.LogReplicationUpdatesEnabled { extraTokens = append(extraTokens, ">>") } if this.UsingGTID() || this.SupportsOracleGTID { diff --git a/go/inst/instance_dao.go b/go/inst/instance_dao.go index 154b38f21..cbb822cfc 100644 --- a/go/inst/instance_dao.go +++ b/go/inst/instance_dao.go @@ -58,13 +58,13 @@ const ( var instanceReadChan = make(chan bool, backendDBConcurrency) var instanceWriteChan = make(chan bool, backendDBConcurrency) -// InstancesByCountSlaveHosts is a sortable type for Instance -type InstancesByCountSlaveHosts [](*Instance) +// InstancesByCountReplicas is a sortable type for Instance +type InstancesByCountReplicas [](*Instance) -func (this InstancesByCountSlaveHosts) Len() int { return len(this) } -func (this InstancesByCountSlaveHosts) Swap(i, j int) { this[i], this[j] = this[j], this[i] } -func (this InstancesByCountSlaveHosts) Less(i, j int) bool { - return len(this[i].SlaveHosts) < len(this[j].SlaveHosts) +func (this InstancesByCountReplicas) Len() int { return len(this) } +func (this InstancesByCountReplicas) Swap(i, j int) { this[i], this[j] = this[j], this[i] } +func (this InstancesByCountReplicas) Less(i, j int) bool { + return len(this[i].Replicas) < len(this[j].Replicas) } // instanceKeyInformativeClusterName is a non-authoritative cache; used for auditing or general purpose. 
@@ -229,7 +229,7 @@ func (instance *Instance) checkMaxScale(db *sql.DB, latency *stopwatch.NamedStop instance.Binlog_format = "INHERIT" instance.ReadOnly = true instance.LogBinEnabled = true - instance.LogSlaveUpdatesEnabled = true + instance.LogReplicationUpdatesEnabled = true resolvedHostname = instance.Key.Hostname latency.Start("backend") UpdateResolvedHostname(resolvedHostname, resolvedHostname) @@ -384,7 +384,7 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, bufferWrites bool, var mysqlHostname, mysqlReportHost string err = db.QueryRow("select @@global.hostname, ifnull(@@global.report_host, ''), @@global.server_id, @@global.version, @@global.version_comment, @@global.read_only, @@global.binlog_format, @@global.log_bin, @@global.log_slave_updates").Scan( - &mysqlHostname, &mysqlReportHost, &instance.ServerID, &instance.Version, &instance.VersionComment, &instance.ReadOnly, &instance.Binlog_format, &instance.LogBinEnabled, &instance.LogSlaveUpdatesEnabled) + &mysqlHostname, &mysqlReportHost, &instance.ServerID, &instance.Version, &instance.VersionComment, &instance.ReadOnly, &instance.Binlog_format, &instance.LogBinEnabled, &instance.LogReplicationUpdatesEnabled) if err != nil { goto Cleanup } @@ -526,12 +526,12 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, bufferWrites bool, instance.HasReplicationCredentials = (m.GetString("Master_User") != "") instance.ReplicationIOThreadState = ReplicationThreadStateFromStatus(m.GetString("Slave_IO_Running")) instance.ReplicationSQLThreadState = ReplicationThreadStateFromStatus(m.GetString("Slave_SQL_Running")) - instance.Slave_IO_Running = instance.ReplicationIOThreadState.IsRunning() + instance.ReplicationIOThreadRuning = instance.ReplicationIOThreadState.IsRunning() if isMaxScale110 { // Covering buggy MaxScale 1.1.0 - instance.Slave_IO_Running = instance.Slave_IO_Running && (m.GetString("Slave_IO_State") == "Binlog Dump") + instance.ReplicationIOThreadRuning = instance.ReplicationIOThreadRuning && (m.GetString("Slave_IO_State") == "Binlog Dump") } - instance.Slave_SQL_Running = instance.ReplicationSQLThreadState.IsRunning() + instance.ReplicationSQLThreadRuning = instance.ReplicationSQLThreadState.IsRunning() instance.ReadBinlogCoordinates.LogFile = m.GetString("Master_Log_File") instance.ReadBinlogCoordinates.LogPos = m.GetInt64("Read_Master_Log_Pos") instance.ExecBinlogCoordinates.LogFile = m.GetString("Relay_Master_Log_File") @@ -570,7 +570,7 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, bufferWrites bool, instance.SecondsBehindMaster.Int64 = 0 } // And until told otherwise: - instance.SlaveLagSeconds = instance.SecondsBehindMaster + instance.ReplicationLagSeconds = instance.SecondsBehindMaster instance.AllowTLS = (m.GetString("Master_SSL_Allowed") == "Yes") // Not breaking the flow even on error @@ -589,13 +589,13 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, bufferWrites bool, waitGroup.Add(1) go func() { defer waitGroup.Done() - if err := db.QueryRow(config.Config.ReplicationLagQuery).Scan(&instance.SlaveLagSeconds); err == nil { - if instance.SlaveLagSeconds.Valid && instance.SlaveLagSeconds.Int64 < 0 { - log.Warningf("Host: %+v, instance.SlaveLagSeconds < 0 [%+v], correcting to 0", instanceKey, instance.SlaveLagSeconds.Int64) - instance.SlaveLagSeconds.Int64 = 0 + if err := db.QueryRow(config.Config.ReplicationLagQuery).Scan(&instance.ReplicationLagSeconds); err == nil { + if instance.ReplicationLagSeconds.Valid && instance.ReplicationLagSeconds.Int64 < 0 { + 
log.Warningf("Host: %+v, instance.SlaveLagSeconds < 0 [%+v], correcting to 0", instanceKey, instance.ReplicationLagSeconds.Int64) + instance.ReplicationLagSeconds.Int64 = 0 } } else { - instance.SlaveLagSeconds = instance.SecondsBehindMaster + instance.ReplicationLagSeconds = instance.SecondsBehindMaster logReadTopologyInstanceError(instanceKey, "ReplicationLagQuery", err) } }() @@ -1096,12 +1096,12 @@ func readInstanceRow(m sqlutils.RowMap) *Instance { instance.Binlog_format = m.GetString("binlog_format") instance.BinlogRowImage = m.GetString("binlog_row_image") instance.LogBinEnabled = m.GetBool("log_bin") - instance.LogSlaveUpdatesEnabled = m.GetBool("log_slave_updates") + instance.LogReplicationUpdatesEnabled = m.GetBool("log_slave_updates") instance.MasterKey.Hostname = m.GetString("master_host") instance.MasterKey.Port = m.GetInt("master_port") instance.IsDetachedMaster = instance.MasterKey.IsDetached() - instance.Slave_SQL_Running = m.GetBool("slave_sql_running") - instance.Slave_IO_Running = m.GetBool("slave_io_running") + instance.ReplicationSQLThreadRuning = m.GetBool("slave_sql_running") + instance.ReplicationIOThreadRuning = m.GetBool("slave_io_running") instance.ReplicationSQLThreadState = ReplicationThreadState(m.GetInt("replication_sql_thread_state")) instance.ReplicationIOThreadState = ReplicationThreadState(m.GetInt("replication_io_thread_state")) instance.HasReplicationFilters = m.GetBool("has_replication_filters") @@ -1128,9 +1128,9 @@ func readInstanceRow(m sqlutils.RowMap) *Instance { instance.LastSQLError = m.GetString("last_sql_error") instance.LastIOError = m.GetString("last_io_error") instance.SecondsBehindMaster = m.GetNullInt64("seconds_behind_master") - instance.SlaveLagSeconds = m.GetNullInt64("slave_lag_seconds") + instance.ReplicationLagSeconds = m.GetNullInt64("slave_lag_seconds") instance.SQLDelay = m.GetUint("sql_delay") - slaveHostsJSON := m.GetString("slave_hosts") + replicasJSON := m.GetString("slave_hosts") instance.ClusterName = m.GetString("cluster_name") instance.SuggestedClusterAlias = m.GetString("suggested_cluster_alias") instance.DataCenter = m.GetString("data_center") @@ -1166,7 +1166,7 @@ func readInstanceRow(m sqlutils.RowMap) *Instance { instance.InstanceAlias = m.GetString("instance_alias") instance.LastDiscoveryLatency = time.Duration(m.GetInt64("last_discovery_latency")) * time.Nanosecond - instance.SlaveHosts.ReadJson(slaveHostsJSON) + instance.Replicas.ReadJson(replicasJSON) instance.applyFlavorName() // problems @@ -1176,7 +1176,7 @@ func readInstanceRow(m sqlutils.RowMap) *Instance { instance.Problems = append(instance.Problems, "not_recently_checked") } else if instance.ReplicationThreadsExist() && !instance.ReplicaRunning() { instance.Problems = append(instance.Problems, "not_replicating") - } else if instance.SlaveLagSeconds.Valid && math.AbsInt64(instance.SlaveLagSeconds.Int64-int64(instance.SQLDelay)) > int64(config.Config.ReasonableReplicationLagSeconds) { + } else if instance.ReplicationLagSeconds.Valid && math.AbsInt64(instance.ReplicationLagSeconds.Int64-int64(instance.SQLDelay)) > int64(config.Config.ReasonableReplicationLagSeconds) { instance.Problems = append(instance.Problems, "replication_lag") } if instance.GtidErrant != "" { @@ -1583,7 +1583,7 @@ func GetClusterOSCReplicas(clusterName string) ([](*Instance), error) { if err != nil { return result, err } - sort.Sort(sort.Reverse(InstancesByCountSlaveHosts(intermediateMasters))) + sort.Sort(sort.Reverse(InstancesByCountReplicas(intermediateMasters))) intermediateMasters 
= filterOSCInstances(intermediateMasters) intermediateMasters = intermediateMasters[0:math.MinInt(2, len(intermediateMasters))] result = append(result, intermediateMasters...) @@ -1596,7 +1596,7 @@ func GetClusterOSCReplicas(clusterName string) ([](*Instance), error) { if err != nil { return result, err } - sort.Sort(sort.Reverse(InstancesByCountSlaveHosts(replicas))) + sort.Sort(sort.Reverse(InstancesByCountReplicas(replicas))) replicas = filterOSCInstances(replicas) replicas = replicas[0:math.MinInt(2, len(replicas))] result = append(result, replicas...) @@ -1609,7 +1609,7 @@ func GetClusterOSCReplicas(clusterName string) ([](*Instance), error) { if err != nil { return result, err } - sort.Sort(sort.Reverse(InstancesByCountSlaveHosts(replicas))) + sort.Sort(sort.Reverse(InstancesByCountReplicas(replicas))) replicas = filterOSCInstances(replicas) if len(replicas) > 0 { result = append(result, replicas[0]) @@ -1627,7 +1627,7 @@ func GetClusterOSCReplicas(clusterName string) ([](*Instance), error) { if err != nil { return result, err } - sort.Sort(sort.Reverse(InstancesByCountSlaveHosts(replicas))) + sort.Sort(sort.Reverse(InstancesByCountReplicas(replicas))) replicas = filterOSCInstances(replicas) replicas = replicas[0:math.MinInt(2, len(replicas))] result = append(result, replicas...) @@ -1676,7 +1676,7 @@ func GetClusterGhostReplicas(clusterName string) (result [](*Instance), err erro if !instance.LogBinEnabled { skipThisHost = true } - if !instance.LogSlaveUpdatesEnabled { + if !instance.LogReplicationUpdatesEnabled { skipThisHost = true } if !skipThisHost { @@ -1693,8 +1693,8 @@ func GetInstancesMaxLag(instances [](*Instance)) (maxLag int64, err error) { return 0, log.Errorf("No instances found in GetInstancesMaxLag") } for _, clusterInstance := range instances { - if clusterInstance.SlaveLagSeconds.Valid && clusterInstance.SlaveLagSeconds.Int64 > maxLag { - maxLag = clusterInstance.SlaveLagSeconds.Int64 + if clusterInstance.ReplicationLagSeconds.Valid && clusterInstance.ReplicationLagSeconds.Int64 > maxLag { + maxLag = clusterInstance.ReplicationLagSeconds.Int64 } } return maxLag, nil @@ -2472,13 +2472,13 @@ func mkInsertOdkuForInstances(instances []*Instance, instanceWasActuallyFound bo args = append(args, instance.Binlog_format) args = append(args, instance.BinlogRowImage) args = append(args, instance.LogBinEnabled) - args = append(args, instance.LogSlaveUpdatesEnabled) + args = append(args, instance.LogReplicationUpdatesEnabled) args = append(args, instance.SelfBinlogCoordinates.LogFile) args = append(args, instance.SelfBinlogCoordinates.LogPos) args = append(args, instance.MasterKey.Hostname) args = append(args, instance.MasterKey.Port) - args = append(args, instance.Slave_SQL_Running) - args = append(args, instance.Slave_IO_Running) + args = append(args, instance.ReplicationSQLThreadRuning) + args = append(args, instance.ReplicationIOThreadRuning) args = append(args, instance.ReplicationSQLThreadState) args = append(args, instance.ReplicationIOThreadState) args = append(args, instance.HasReplicationFilters) @@ -2501,10 +2501,10 @@ func mkInsertOdkuForInstances(instances []*Instance, instanceWasActuallyFound bo args = append(args, instance.LastSQLError) args = append(args, instance.LastIOError) args = append(args, instance.SecondsBehindMaster) - args = append(args, instance.SlaveLagSeconds) + args = append(args, instance.ReplicationLagSeconds) args = append(args, instance.SQLDelay) - args = append(args, len(instance.SlaveHosts)) - args = append(args, 
instance.SlaveHosts.ToJSONString()) + args = append(args, len(instance.Replicas)) + args = append(args, instance.Replicas.ToJSONString()) args = append(args, instance.ClusterName) args = append(args, instance.SuggestedClusterAlias) args = append(args, instance.DataCenter) diff --git a/go/inst/instance_test.go b/go/inst/instance_test.go index 178048e56..77a5fc9b5 100644 --- a/go/inst/instance_test.go +++ b/go/inst/instance_test.go @@ -119,9 +119,9 @@ func TestCanReplicateFrom(t *testing.T) { test.S(t).ExpectEquals(canReplicate, false) //binlog not yet enabled i55.LogBinEnabled = true - i55.LogSlaveUpdatesEnabled = true + i55.LogReplicationUpdatesEnabled = true i56.LogBinEnabled = true - i56.LogSlaveUpdatesEnabled = true + i56.LogReplicationUpdatesEnabled = true canReplicate, _ = i56.CanReplicateFrom(&i55) test.S(t).ExpectEquals(canReplicate, false) //serverid not set @@ -134,8 +134,8 @@ func TestCanReplicateFrom(t *testing.T) { canReplicate, _ = i55.CanReplicateFrom(&i56) test.S(t).ExpectFalse(canReplicate) - iStatement := Instance{Key: key1, Binlog_format: "STATEMENT", ServerID: 1, Version: "5.5", LogBinEnabled: true, LogSlaveUpdatesEnabled: true} - iRow := Instance{Key: key2, Binlog_format: "ROW", ServerID: 2, Version: "5.5", LogBinEnabled: true, LogSlaveUpdatesEnabled: true} + iStatement := Instance{Key: key1, Binlog_format: "STATEMENT", ServerID: 1, Version: "5.5", LogBinEnabled: true, LogReplicationUpdatesEnabled: true} + iRow := Instance{Key: key2, Binlog_format: "ROW", ServerID: 2, Version: "5.5", LogBinEnabled: true, LogReplicationUpdatesEnabled: true} canReplicate, err = iRow.CanReplicateFrom(&iStatement) test.S(t).ExpectNil(err) test.S(t).ExpectTrue(canReplicate) @@ -201,7 +201,7 @@ func TestHumanReadableDescription(t *testing.T) { i57.UsingPseudoGTID = true i57.LogBinEnabled = true i57.Binlog_format = "ROW" - i57.LogSlaveUpdatesEnabled = true + i57.LogReplicationUpdatesEnabled = true desc := i57.HumanReadableDescription() test.S(t).ExpectEquals(desc, "[unknown,invalid,5.7.8-log,rw,ROW,>>,P-GTID]") } @@ -217,7 +217,7 @@ func TestTabulatedDescription(t *testing.T) { i57.UsingPseudoGTID = true i57.LogBinEnabled = true i57.Binlog_format = "ROW" - i57.LogSlaveUpdatesEnabled = true + i57.LogReplicationUpdatesEnabled = true desc := i57.TabulatedDescription("|") test.S(t).ExpectEquals(desc, "unknown|invalid|5.7.8-log|rw|ROW|>>,P-GTID") } diff --git a/go/inst/instance_topology.go b/go/inst/instance_topology.go index 363aefd1c..3157968e6 100644 --- a/go/inst/instance_topology.go +++ b/go/inst/instance_topology.go @@ -37,7 +37,7 @@ type StopReplicationMethod string const ( NoStopReplication StopReplicationMethod = "NoStopReplication" StopReplicationNormal = "StopReplicationNormal" - StopReplicationNicely = "StopReplicationNicely" + StopReplicationNice = "StopReplicationNice" ) var ReplicationNotRunningError = fmt.Errorf("Replication not running") @@ -236,7 +236,7 @@ func MoveEquivalent(instanceKey, otherKey *InstanceKey) (*Instance, error) { // Now if we DO get to happen on equivalent coordinates, we need to double check. For CHANGE MASTER to happen we must // stop the replica anyhow. But then let's verify the position hasn't changed. 
knownExecBinlogCoordinates := instance.ExecBinlogCoordinates - instance, err = StopSlave(instanceKey) + instance, err = StopReplication(instanceKey) if err != nil { goto Cleanup } @@ -248,7 +248,7 @@ func MoveEquivalent(instanceKey, otherKey *InstanceKey) (*Instance, error) { instance, err = ChangeMasterTo(instanceKey, otherKey, binlogCoordinates, false, GTIDHintNeutral) Cleanup: - instance, _ = StartSlave(instanceKey) + instance, _ = StartReplication(instanceKey) if err == nil { message := fmt.Sprintf("moved %+v via equivalence coordinates below %+v", *instanceKey, *otherKey) @@ -306,19 +306,19 @@ func MoveUp(instanceKey *InstanceKey) (*Instance, error) { } if !instance.UsingMariaDBGTID { - master, err = StopSlave(&master.Key) + master, err = StopReplication(&master.Key) if err != nil { goto Cleanup } } - instance, err = StopSlave(instanceKey) + instance, err = StopReplication(instanceKey) if err != nil { goto Cleanup } if !instance.UsingMariaDBGTID { - instance, err = StartSlaveUntilMasterCoordinates(instanceKey, &master.SelfBinlogCoordinates) + instance, err = StartReplicationUntilMasterCoordinates(instanceKey, &master.SelfBinlogCoordinates) if err != nil { goto Cleanup } @@ -331,9 +331,9 @@ func MoveUp(instanceKey *InstanceKey) (*Instance, error) { } Cleanup: - instance, _ = StartSlave(instanceKey) + instance, _ = StartReplication(instanceKey) if !instance.UsingMariaDBGTID { - master, _ = StartSlave(&master.Key) + master, _ = StartReplication(&master.Key) } if err != nil { return instance, log.Errore(err) @@ -396,7 +396,7 @@ func MoveUpReplicas(instanceKey *InstanceKey, pattern string) ([](*Instance), *I } } - instance, err = StopSlave(instanceKey) + instance, err = StopReplication(instanceKey) if err != nil { goto Cleanup } @@ -407,7 +407,7 @@ func MoveUpReplicas(instanceKey *InstanceKey, pattern string) ([](*Instance), *I go func() { defer func() { defer func() { barrier <- &replica.Key }() - StartSlave(&replica.Key) + StartReplication(&replica.Key) }() var replicaErr error @@ -425,12 +425,12 @@ func MoveUpReplicas(instanceKey *InstanceKey, pattern string) ([](*Instance), *I } } else { // Normal case. Do the math. 
- replica, err = StopSlave(&replica.Key) + replica, err = StopReplication(&replica.Key) if err != nil { replicaErr = err return } - replica, err = StartSlaveUntilMasterCoordinates(&replica.Key, &instance.SelfBinlogCoordinates) + replica, err = StartReplicationUntilMasterCoordinates(&replica.Key, &instance.SelfBinlogCoordinates) if err != nil { replicaErr = err return @@ -460,7 +460,7 @@ func MoveUpReplicas(instanceKey *InstanceKey, pattern string) ([](*Instance), *I } Cleanup: - instance, _ = StartSlave(instanceKey) + instance, _ = StartReplication(instanceKey) if err != nil { return res, instance, log.Errore(err), errs } @@ -523,22 +523,22 @@ func MoveBelow(instanceKey, siblingKey *InstanceKey) (*Instance, error) { defer EndMaintenance(maintenanceToken) } - instance, err = StopSlave(instanceKey) + instance, err = StopReplication(instanceKey) if err != nil { goto Cleanup } - sibling, err = StopSlave(siblingKey) + sibling, err = StopReplication(siblingKey) if err != nil { goto Cleanup } if instance.ExecBinlogCoordinates.SmallerThan(&sibling.ExecBinlogCoordinates) { - instance, err = StartSlaveUntilMasterCoordinates(instanceKey, &sibling.ExecBinlogCoordinates) + instance, err = StartReplicationUntilMasterCoordinates(instanceKey, &sibling.ExecBinlogCoordinates) if err != nil { goto Cleanup } } else if sibling.ExecBinlogCoordinates.SmallerThan(&instance.ExecBinlogCoordinates) { - sibling, err = StartSlaveUntilMasterCoordinates(siblingKey, &instance.ExecBinlogCoordinates) + sibling, err = StartReplicationUntilMasterCoordinates(siblingKey, &instance.ExecBinlogCoordinates) if err != nil { goto Cleanup } @@ -551,8 +551,8 @@ func MoveBelow(instanceKey, siblingKey *InstanceKey) (*Instance, error) { } Cleanup: - instance, _ = StartSlave(instanceKey) - sibling, _ = StartSlave(siblingKey) + instance, _ = StartReplication(instanceKey) + sibling, _ = StartReplication(siblingKey) if err != nil { return instance, log.Errore(err) @@ -626,7 +626,7 @@ func moveInstanceBelowViaGTID(instance, otherInstance *Instance) (*Instance, err defer EndMaintenance(maintenanceToken) } - instance, err = StopSlave(instanceKey) + instance, err = StopReplication(instanceKey) if err != nil { goto Cleanup } @@ -636,7 +636,7 @@ func moveInstanceBelowViaGTID(instance, otherInstance *Instance) (*Instance, err goto Cleanup } Cleanup: - instance, _ = StartSlave(instanceKey) + instance, _ = StartReplication(instanceKey) if err != nil { return instance, log.Errore(err) } @@ -802,7 +802,7 @@ func Repoint(instanceKey *InstanceKey, masterKey *InstanceKey, gtidHint Operatio defer EndMaintenance(maintenanceToken) } - instance, err = StopSlave(instanceKey) + instance, err = StopReplication(instanceKey) if err != nil { goto Cleanup } @@ -819,7 +819,7 @@ func Repoint(instanceKey *InstanceKey, masterKey *InstanceKey, gtidHint Operatio } Cleanup: - instance, _ = StartSlave(instanceKey) + instance, _ = StartReplication(instanceKey) if err != nil { return instance, log.Errore(err) } @@ -977,9 +977,9 @@ func MakeCoMaster(instanceKey *InstanceKey) (*Instance, error) { // the coMaster used to be merely a replica. Just point master into *some* position // within coMaster... if master.IsReplica() { - // this is the case of a co-master. For masters, the StopSlave operation throws an error, and + // this is the case of a co-master. For masters, the StopReplication operation throws an error, and // there's really no point in doing it. 
- master, err = StopSlave(&master.Key) + master, err = StopReplication(&master.Key) if err != nil { goto Cleanup } @@ -1012,7 +1012,7 @@ func MakeCoMaster(instanceKey *InstanceKey) (*Instance, error) { } Cleanup: - master, _ = StartSlave(&master.Key) + master, _ = StartReplication(&master.Key) if err != nil { return instance, log.Errore(err) } @@ -1022,8 +1022,8 @@ Cleanup: return instance, err } -// ResetSlaveOperation will reset a replica -func ResetSlaveOperation(instanceKey *InstanceKey) (*Instance, error) { +// ResetReplicationOperation will reset a replica +func ResetReplicationOperation(instanceKey *InstanceKey) (*Instance, error) { instance, err := ReadTopologyInstance(instanceKey) if err != nil { return instance, err @@ -1039,19 +1039,19 @@ func ResetSlaveOperation(instanceKey *InstanceKey) (*Instance, error) { } if instance.IsReplica() { - instance, err = StopSlave(instanceKey) + instance, err = StopReplication(instanceKey) if err != nil { goto Cleanup } } - instance, err = ResetSlave(instanceKey) + instance, err = ResetReplication(instanceKey) if err != nil { goto Cleanup } Cleanup: - instance, _ = StartSlave(instanceKey) + instance, _ = StartReplication(instanceKey) if err != nil { return instance, log.Errore(err) @@ -1086,7 +1086,7 @@ func DetachReplicaMasterHost(instanceKey *InstanceKey) (*Instance, error) { defer EndMaintenance(maintenanceToken) } - instance, err = StopSlave(instanceKey) + instance, err = StopReplication(instanceKey) if err != nil { goto Cleanup } @@ -1097,7 +1097,7 @@ func DetachReplicaMasterHost(instanceKey *InstanceKey) (*Instance, error) { } Cleanup: - instance, _ = StartSlave(instanceKey) + instance, _ = StartReplication(instanceKey) if err != nil { return instance, log.Errore(err) } @@ -1131,7 +1131,7 @@ func ReattachReplicaMasterHost(instanceKey *InstanceKey) (*Instance, error) { defer EndMaintenance(maintenanceToken) } - instance, err = StopSlave(instanceKey) + instance, err = StopReplication(instanceKey) if err != nil { goto Cleanup } @@ -1144,7 +1144,7 @@ func ReattachReplicaMasterHost(instanceKey *InstanceKey) (*Instance, error) { ReplaceAliasClusterName(instanceKey.StringCode(), reattachedMasterKey.StringCode()) Cleanup: - instance, _ = StartSlave(instanceKey) + instance, _ = StartReplication(instanceKey) if err != nil { return instance, log.Errore(err) } @@ -1270,8 +1270,8 @@ func ErrantGTIDResetMaster(instanceKey *InstanceKey) (instance *Instance, err er if !instance.SupportsOracleGTID { return instance, log.Errorf("gtid-errant-reset-master requested for %+v but it is not using oracle-gtid", *instanceKey) } - if len(instance.SlaveHosts) > 0 { - return instance, log.Errorf("gtid-errant-reset-master will not operate on %+v because it has %+v replicas. Expecting no replicas", *instanceKey, len(instance.SlaveHosts)) + if len(instance.Replicas) > 0 { + return instance, log.Errorf("gtid-errant-reset-master will not operate on %+v because it has %+v replicas. 
Expecting no replicas", *instanceKey, len(instance.Replicas)) } gtidSubtract := "" @@ -1288,7 +1288,7 @@ func ErrantGTIDResetMaster(instanceKey *InstanceKey) (instance *Instance, err er } if instance.IsReplica() { - instance, err = StopSlave(instanceKey) + instance, err = StopReplication(instanceKey) if err != nil { goto Cleanup } @@ -1350,9 +1350,9 @@ func ErrantGTIDResetMaster(instanceKey *InstanceKey) (instance *Instance, err er } Cleanup: - var startSlaveErr error - instance, startSlaveErr = StartSlave(instanceKey) - log.Errore(startSlaveErr) + var startReplicationErr error + instance, startReplicationErr = StartReplication(instanceKey) + log.Errore(startReplicationErr) if err != nil { return instance, log.Errore(err) @@ -1418,7 +1418,7 @@ func FindLastPseudoGTIDEntry(instance *Instance, recordedInstanceRelayLogCoordin return instancePseudoGtidCoordinates, instancePseudoGtidText, fmt.Errorf("PseudoGTIDPattern not configured; cannot use Pseudo-GTID") } - if instance.LogBinEnabled && instance.LogSlaveUpdatesEnabled && !*config.RuntimeCLIFlags.SkipBinlogSearch && (expectedBinlogFormat == nil || instance.Binlog_format == *expectedBinlogFormat) { + if instance.LogBinEnabled && instance.LogReplicationUpdatesEnabled && !*config.RuntimeCLIFlags.SkipBinlogSearch && (expectedBinlogFormat == nil || instance.Binlog_format == *expectedBinlogFormat) { minBinlogCoordinates, _, _ := GetHeuristiclyRecentCoordinatesForInstance(&instance.Key) // Well no need to search this instance's binary logs if it doesn't have any... // With regard log-slave-updates, some edge cases are possible, like having this instance's log-slave-updates @@ -1572,7 +1572,7 @@ func MatchBelow(instanceKey, otherKey *InstanceKey, requireInstanceMaintenance b } log.Debugf("Stopping replica on %+v", *instanceKey) - instance, err = StopSlave(instanceKey) + instance, err = StopReplication(instanceKey) if err != nil { goto Cleanup } @@ -1592,7 +1592,7 @@ func MatchBelow(instanceKey, otherKey *InstanceKey, requireInstanceMaintenance b } Cleanup: - instance, _ = StartSlave(instanceKey) + instance, _ = StartReplication(instanceKey) if err != nil { return instance, nextBinlogCoordinatesToMatch, log.Errore(err) } @@ -1744,16 +1744,16 @@ func TakeMaster(instanceKey *InstanceKey, allowTakingCoMaster bool) (*Instance, return instance, err } // We begin - masterInstance, err = StopSlave(&masterInstance.Key) + masterInstance, err = StopReplication(&masterInstance.Key) if err != nil { goto Cleanup } - instance, err = StopSlave(&instance.Key) + instance, err = StopReplication(&instance.Key) if err != nil { goto Cleanup } - instance, err = StartSlaveUntilMasterCoordinates(&instance.Key, &masterInstance.SelfBinlogCoordinates) + instance, err = StartReplicationUntilMasterCoordinates(&instance.Key, &masterInstance.SelfBinlogCoordinates) if err != nil { goto Cleanup } @@ -1775,10 +1775,10 @@ func TakeMaster(instanceKey *InstanceKey, allowTakingCoMaster bool) (*Instance, Cleanup: if instance != nil { - instance, _ = StartSlave(&instance.Key) + instance, _ = StartReplication(&instance.Key) } if masterInstance != nil { - masterInstance, _ = StartSlave(&masterInstance.Key) + masterInstance, _ = StartReplication(&masterInstance.Key) } if err != nil { return instance, err @@ -1823,7 +1823,7 @@ func MakeLocalMaster(instanceKey *InstanceKey) (*Instance, error) { } } - instance, err = StopSlaveNicely(instanceKey, 0) + instance, err = StopReplicationNicely(instanceKey, 0) if err != nil { goto Cleanup } @@ -1880,7 +1880,7 @@ func 
sortedReplicasDataCenterHint(replicas [](*Instance), stopReplicationMethod if len(replicas) <= 1 { return replicas } - replicas = StopSlaves(replicas, stopReplicationMethod, time.Duration(config.Config.InstanceBulkOperationsWaitTimeoutSeconds)*time.Second) + replicas = StopReplicas(replicas, stopReplicationMethod, time.Duration(config.Config.InstanceBulkOperationsWaitTimeoutSeconds)*time.Second) replicas = RemoveNilInstances(replicas) sortInstancesDataCenterHint(replicas, dataCenterHint) @@ -2061,7 +2061,7 @@ func isGenerallyValidAsBinlogSource(replica *Instance) bool { if !replica.LogBinEnabled { return false } - if !replica.LogSlaveUpdatesEnabled { + if !replica.LogReplicationUpdatesEnabled { return false } @@ -2091,7 +2091,7 @@ func isValidAsCandidateMasterInBinlogServerTopology(replica *Instance) bool { if !replica.LogBinEnabled { return false } - if replica.LogSlaveUpdatesEnabled { + if replica.LogReplicationUpdatesEnabled { // That's right: we *disallow* log-replica-updates return false } @@ -2227,7 +2227,7 @@ func GetCandidateReplica(masterKey *InstanceKey, forRematchPurposes bool) (*Inst } stopReplicationMethod := NoStopReplication if forRematchPurposes { - stopReplicationMethod = StopReplicationNicely + stopReplicationMethod = StopReplicationNice } replicas = sortedReplicasDataCenterHint(replicas, stopReplicationMethod, dataCenterHint) if err != nil { @@ -2340,7 +2340,7 @@ func RegroupReplicasPseudoGTID( go func() { defer func() { barrier <- &candidateReplica.Key }() ExecuteOnTopology(func() { - StartSlave(&replica.Key) + StartReplication(&replica.Key) }) }() } @@ -2429,7 +2429,7 @@ func RegroupReplicasPseudoGTIDIncludingSubReplicasOfBinlogServers( return log.Errore(err) } log.Debugf("RegroupReplicasIncludingSubReplicasOfBinlogServers: repointed candidate replica %+v under binlog server %+v", candidateReplica.Key, mostUpToDateBinlogServer.Key) - candidateReplica, err = StartSlaveUntilMasterCoordinates(&candidateReplica.Key, &mostUpToDateBinlogServer.ExecBinlogCoordinates) + candidateReplica, err = StartReplicationUntilMasterCoordinates(&candidateReplica.Key, &mostUpToDateBinlogServer.ExecBinlogCoordinates) if err != nil { return log.Errore(err) } @@ -2500,7 +2500,7 @@ func RegroupReplicasGTID( err = moveGTIDFunc() } - StartSlave(&candidateReplica.Key) + StartReplication(&candidateReplica.Key) log.Debugf("RegroupReplicasGTID: done") AuditOperation("regroup-replicas-gtid", masterKey, fmt.Sprintf("regrouped replicas of %+v via GTID; promoted %+v", *masterKey, candidateReplica.Key)) diff --git a/go/inst/instance_topology_dao.go b/go/inst/instance_topology_dao.go index 80973ee26..9598f7e25 100644 --- a/go/inst/instance_topology_dao.go +++ b/go/inst/instance_topology_dao.go @@ -129,31 +129,31 @@ func RefreshTopologyInstances(instances [](*Instance)) { } } -// GetSlaveRestartPreserveStatements returns a sequence of statements that make sure a replica is stopped +// GetReplicationRestartPreserveStatements returns a sequence of statements that make sure a replica is stopped // and then returned to the same state. For example, if the replica was fully running, this will issue // a STOP on both io_thread and sql_thread, followed by START on both. If one of them is not running // at the time this function is called, said thread will be neither stopped nor started. // The caller may provide an injected statememt, to be executed while the replica is stopped. // This is useful for CHANGE MASTER TO commands, that unfortunately must take place while the replica // is completely stopped. 
-func GetSlaveRestartPreserveStatements(instanceKey *InstanceKey, injectedStatement string) (statements []string, err error) { +func GetReplicationRestartPreserveStatements(instanceKey *InstanceKey, injectedStatement string) (statements []string, err error) { instance, err := ReadTopologyInstance(instanceKey) if err != nil { return statements, err } - if instance.Slave_IO_Running { + if instance.ReplicationIOThreadRuning { statements = append(statements, SemicolonTerminated(`stop slave io_thread`)) } - if instance.Slave_SQL_Running { + if instance.ReplicationSQLThreadRuning { statements = append(statements, SemicolonTerminated(`stop slave sql_thread`)) } if injectedStatement != "" { statements = append(statements, SemicolonTerminated(injectedStatement)) } - if instance.Slave_SQL_Running { + if instance.ReplicationSQLThreadRuning { statements = append(statements, SemicolonTerminated(`start slave sql_thread`)) } - if instance.Slave_IO_Running { + if instance.ReplicationIOThreadRuning { statements = append(statements, SemicolonTerminated(`start slave io_thread`)) } return statements, err @@ -231,7 +231,7 @@ func SetSemiSyncReplica(instanceKey *InstanceKey, enableReplica bool) (*Instance if _, err := ExecInstance(instanceKey, "set @@global.rpl_semi_sync_slave_enabled=?", enableReplica); err != nil { return instance, log.Errore(err) } - if instance.Slave_IO_Running { + if instance.ReplicationIOThreadRuning { // Need to apply change by stopping starting IO thread ExecInstance(instanceKey, "stop slave io_thread") if _, err := ExecInstance(instanceKey, "start slave io_thread"); err != nil { @@ -252,10 +252,10 @@ func RestartReplicationQuick(instanceKey *InstanceKey) error { return nil } -// StopSlaveNicely stops a replica such that SQL_thread and IO_thread are aligned (i.e. +// StopReplicationNicely stops a replica such that SQL_thread and IO_thread are aligned (i.e. // SQL_thread consumes all relay log entries) // It will actually START the sql_thread even if the replica is completely stopped. -func StopSlaveNicely(instanceKey *InstanceKey, timeout time.Duration) (*Instance, error) { +func StopReplicationNicely(instanceKey *InstanceKey, timeout time.Duration) (*Instance, error) { instance, err := ReadTopologyInstance(instanceKey) if err != nil { return instance, log.Errore(err) @@ -268,7 +268,7 @@ func StopSlaveNicely(instanceKey *InstanceKey, timeout time.Duration) (*Instance // stop io_thread, start sql_thread but catch any errors for _, cmd := range []string{`stop slave io_thread`, `start slave sql_thread`} { if _, err := ExecInstance(instanceKey, cmd); err != nil { - return nil, log.Errorf("%+v: StopSlaveNicely: '%q' failed: %+v", *instanceKey, cmd, err) + return nil, log.Errorf("%+v: StopReplicationNicely: '%q' failed: %+v", *instanceKey, cmd, err) } } @@ -291,7 +291,7 @@ func StopSlaveNicely(instanceKey *InstanceKey, timeout time.Duration) (*Instance } instance, err = ReadTopologyInstance(instanceKey) - log.Infof("Stopped slave nicely on %+v, Self:%+v, Exec:%+v", *instanceKey, instance.SelfBinlogCoordinates, instance.ExecBinlogCoordinates) + log.Infof("Stopped replication nicely on %+v, Self:%+v, Exec:%+v", *instanceKey, instance.SelfBinlogCoordinates, instance.ExecBinlogCoordinates) return instance, err } @@ -345,9 +345,9 @@ func WaitForSQLThreadUpToDate(instanceKey *InstanceKey, overallTimeout time.Dura } } -// StopSlaves will stop replication concurrently on given set of replicas. +// StopReplicas will stop replication concurrently on given set of replicas. 
// It will potentially do nothing, or attempt to stop _nicely_ or just stop normally, all according to stopReplicationMethod -func StopSlaves(replicas [](*Instance), stopReplicationMethod StopReplicationMethod, timeout time.Duration) [](*Instance) { +func StopReplicas(replicas [](*Instance), stopReplicationMethod StopReplicationMethod, timeout time.Duration) [](*Instance) { if stopReplicationMethod == NoStopReplication { return replicas } @@ -364,10 +364,10 @@ func StopSlaves(replicas [](*Instance), stopReplicationMethod StopReplicationMet defer func() { barrier <- *updatedReplica }() // Wait your turn to read a replica ExecuteOnTopology(func() { - if stopReplicationMethod == StopReplicationNicely { - StopSlaveNicely(&replica.Key, timeout) + if stopReplicationMethod == StopReplicationNice { + StopReplicationNicely(&replica.Key, timeout) } - replica, _ = StopSlave(&replica.Key) + replica, _ = StopReplication(&replica.Key) updatedReplica = &replica }) }() @@ -378,13 +378,13 @@ func StopSlaves(replicas [](*Instance), stopReplicationMethod StopReplicationMet return refreshedReplicas } -// StopSlavesNicely will attemt to stop all given replicas nicely, up to timeout -func StopSlavesNicely(replicas [](*Instance), timeout time.Duration) [](*Instance) { - return StopSlaves(replicas, StopReplicationNicely, timeout) +// StopReplicasNicely will attemt to stop all given replicas nicely, up to timeout +func StopReplicasNicely(replicas [](*Instance), timeout time.Duration) [](*Instance) { + return StopReplicas(replicas, StopReplicationNice, timeout) } -// StopSlave stops replication on a given instance -func StopSlave(instanceKey *InstanceKey) (*Instance, error) { +// StopReplication stops replication on a given instance +func StopReplication(instanceKey *InstanceKey) (*Instance, error) { instance, err := ReadTopologyInstance(instanceKey) if err != nil { return instance, log.Errore(err) @@ -432,8 +432,8 @@ func waitForReplicationState(instanceKey *InstanceKey, expectedState Replication return false, nil } -// StartSlave starts replication on a given instance. -func StartSlave(instanceKey *InstanceKey) (*Instance, error) { +// StartReplication starts replication on a given instance. 
+func StartReplication(instanceKey *InstanceKey) (*Instance, error) { instance, err := ReadTopologyInstance(instanceKey) if err != nil { return instance, log.Errore(err) @@ -475,18 +475,18 @@ func StartSlave(instanceKey *InstanceKey) (*Instance, error) { return instance, nil } -// RestartSlave stops & starts replication on a given instance -func RestartSlave(instanceKey *InstanceKey) (instance *Instance, err error) { - instance, err = StopSlave(instanceKey) +// RestartReplication stops & starts replication on a given instance +func RestartReplication(instanceKey *InstanceKey) (instance *Instance, err error) { + instance, err = StopReplication(instanceKey) if err != nil { return instance, log.Errore(err) } - instance, err = StartSlave(instanceKey) + instance, err = StartReplication(instanceKey) return instance, log.Errore(err) } -// StartSlaves will do concurrent start-slave -func StartSlaves(replicas [](*Instance)) { +// StartReplicas will do concurrent start-replica +func StartReplicas(replicas [](*Instance)) { // use concurrency but wait for all to complete log.Debugf("Starting %d replicas", len(replicas)) barrier := make(chan InstanceKey) @@ -496,7 +496,7 @@ func StartSlaves(replicas [](*Instance)) { // Signal compelted replica defer func() { barrier <- instance.Key }() // Wait your turn to read a replica - ExecuteOnTopology(func() { StartSlave(&instance.Key) }) + ExecuteOnTopology(func() { StartReplication(&instance.Key) }) }() } for range replicas { @@ -527,8 +527,8 @@ func WaitForExecBinlogCoordinatesToReach(instanceKey *InstanceKey, coordinates * return instance, exactMatch, err } -// StartSlaveUntilMasterCoordinates issuesa START SLAVE UNTIL... statement on given instance -func StartSlaveUntilMasterCoordinates(instanceKey *InstanceKey, masterCoordinates *BinlogCoordinates) (*Instance, error) { +// StartReplicationUntilMasterCoordinates issuesa START SLAVE UNTIL... statement on given instance +func StartReplicationUntilMasterCoordinates(instanceKey *InstanceKey, masterCoordinates *BinlogCoordinates) (*Instance, error) { instance, err := ReadTopologyInstance(instanceKey) if err != nil { return instance, log.Errore(err) @@ -569,7 +569,7 @@ func StartSlaveUntilMasterCoordinates(instanceKey *InstanceKey, masterCoordinate return instance, fmt.Errorf("Start SLAVE UNTIL is past coordinates: %+v", instanceKey) } - instance, err = StopSlave(instanceKey) + instance, err = StopReplication(instanceKey) if err != nil { return instance, log.Errore(err) } @@ -577,13 +577,13 @@ func StartSlaveUntilMasterCoordinates(instanceKey *InstanceKey, masterCoordinate return instance, err } -// EnableSemiSync sets the rpl_semi_sync_(master|slave)_enabled variables +// EnableSemiSync sets the rpl_semi_sync_(master|replica)_enabled variables // on a given instance. 
-func EnableSemiSync(instanceKey *InstanceKey, master, slave bool) error {
-	log.Infof("instance %+v rpl_semi_sync_master_enabled: %t, rpl_semi_sync_slave_enabled: %t", instanceKey, master, slave)
+func EnableSemiSync(instanceKey *InstanceKey, master, replica bool) error {
+	log.Infof("instance %+v rpl_semi_sync_master_enabled: %t, rpl_semi_sync_slave_enabled: %t", instanceKey, master, replica)
 	_, err := ExecInstance(instanceKey, `set global rpl_semi_sync_master_enabled = ?, global rpl_semi_sync_slave_enabled = ?`,
-		master, slave)
+		master, replica)
 	return err
 }
@@ -796,22 +796,22 @@ func SkipToNextBinaryLog(instanceKey *InstanceKey) (*Instance, error) {
 		return instance, log.Errore(err)
 	}
 	AuditOperation("skip-binlog", instanceKey, fmt.Sprintf("Skipped replication to next binary log: %+v", nextFileCoordinates.LogFile))
-	return StartSlave(instanceKey)
+	return StartReplication(instanceKey)
 }
-// ResetSlave resets a replica, breaking the replication
-func ResetSlave(instanceKey *InstanceKey) (*Instance, error) {
+// ResetReplication resets a replica, breaking the replication
+func ResetReplication(instanceKey *InstanceKey) (*Instance, error) {
 	instance, err := ReadTopologyInstance(instanceKey)
 	if err != nil {
 		return instance, log.Errore(err)
 	}
 	if instance.ReplicationThreadsExist() && !instance.ReplicationThreadsStopped() {
-		return instance, fmt.Errorf("Cannot reset slave on: %+v because replication threads are not stopped", instanceKey)
+		return instance, fmt.Errorf("Cannot reset replication on: %+v because replication threads are not stopped", instanceKey)
 	}
 	if *config.RuntimeCLIFlags.Noop {
-		return instance, fmt.Errorf("noop: aborting reset-slave operation on %+v; signalling error but nothing went wrong.", *instanceKey)
+		return instance, fmt.Errorf("noop: aborting reset-replication operation on %+v; signalling error but nothing went wrong.", *instanceKey)
 	}
 	// MySQL's RESET SLAVE is done correctly; however SHOW SLAVE STATUS still returns old hostnames etc
@@ -824,14 +824,14 @@ func ResetSlave(instanceKey *InstanceKey) (*Instance, error) {
 	}
 	_, err = ExecInstance(instanceKey, `reset slave /*!50603 all */`)
 	if err != nil && strings.Contains(err.Error(), Error1201CouldnotInitializeMasterInfoStructure) {
-		log.Debugf("ResetSlave: got %+v", err)
+		log.Debugf("ResetReplication: got %+v", err)
 		workaroundBug83713(instanceKey)
 		_, err = ExecInstance(instanceKey, `reset slave /*!50603 all */`)
 	}
 	if err != nil {
 		return instance, log.Errore(err)
 	}
-	log.Infof("Reset slave %+v", instanceKey)
+	log.Infof("Reset replication %+v", instanceKey)
 	instance, err = ReadTopologyInstance(instanceKey)
 	return instance, err
@@ -938,8 +938,8 @@ func SkipQuery(instanceKey *InstanceKey) (*Instance, error) {
 	if !instance.IsReplica() {
 		return instance, fmt.Errorf("instance is not a replica: %+v", instanceKey)
 	}
-	if instance.Slave_SQL_Running {
-		return instance, fmt.Errorf("Slave SQL thread is running on %+v", instanceKey)
+	if instance.ReplicationSQLThreadRuning {
+		return instance, fmt.Errorf("Replication SQL thread is running on %+v", instanceKey)
 	}
 	if instance.LastSQLError == "" {
 		return instance, fmt.Errorf("No SQL error on %+v", instanceKey)
 	}
@@ -961,7 +961,7 @@ func SkipQuery(instanceKey *InstanceKey) (*Instance, error) {
 		return instance, log.Errore(err)
 	}
 	AuditOperation("skip-query", instanceKey, "Skipped one query")
-	return StartSlave(instanceKey)
+	return StartReplication(instanceKey)
 }
 // MasterPosWait issues a MASTER_POS_WAIT() an given instance according to given coordinates.
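The renamed StopReplicas/StartReplicas helpers above fan out one goroutine per replica and use an unbuffered channel as a completion barrier: every goroutine signals the channel from a defer, and the caller drains exactly one signal per replica before returning, so the barrier is released even when the per-replica work errors out early. The standalone Go sketch below illustrates just that pattern; it is not part of the patch, and stopOneReplica, stopAll and the replica names are hypothetical stand-ins for orchestrator's ExecuteOnTopology/Instance machinery.

package main

import (
	"fmt"
	"time"
)

// stopOneReplica is a placeholder for the real per-replica work
// (e.g. issuing a STOP REPLICA statement against one host).
func stopOneReplica(name string) {
	time.Sleep(10 * time.Millisecond)
	fmt.Println("stopped", name)
}

// stopAll mirrors the fan-out/barrier structure of StopReplicas:
// one goroutine per replica, one completion signal per replica.
func stopAll(replicas []string) {
	barrier := make(chan string)
	for _, replica := range replicas {
		replica := replica // capture the loop variable for the goroutine
		go func() {
			// Signal completion even if stopOneReplica returns early.
			defer func() { barrier <- replica }()
			stopOneReplica(replica)
		}()
	}
	// Wait for exactly len(replicas) completions before returning.
	for range replicas {
		<-barrier
	}
}

func main() {
	stopAll([]string{"replica-1", "replica-2", "replica-3"})
}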
diff --git a/go/inst/instance_topology_test.go b/go/inst/instance_topology_test.go
index 63aa117df..615dd37b1 100644
--- a/go/inst/instance_topology_test.go
+++ b/go/inst/instance_topology_test.go
@@ -47,7 +47,7 @@ func applyGeneralGoodToGoReplicationParams(instances [](*Instance)) {
 	for _, instance := range instances {
 		instance.IsLastCheckValid = true
 		instance.LogBinEnabled = true
-		instance.LogSlaveUpdatesEnabled = true
+		instance.LogReplicationUpdatesEnabled = true
 	}
 }
@@ -229,7 +229,7 @@ func TestIsGenerallyValidAsCandidateReplica(t *testing.T) {
 	for _, instance := range instances {
 		instance.IsLastCheckValid = true
 		instance.LogBinEnabled = true
-		instance.LogSlaveUpdatesEnabled = false
+		instance.LogReplicationUpdatesEnabled = false
 	}
 	for _, instance := range instances {
 		test.S(t).ExpectFalse(isGenerallyValidAsCandidateReplica(instance))
@@ -274,7 +274,7 @@ func TestChooseCandidateReplicaNoCandidateReplica(t *testing.T) {
 	for _, instance := range instances {
 		instance.IsLastCheckValid = true
 		instance.LogBinEnabled = true
-		instance.LogSlaveUpdatesEnabled = false
+		instance.LogReplicationUpdatesEnabled = false
 	}
 	_, _, _, _, _, err := chooseCandidateReplica(instances)
 	test.S(t).ExpectNotNil(err)
@@ -296,7 +296,7 @@ func TestChooseCandidateReplica(t *testing.T) {
 func TestChooseCandidateReplica2(t *testing.T) {
 	instances, instancesMap := generateTestInstances()
 	applyGeneralGoodToGoReplicationParams(instances)
-	instancesMap[i830Key.StringCode()].LogSlaveUpdatesEnabled = false
+	instancesMap[i830Key.StringCode()].LogReplicationUpdatesEnabled = false
 	instancesMap[i820Key.StringCode()].LogBinEnabled = false
 	instances = sortedReplicas(instances, NoStopReplication)
 	candidate, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err := chooseCandidateReplica(instances)
diff --git a/go/inst/instance_utils.go b/go/inst/instance_utils.go
index 755a65958..161a13c6c 100644
--- a/go/inst/instance_utils.go
+++ b/go/inst/instance_utils.go
@@ -122,7 +122,7 @@ func (this *InstancesSorterByExec) Less(i, j int) bool {
 	}
 	if this.instances[i].ExecBinlogCoordinates.Equals(&this.instances[j].ExecBinlogCoordinates) {
 		// Secondary sorting: "smaller" if not logging replica updates
-		if this.instances[j].LogSlaveUpdatesEnabled && !this.instances[i].LogSlaveUpdatesEnabled {
+		if this.instances[j].LogReplicationUpdatesEnabled && !this.instances[i].LogReplicationUpdatesEnabled {
 			return true
 		}
 		// Next sorting: "smaller" if of higher version (this will be reversed eventually)
diff --git a/go/logic/orchestrator.go b/go/logic/orchestrator.go
index f9ea3dfeb..0fc4ff1bc 100644
--- a/go/logic/orchestrator.go
+++ b/go/logic/orchestrator.go
@@ -280,7 +280,7 @@ func DiscoverInstance(instanceKey inst.InstanceKey) {
 	}
 	// Investigate replicas:
-	for _, replicaKey := range instance.SlaveHosts.GetInstanceKeys() {
+	for _, replicaKey := range instance.Replicas.GetInstanceKeys() {
 		replicaKey := replicaKey // not needed? no concurrency here?
 		// Avoid noticing some hosts we would otherwise discover
diff --git a/go/logic/topology_recovery.go b/go/logic/topology_recovery.go
index 7680de2f8..bfab1b9f8 100644
--- a/go/logic/topology_recovery.go
+++ b/go/logic/topology_recovery.go
@@ -173,11 +173,11 @@ type InstancesByCountReplicas [](*inst.Instance)
 func (this InstancesByCountReplicas) Len() int      { return len(this) }
 func (this InstancesByCountReplicas) Swap(i, j int) { this[i], this[j] = this[j], this[i] }
 func (this InstancesByCountReplicas) Less(i, j int) bool {
-	if len(this[i].SlaveHosts) == len(this[j].SlaveHosts) {
+	if len(this[i].Replicas) == len(this[j].Replicas) {
 		// Secondary sorting: prefer more advanced replicas
 		return !this[i].ExecBinlogCoordinates.SmallerThan(&this[j].ExecBinlogCoordinates)
 	}
-	return len(this[i].SlaveHosts) < len(this[j].SlaveHosts)
+	return len(this[i].Replicas) < len(this[j].Replicas)
 }
 var recoverDeadMasterCounter = metrics.NewCounter()
@@ -291,8 +291,8 @@ func prepareCommand(command string, topologyRecovery *TopologyRecovery) (result
 	command = strings.Replace(command, "{lostSlaves}", topologyRecovery.LostReplicas.ToCommaDelimitedList(), -1)
 	command = strings.Replace(command, "{lostReplicas}", topologyRecovery.LostReplicas.ToCommaDelimitedList(), -1)
 	command = strings.Replace(command, "{countLostReplicas}", fmt.Sprintf("%d", len(topologyRecovery.LostReplicas)), -1)
-	command = strings.Replace(command, "{slaveHosts}", analysisEntry.SlaveHosts.ToCommaDelimitedList(), -1)
-	command = strings.Replace(command, "{replicaHosts}", analysisEntry.SlaveHosts.ToCommaDelimitedList(), -1)
+	command = strings.Replace(command, "{slaveHosts}", analysisEntry.Replicas.ToCommaDelimitedList(), -1)
+	command = strings.Replace(command, "{replicaHosts}", analysisEntry.Replicas.ToCommaDelimitedList(), -1)
 	return command, async
 }
@@ -319,7 +319,7 @@ func applyEnvironmentVariables(topologyRecovery *TopologyRecovery) []string {
 	env = append(env, fmt.Sprintf("ORC_ORCHESTRATOR_HOST=%s", process.ThisHostname))
 	env = append(env, fmt.Sprintf("ORC_IS_SUCCESSFUL=%v", (topologyRecovery.SuccessorKey != nil)))
 	env = append(env, fmt.Sprintf("ORC_LOST_REPLICAS=%s", topologyRecovery.LostReplicas.ToCommaDelimitedList()))
-	env = append(env, fmt.Sprintf("ORC_REPLICA_HOSTS=%s", analysisEntry.SlaveHosts.ToCommaDelimitedList()))
+	env = append(env, fmt.Sprintf("ORC_REPLICA_HOSTS=%s", analysisEntry.Replicas.ToCommaDelimitedList()))
 	env = append(env, fmt.Sprintf("ORC_RECOVERY_UID=%s", topologyRecovery.UID))
 	if topologyRecovery.SuccessorKey != nil {
@@ -393,7 +393,7 @@ func recoverDeadMasterInBinlogServerTopology(topologyRecovery *TopologyRecovery)
 	if err != nil {
 		return nil, log.Errore(err)
 	}
-	promotedBinlogServer, err = inst.StopSlave(&promotedBinlogServer.Key)
+	promotedBinlogServer, err = inst.StopReplication(&promotedBinlogServer.Key)
 	if err != nil {
 		return promotedReplica, log.Errore(err)
 	}
@@ -403,20 +403,20 @@ func recoverDeadMasterInBinlogServerTopology(topologyRecovery *TopologyRecovery)
 		return promotedReplica, log.Errore(err)
 	}
 	// Align it with binlog server coordinates
-	promotedReplica, err = inst.StopSlave(&promotedReplica.Key)
+	promotedReplica, err = inst.StopReplication(&promotedReplica.Key)
 	if err != nil {
 		return promotedReplica, log.Errore(err)
 	}
-	promotedReplica, err = inst.StartSlaveUntilMasterCoordinates(&promotedReplica.Key, &promotedBinlogServer.ExecBinlogCoordinates)
+	promotedReplica, err = inst.StartReplicationUntilMasterCoordinates(&promotedReplica.Key, &promotedBinlogServer.ExecBinlogCoordinates)
 	if err != nil {
 		return promotedReplica, log.Errore(err)
 	}
-	promotedReplica, err = inst.StopSlave(&promotedReplica.Key)
+	promotedReplica, err = inst.StopReplication(&promotedReplica.Key)
 	if err != nil {
 		return promotedReplica, log.Errore(err)
 	}
 	// Detach, flush binary logs forward
-	promotedReplica, err = inst.ResetSlave(&promotedReplica.Key)
+	promotedReplica, err = inst.ResetReplication(&promotedReplica.Key)
 	if err != nil {
 		return promotedReplica, log.Errore(err)
 	}
@@ -457,14 +457,14 @@ func recoverDeadMasterInBinlogServerTopology(topologyRecovery *TopologyRecovery)
 			return
 		}
 		postponedFunction := func() error {
-			binlogServerReplica, err := inst.StopSlave(&binlogServerReplica.Key)
+			binlogServerReplica, err := inst.StopReplication(&binlogServerReplica.Key)
 			if err != nil {
 				return err
 			}
 			// Make sure the BLS has the "next binlog" -- the one the master flushed & purged to. Otherwise the BLS
 			// will request a binlog the master does not have
 			if binlogServerReplica.ExecBinlogCoordinates.SmallerThan(&promotedBinlogServer.ExecBinlogCoordinates) {
-				binlogServerReplica, err = inst.StartSlaveUntilMasterCoordinates(&binlogServerReplica.Key, &promotedBinlogServer.ExecBinlogCoordinates)
+				binlogServerReplica, err = inst.StartReplicationUntilMasterCoordinates(&binlogServerReplica.Key, &promotedBinlogServer.ExecBinlogCoordinates)
 				if err != nil {
 					return err
 				}
@@ -850,9 +850,9 @@ func checkAndRecoverDeadMaster(analysisEntry inst.ReplicationAnalysis, candidate
 		return nil, fmt.Errorf("RecoverDeadMaster: failed %+v promotion; %s", promotedReplica.Key, reason)
 	}
 	if config.Config.FailMasterPromotionOnLagMinutes > 0 &&
-		time.Duration(promotedReplica.SlaveLagSeconds.Int64)*time.Second >= time.Duration(config.Config.FailMasterPromotionOnLagMinutes)*time.Minute {
+		time.Duration(promotedReplica.ReplicationLagSeconds.Int64)*time.Second >= time.Duration(config.Config.FailMasterPromotionOnLagMinutes)*time.Minute {
 		// candidate replica lags too much
-		return nil, fmt.Errorf("RecoverDeadMaster: failed promotion. FailMasterPromotionOnLagMinutes is set to %d (minutes) and promoted replica %+v 's lag is %d (seconds)", config.Config.FailMasterPromotionOnLagMinutes, promotedReplica.Key, promotedReplica.SlaveLagSeconds.Int64)
+		return nil, fmt.Errorf("RecoverDeadMaster: failed promotion. FailMasterPromotionOnLagMinutes is set to %d (minutes) and promoted replica %+v 's lag is %d (seconds)", config.Config.FailMasterPromotionOnLagMinutes, promotedReplica.Key, promotedReplica.ReplicationLagSeconds.Int64)
 	}
 	if config.Config.FailMasterPromotionIfSQLThreadNotUpToDate && !promotedReplica.SQLThreadUpToDate() {
 		return nil, fmt.Errorf("RecoverDeadMaster: failed promotion. FailMasterPromotionIfSQLThreadNotUpToDate is set and promoted replica %+v 's sql thread is not up to date (relay logs still unapplied). Aborting promotion", promotedReplica.Key)
 	}
@@ -883,10 +883,10 @@ func checkAndRecoverDeadMaster(analysisEntry inst.ReplicationAnalysis, candidate
 		// on GracefulMasterTakeoverCommandHint it makes utter sense to RESET SLAVE ALL and read_only=0, and there is no sense in not doing so.
 		AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("- RecoverDeadMaster: will apply MySQL changes to promoted master"))
 		{
-			_, err := inst.ResetSlaveOperation(&promotedReplica.Key)
+			_, err := inst.ResetReplicationOperation(&promotedReplica.Key)
 			if err != nil {
 				// Ugly, but this is important. Let's give it another try
-				_, err = inst.ResetSlaveOperation(&promotedReplica.Key)
+				_, err = inst.ResetReplicationOperation(&promotedReplica.Key)
 			}
 			AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("- RecoverDeadMaster: applying RESET SLAVE ALL on promoted master: success=%t", (err == nil)))
 			if err != nil {
@@ -964,7 +964,7 @@ func isGenerallyValidAsCandidateSiblingOfIntermediateMaster(sibling *inst.Instan
 	if !sibling.LogBinEnabled {
 		return false
 	}
-	if !sibling.LogSlaveUpdatesEnabled {
+	if !sibling.LogReplicationUpdatesEnabled {
 		return false
 	}
 	if !sibling.ReplicaRunning() {
@@ -1002,7 +1002,7 @@ func isValidAsCandidateSiblingOfIntermediateMaster(intermediateMasterInstance *i
 	return true
 }
-func isGenerallyValidAsWouldBeMaster(replica *inst.Instance, requireLogSlaveUpdates bool) bool {
+func isGenerallyValidAsWouldBeMaster(replica *inst.Instance, requireLogReplicationUpdates bool) bool {
 	if !replica.IsLastCheckValid {
 		// something wrong with this replica right now. We shouldn't hope to be able to promote it
 		return false
 	}
@@ -1010,7 +1010,7 @@ func isGenerallyValidAsWouldBeMaster(replica *inst.Instance, requireLogSlaveUpda
 	if !replica.LogBinEnabled {
 		return false
 	}
-	if requireLogSlaveUpdates && !replica.LogSlaveUpdatesEnabled {
+	if requireLogReplicationUpdates && !replica.LogReplicationUpdatesEnabled {
 		return false
 	}
 	if replica.IsBinlogServer() {
@@ -1436,7 +1436,7 @@ func emergentlyReadTopologyInstanceReplicas(instanceKey *inst.InstanceKey, analy
 	}
 }
-// emergentlyRestartReplicationOnTopologyInstance forces a RestartSlave on a given instance.
+// emergentlyRestartReplicationOnTopologyInstance forces a RestartReplication on a given instance.
 func emergentlyRestartReplicationOnTopologyInstance(instanceKey *inst.InstanceKey, analysisCode inst.AnalysisCode) {
 	if existsInCacheError := emergencyRestartReplicaTopologyInstanceMap.Add(instanceKey.StringCode(), true, cache.DefaultExpiration); existsInCacheError != nil {
 		// Just recently attempted on this specific replica
@@ -1464,7 +1464,7 @@ func isInEmergencyOperationGracefulPeriod(instanceKey *inst.InstanceKey) bool {
 // that's where we hope they realize the master is bad.
 func emergentlyRestartReplicationOnTopologyInstanceReplicas(instanceKey *inst.InstanceKey, analysisCode inst.AnalysisCode) {
 	if existsInCacheError := emergencyRestartReplicaTopologyInstanceMap.Add(instanceKey.StringCode(), true, cache.DefaultExpiration); existsInCacheError != nil {
-		// While each replica's RestartSlave() is throttled on its own, it's also wasteful to
+		// While each replica's RestartReplication() is throttled on its own, it's also wasteful to
 		// iterate all replicas all the time. This is the reason why we do grand-throttle check.
 		return
 	}
@@ -1553,7 +1553,7 @@ func getCheckAndRecoverFunction(analysisCode inst.AnalysisCode, analyzedInstance
 	}
 	// Right now this is mostly causing noise with no clear action.
 	// Will revisit this in the future.
-	// case inst.AllMasterSlavesStale:
+	// case inst.AllMasterReplicasStale:
 	// 	return checkAndRecoverGenericProblem, false
 	return nil, false
@@ -1849,7 +1849,7 @@ func getGracefulMasterTakeoverDesignatedInstance(clusterMasterKey *inst.Instance
 		return nil, fmt.Errorf("GracefulMasterTakeover: no target instance indicated, failed to auto-detect candidate replica for master %+v. Aborting", *clusterMasterKey)
 	}
 	log.Debugf("GracefulMasterTakeover: candidateReplica=%+v", designatedInstance.Key)
-	if _, err := inst.StartSlave(&designatedInstance.Key); err != nil {
+	if _, err := inst.StartReplication(&designatedInstance.Key); err != nil {
 		return nil, fmt.Errorf("GracefulMasterTakeover:cannot start replication on designated replica %+v. Aborting", designatedKey)
 	}
 	log.Infof("GracefulMasterTakeover: designated master deduced to be %+v", designatedInstance.Key)
@@ -2003,7 +2003,7 @@ func GracefulMasterTakeover(clusterName string, designatedKey *inst.InstanceKey,
 		}
 	}
 	if auto {
-		_, startReplicationErr := inst.StartSlave(&clusterMaster.Key)
+		_, startReplicationErr := inst.StartReplication(&clusterMaster.Key)
 		if err == nil {
 			err = startReplicationErr
 		}
diff --git a/go/logic/topology_recovery_dao.go b/go/logic/topology_recovery_dao.go
index a36c63ef7..12af9853e 100644
--- a/go/logic/topology_recovery_dao.go
+++ b/go/logic/topology_recovery_dao.go
@@ -41,7 +41,7 @@ func AttemptFailureDetectionRegistration(analysisEntry *inst.ReplicationAnalysis
 		analysisEntry.ClusterDetails.ClusterName,
 		analysisEntry.ClusterDetails.ClusterAlias,
 		analysisEntry.CountReplicas,
-		analysisEntry.SlaveHosts.ToCommaDelimitedList(),
+		analysisEntry.Replicas.ToCommaDelimitedList(),
 		analysisEntry.IsActionableRecovery,
 	)
 	startActivePeriodHint := "now()"
@@ -182,7 +182,7 @@ func writeTopologyRecovery(topologyRecovery *TopologyRecovery) (*TopologyRecover
 		string(analysisEntry.Analysis),
 		analysisEntry.ClusterDetails.ClusterName,
 		analysisEntry.ClusterDetails.ClusterAlias,
-		analysisEntry.CountReplicas, analysisEntry.SlaveHosts.ToCommaDelimitedList(),
+		analysisEntry.CountReplicas, analysisEntry.Replicas.ToCommaDelimitedList(),
 		analysisEntry.AnalyzedInstanceKey.Hostname, analysisEntry.AnalyzedInstanceKey.Port,
 	)
 	if err != nil {