This article collects typical usage examples of the Golang function github.com/outbrain/orchestrator/Godeps/_workspace/src/github.com/outbrain/golib/log.Errore. If you have been wondering what exactly Golang's Errore does, how to call it, or what it looks like in real code, the curated examples below should help.
A total of 20 code examples of the Errore function are shown below, sorted by popularity by default.
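Before the examples, a quick note on the function itself. Judging from the call sites below, log.Errore takes an error, writes it to the log at ERROR level, and returns the same error, which lets a caller log a failure and propagate it in a single expression such as return instance, log.Errore(err). The sketch below is not the golib implementation, only an illustration of that assumed contract (the nil pass-through is inferred from calls like return rows, log.Errore(err), where err may well be nil):

package main

import (
	"errors"
	"fmt"
	"log"
)

// errore is a hypothetical stand-in for golib's log.Errore: log the error, if
// any, and hand the same error straight back to the caller. The standard
// library logger is used here only to keep the sketch self-contained.
func errore(err error) error {
	if err != nil {
		log.Printf("ERROR %s", err)
	}
	return err
}

func main() {
	err := errors.New("backend unreachable")
	// Log and propagate in one expression, as the orchestrator code does.
	err = errore(err)
	fmt.Println("propagated:", err)
}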
Example 1: HealthTest
// HealthTest attempts to write to the backend database and get a result
func HealthTest() (*HealthStatus, error) {
	health := HealthStatus{Healthy: false, Hostname: ThisHostname, Token: ProcessToken.Hash}
	sqlResult, err := RegisterNode("", "", false)
	if err != nil {
		health.Error = err
		return &health, log.Errore(err)
	}
	rows, err := sqlResult.RowsAffected()
	if err != nil {
		health.Error = err
		return &health, log.Errore(err)
	}
	health.Healthy = (rows > 0)
	activeHostname, activeToken, isActive, err := ElectedNode()
	if err != nil {
		health.Error = err
		return &health, log.Errore(err)
	}
	health.ActiveNode = fmt.Sprintf("%s;%s", activeHostname, activeToken)
	health.IsActiveNode = isActive
	health.AvailableNodes, err = readAvailableNodes(true)
	return &health, nil
}
Developer: rlowe; Project: orchestrator; Lines: 27; Source: health_dao.go
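As a usage illustration (not part of the orchestrator source), a caller of HealthTest might look like the following; the package alias process is an assumption based on the file name health_dao.go:

// healthCheck is a hypothetical caller: it checks backend health and reports the result.
func healthCheck() error {
	health, err := process.HealthTest()
	if err != nil {
		// HealthTest has already logged the failure via log.Errore; just propagate it.
		return err
	}
	if !health.Healthy {
		fmt.Printf("backend not healthy; active node: %s\n", health.ActiveNode)
	}
	return nil
}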
Example 2: StopSlave
// StopSlave stops replication on a given instance
func StopSlave(instanceKey *InstanceKey) (*Instance, error) {
	instance, err := ReadTopologyInstance(instanceKey)
	if err != nil {
		return instance, log.Errore(err)
	}
	if !instance.IsSlave() {
		return instance, fmt.Errorf("instance is not a slave: %+v", instanceKey)
	}
	_, err = ExecInstanceNoPrepare(instanceKey, `stop slave`)
	if err != nil {
		// Patch; current MaxScale behavior for STOP SLAVE is to throw an error if slave already stopped.
		if instance.isMaxScale() && err.Error() == "Error 1199: Slave connection is not running" {
			err = nil
		}
	}
	if err != nil {
		return instance, log.Errore(err)
	}
	instance, err = ReadTopologyInstance(instanceKey)
	log.Infof("Stopped slave on %+v, Self:%+v, Exec:%+v", *instanceKey, instance.SelfBinlogCoordinates, instance.ExecBinlogCoordinates)
	return instance, err
}
Developer: rlowe; Project: orchestrator; Lines: 26; Source: instance_topology_dao.go
Example 3: writePoolInstances
// writePoolInstances will write (and override) a single cluster name mapping
func writePoolInstances(pool string, instanceKeys [](*InstanceKey)) error {
	writeFunc := func() error {
		db, err := db.OpenOrchestrator()
		if err != nil {
			return log.Errore(err)
		}
		tx, err := db.Begin()
		stmt, err := tx.Prepare(`delete from database_instance_pool where pool = ?`)
		_, err = stmt.Exec(pool)
		if err != nil {
			tx.Rollback()
			return log.Errore(err)
		}
		stmt, err = tx.Prepare(`insert into database_instance_pool values (?, ?, ?)`)
		for _, instanceKey := range instanceKeys {
			_, err := stmt.Exec(instanceKey.Hostname, instanceKey.Port, pool)
			if err != nil {
				tx.Rollback()
				return log.Errore(err)
			}
		}
		if err != nil {
			tx.Rollback()
			return log.Errore(err)
		}
		tx.Commit()
		return nil
	}
	return ExecDBWriteFunc(writeFunc)
}
Developer: rlowe; Project: orchestrator; Lines: 33; Source: pool_dao.go
Example 4: SkipQuery
// SkipQuery skips a single query in a failed replication instance
func SkipQuery(instanceKey *InstanceKey) (*Instance, error) {
	instance, err := ReadTopologyInstance(instanceKey)
	if err != nil {
		return instance, log.Errore(err)
	}
	if !instance.IsSlave() {
		return instance, fmt.Errorf("instance is not a slave: %+v", instanceKey)
	}
	if instance.Slave_SQL_Running {
		return instance, fmt.Errorf("Slave SQL thread is running on %+v", instanceKey)
	}
	if instance.LastSQLError == "" {
		return instance, fmt.Errorf("No SQL error on %+v", instanceKey)
	}
	if *config.RuntimeCLIFlags.Noop {
		return instance, fmt.Errorf("noop: aborting skip-query operation on %+v; signalling error but nothing went wrong.", *instanceKey)
	}
	log.Debugf("Skipping one query on %+v", instanceKey)
	if instance.UsingOracleGTID {
		err = skipQueryOracleGtid(instance)
	} else if instance.UsingMariaDBGTID {
		return instance, log.Errorf("%+v is replicating with MariaDB GTID. To skip a query first disable GTID, then skip, then enable GTID again", *instanceKey)
	} else {
		err = skipQueryClassic(instance)
	}
	if err != nil {
		return instance, log.Errore(err)
	}
	AuditOperation("skip-query", instanceKey, "Skipped one query")
	return StartSlave(instanceKey)
}
Developer: rlowe; Project: orchestrator; Lines: 35; Source: instance_topology_dao.go
Example 5: ResetSlave
// ResetSlave resets a slave, breaking the replication
func ResetSlave(instanceKey *InstanceKey) (*Instance, error) {
	instance, err := ReadTopologyInstance(instanceKey)
	if err != nil {
		return instance, log.Errore(err)
	}
	if instance.SlaveRunning() {
		return instance, fmt.Errorf("Cannot reset slave on: %+v because slave is running", instanceKey)
	}
	if *config.RuntimeCLIFlags.Noop {
		return instance, fmt.Errorf("noop: aborting reset-slave operation on %+v; signalling error but nothing went wrong.", *instanceKey)
	}
	// MySQL's RESET SLAVE is done correctly; however SHOW SLAVE STATUS still returns old hostnames etc
	// and only resets till after next restart. This leads to orchestrator still thinking the instance replicates
	// from old host. We therefore forcibly modify the hostname.
	// RESET SLAVE ALL command solves this, but only as of 5.6.3
	_, err = ExecInstanceNoPrepare(instanceKey, `change master to master_host='_'`)
	if err != nil {
		return instance, log.Errore(err)
	}
	_, err = ExecInstanceNoPrepare(instanceKey, `reset slave /*!50603 all */`)
	if err != nil {
		return instance, log.Errore(err)
	}
	log.Infof("Reset slave %+v", instanceKey)
	instance, err = ReadTopologyInstance(instanceKey)
	return instance, err
}
Developer: rlowe; Project: orchestrator; Lines: 32; Source: instance_topology_dao.go
Example 6: ReattachSlave
// ReattachSlave restores a detached slave back into replication
func ReattachSlave(instanceKey *InstanceKey) (*Instance, error) {
	instance, err := ReadTopologyInstance(instanceKey)
	if err != nil {
		return instance, log.Errore(err)
	}
	if instance.SlaveRunning() {
		return instance, fmt.Errorf("Cannot (need not) reattach slave on: %+v because slave is running", instanceKey)
	}
	isDetached, detachedLogFile, detachedLogPos := instance.ExecBinlogCoordinates.DetachedCoordinates()
	if !isDetached {
		return instance, fmt.Errorf("Cannot reattach slave on: %+v because slave is not detached", instanceKey)
	}
	if *config.RuntimeCLIFlags.Noop {
		return instance, fmt.Errorf("noop: aborting reattach-slave operation on %+v; signalling error but nothing went wrong.", *instanceKey)
	}
	_, err = ExecInstanceNoPrepare(instanceKey, fmt.Sprintf(`change master to master_log_file='%s', master_log_pos=%s`, detachedLogFile, detachedLogPos))
	if err != nil {
		return instance, log.Errore(err)
	}
	log.Infof("Reattach slave %+v", instanceKey)
	instance, err = ReadTopologyInstance(instanceKey)
	return instance, err
}
Developer: rlowe; Project: orchestrator; Lines: 31; Source: instance_topology_dao.go
Example 7: DetachSlave
// DetachSlave detaches a slave from replication; forcibly corrupting the binlog coordinates (though in such way
// that is reversible)
func DetachSlave(instanceKey *InstanceKey) (*Instance, error) {
	instance, err := ReadTopologyInstance(instanceKey)
	if err != nil {
		return instance, log.Errore(err)
	}
	if instance.SlaveRunning() {
		return instance, fmt.Errorf("Cannot detach slave on: %+v because slave is running", instanceKey)
	}
	isDetached, _, _ := instance.ExecBinlogCoordinates.DetachedCoordinates()
	if isDetached {
		return instance, fmt.Errorf("Cannot (need not) detach slave on: %+v because slave is already detached", instanceKey)
	}
	if *config.RuntimeCLIFlags.Noop {
		return instance, fmt.Errorf("noop: aborting detach-slave operation on %+v; signalling error but nothing went wrong.", *instanceKey)
	}
	detachedCoordinates := BinlogCoordinates{LogFile: fmt.Sprintf("//%s:%d", instance.ExecBinlogCoordinates.LogFile, instance.ExecBinlogCoordinates.LogPos), LogPos: instance.ExecBinlogCoordinates.LogPos}
	// Encode the current coordinates within the log file name, in such way that replication is broken, but info can still be resurrected
	_, err = ExecInstanceNoPrepare(instanceKey, fmt.Sprintf(`change master to master_log_file='%s', master_log_pos=%d`, detachedCoordinates.LogFile, detachedCoordinates.LogPos))
	if err != nil {
		return instance, log.Errore(err)
	}
	log.Infof("Detach slave %+v", instanceKey)
	instance, err = ReadTopologyInstance(instanceKey)
	return instance, err
}
Developer: rlowe; Project: orchestrator; Lines: 34; Source: instance_topology_dao.go
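The detach/reattach pair in Examples 6 and 7 works by smuggling the real coordinates into master_log_file as //<file>:<pos>, a deliberately invalid binlog file name that breaks replication while keeping the information recoverable. The following sketch mirrors that encoding and shows how it could be reversed; it is illustrative only and is not the orchestrator's DetachedCoordinates implementation, whose parsing details are an assumption here:

package main

import (
	"fmt"
	"strings"
)

// detachCoordinates encodes file and position into an intentionally invalid
// log file name, mirroring the format used in DetachSlave above.
func detachCoordinates(logFile string, logPos int64) string {
	return fmt.Sprintf("//%s:%d", logFile, logPos)
}

// parseDetachedCoordinates is a hypothetical reverse of that encoding: it
// reports whether the name is detached and, if so, the original file and position.
func parseDetachedCoordinates(name string) (isDetached bool, logFile string, logPos string) {
	if !strings.HasPrefix(name, "//") {
		return false, "", ""
	}
	payload := strings.TrimPrefix(name, "//")
	i := strings.LastIndex(payload, ":")
	if i < 0 {
		return false, "", ""
	}
	return true, payload[:i], payload[i+1:]
}

func main() {
	detached := detachCoordinates("mysql-bin.000123", 4567)
	ok, file, pos := parseDetachedCoordinates(detached)
	fmt.Println(detached, ok, file, pos) // //mysql-bin.000123:4567 true mysql-bin.000123 4567
}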
Example 8: UnresolveHostname
func UnresolveHostname(instanceKey *InstanceKey) (InstanceKey, bool, error) {
	if *config.RuntimeCLIFlags.SkipUnresolve {
		return *instanceKey, false, nil
	}
	unresolvedHostname, err := readUnresolvedHostname(instanceKey.Hostname)
	if err != nil {
		return *instanceKey, false, log.Errore(err)
	}
	if unresolvedHostname == instanceKey.Hostname {
		// unchanged. Nothing to do
		return *instanceKey, false, nil
	}
	// We unresolved to a different hostname. We will now re-resolve to double-check!
	unresolvedKey := &InstanceKey{Hostname: unresolvedHostname, Port: instanceKey.Port}
	instance, err := ReadTopologyInstance(unresolvedKey)
	if err != nil {
		return *instanceKey, false, log.Errore(err)
	}
	if instance.IsBinlogServer() && config.Config.SkipBinlogServerUnresolveCheck {
		// Do nothing. Everything is assumed to be fine.
	} else if instance.Key.Hostname != instanceKey.Hostname {
		// Resolve(Unresolve(hostname)) != hostname ==> Bad; reject
		if *config.RuntimeCLIFlags.SkipUnresolveCheck {
			return *instanceKey, false, nil
		}
		return *instanceKey, false, log.Errorf("Error unresolving; hostname=%s, unresolved=%s, re-resolved=%s; mismatch. Skip/ignore with --skip-unresolve-check", instanceKey.Hostname, unresolvedKey.Hostname, instance.Key.Hostname)
	}
	return *unresolvedKey, true, nil
}
Developer: rlowe; Project: orchestrator; Lines: 30; Source: resolve.go
Example 9: acknowledgeRecoveries
// acknowledgeRecoveries sets acknowledged* details and clears the in_active_period flags from a set of entries
func acknowledgeRecoveries(owner string, comment string, markEndRecovery bool, whereClause string, args []interface{}) (countAcknowledgedEntries int64, err error) {
	additionalSet := ``
	if markEndRecovery {
		additionalSet = `
			end_recovery=IFNULL(end_recovery, NOW()),
		`
	}
	query := fmt.Sprintf(`
		update topology_recovery set
			in_active_period = 0,
			end_active_period_unixtime = IF(end_active_period_unixtime = 0, UNIX_TIMESTAMP(), end_active_period_unixtime),
			%s
			acknowledged = 1,
			acknowledged_at = NOW(),
			acknowledged_by = ?,
			acknowledge_comment = ?
		where
			acknowledged = 0
			and
			%s
		`, additionalSet, whereClause)
	args = append(sqlutils.Args(owner, comment), args...)
	sqlResult, err := db.ExecOrchestrator(query, args...)
	if err != nil {
		return 0, log.Errore(err)
	}
	rows, err := sqlResult.RowsAffected()
	return rows, log.Errore(err)
}
Developer: rlowe; Project: orchestrator; Lines: 30; Source: topology_recovery_dao.go
Example 10: ExpireBlockedRecoveries
// ExpireBlockedRecoveries clears listing of blocked recoveries that are no longer actually blocked.
func ExpireBlockedRecoveries() error {
	// Older recovery is acknowledged by now, hence blocked recovery should be released.
	// Do NOTE that the data in blocked_topology_recovery is only used for auditing: it is NOT the data
	// based on which we make automated decisions.
	_, err := db.ExecOrchestrator(`
		delete
			from blocked_topology_recovery
			using
				blocked_topology_recovery
				left join topology_recovery on (blocking_recovery_id = topology_recovery.recovery_id and acknowledged = 0)
			where
				acknowledged is null
		`,
	)
	if err != nil {
		return log.Errore(err)
	}
	// Some oversampling, if a problem has not been noticed for some time (e.g. the server came up alive
	// before action was taken), expire it.
	// Recall that RegisterBlockedRecoveries continuously updates the last_blocked_timestamp column.
	_, err = db.ExecOrchestrator(`
		delete
			from blocked_topology_recovery
			where
				last_blocked_timestamp < NOW() - interval ? second
		`, (config.Config.RecoveryPollSeconds * 2),
	)
	if err != nil {
		return log.Errore(err)
	}
	return nil
}
Developer: rlowe; Project: orchestrator; Lines: 33; Source: topology_recovery_dao.go
Example 11: WriteLongRunningProcesses
// WriteLongRunningProcesses rewrites current state of long running processes for given instance
func WriteLongRunningProcesses(instanceKey *InstanceKey, processes []Process) error {
	writeFunc := func() error {
		_, err := db.ExecOrchestrator(`
			delete from
				database_instance_long_running_queries
			where
				hostname = ?
				and port = ?
			`,
			instanceKey.Hostname,
			instanceKey.Port)
		if err != nil {
			return log.Errore(err)
		}
		for _, process := range processes {
			_, merr := db.ExecOrchestrator(`
				insert into database_instance_long_running_queries (
					hostname,
					port,
					process_id,
					process_started_at,
					process_user,
					process_host,
					process_db,
					process_command,
					process_time_seconds,
					process_state,
					process_info
				) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
				instanceKey.Hostname,
				instanceKey.Port,
				process.Id,
				process.StartedAt,
				process.User,
				process.Host,
				process.Db,
				process.Command,
				process.Time,
				process.State,
				process.Info,
			)
			if merr != nil {
				err = merr
			}
		}
		if err != nil {
			return log.Errore(err)
		}
		return nil
	}
	return ExecDBWriteFunc(writeFunc)
}
Developer: rlowe; Project: orchestrator; Lines: 55; Source: process_dao.go
Example 12: RestartSlave
// RestartSlave stops & starts replication on a given instance
func RestartSlave(instanceKey *InstanceKey) (instance *Instance, err error) {
	instance, err = StopSlave(instanceKey)
	if err != nil {
		return instance, log.Errore(err)
	}
	instance, err = StartSlave(instanceKey)
	if err != nil {
		return instance, log.Errore(err)
	}
	return instance, nil
}
Developer: rlowe; Project: orchestrator; Lines: 13; Source: instance_topology_dao.go
Example 13: CommandRun
// CommandRun executes a command
func CommandRun(commandText string, arguments ...string) error {
	cmd, tmpFileName, err := execCmd(commandText, arguments...)
	defer os.Remove(tmpFileName)
	if err != nil {
		return log.Errore(err)
	}
	err = cmd.Run()
	if err != nil {
		return log.Errore(err)
	}
	return nil
}
Developer: rlowe; Project: orchestrator; Lines: 13; Source: process.go
Example 14: AuditOperation
// AuditOperation creates and writes a new audit entry by given params
func AuditOperation(auditType string, instanceKey *InstanceKey, message string) error {
	if instanceKey == nil {
		instanceKey = &InstanceKey{}
	}
	clusterName := ""
	if instanceKey.Hostname != "" {
		clusterName, _ = GetClusterName(instanceKey)
	}
	if config.Config.AuditLogFile != "" {
		go func() error {
			f, err := os.OpenFile(config.Config.AuditLogFile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600)
			if err != nil {
				return log.Errore(err)
			}
			defer f.Close()
			text := fmt.Sprintf("%s\t%s\t%s\t%d\t[%s]\t%s\t\n", time.Now().Format(log.TimeFormat), auditType, instanceKey.Hostname, instanceKey.Port, clusterName, message)
			if _, err = f.WriteString(text); err != nil {
				return log.Errore(err)
			}
			return nil
		}()
	}
	_, err := db.ExecOrchestrator(`
		insert
			into audit (
				audit_timestamp, audit_type, hostname, port, cluster_name, message
			) VALUES (
				NOW(), ?, ?, ?, ?, ?
			)
		`,
		auditType,
		instanceKey.Hostname,
		instanceKey.Port,
		clusterName,
		message,
	)
	if err != nil {
		return log.Errore(err)
	}
	logMessage := fmt.Sprintf("auditType:%s instance:%s cluster:%s message:%s", auditType, instanceKey.DisplayString(), clusterName, message)
	if syslogWriter != nil {
		go func() {
			syslogWriter.Info(logMessage)
		}()
	}
	log.Debugf(logMessage)
	auditOperationCounter.Inc(1)
	return err
}
Developer: rlowe; Project: orchestrator; Lines: 54; Source: audit_dao.go
Example 15: auditInstanceAnalysisInChangelog
// auditInstanceAnalysisInChangelog will write down an instance's analysis in the database_instance_analysis_changelog table.
// To not repeat recurring analysis code, the database_instance_last_analysis table is used, so that only changes to
// analysis codes are written.
func auditInstanceAnalysisInChangelog(instanceKey *InstanceKey, analysisCode AnalysisCode) error {
	if lastWrittenAnalysis, found := recentInstantAnalysis.Get(instanceKey.DisplayString()); found {
		if lastWrittenAnalysis == analysisCode {
			// Surely nothing new.
			// And let's expand the timeout
			recentInstantAnalysis.Set(instanceKey.DisplayString(), analysisCode, cache.DefaultExpiration)
			return nil
		}
	}
	// Passed the cache; but does database agree that there's a change? Here's a persistent cache; this comes here
	// to verify no two orchestrator services are doing this without coordinating (namely, one dies, the other taking its place
	// and has no familiarity of the former's cache)
	analysisChangeWriteAttemptCounter.Inc(1)
	sqlResult, err := db.ExecOrchestrator(`
		insert ignore into database_instance_last_analysis (
				hostname, port, analysis_timestamp, analysis
			) values (
				?, ?, now(), ?
			) on duplicate key update
				analysis = values(analysis),
				analysis_timestamp = if(analysis = values(analysis), analysis_timestamp, values(analysis_timestamp))
		`,
		instanceKey.Hostname, instanceKey.Port, string(analysisCode),
	)
	if err != nil {
		return log.Errore(err)
	}
	rows, err := sqlResult.RowsAffected()
	if err != nil {
		return log.Errore(err)
	}
	recentInstantAnalysis.Set(instanceKey.DisplayString(), analysisCode, cache.DefaultExpiration)
	lastAnalysisChanged := (rows > 0)
	if !lastAnalysisChanged {
		return nil
	}
	_, err = db.ExecOrchestrator(`
		insert into database_instance_analysis_changelog (
				hostname, port, analysis_timestamp, analysis
			) values (
				?, ?, now(), ?
			)
		`,
		instanceKey.Hostname, instanceKey.Port, string(analysisCode),
	)
	if err == nil {
		analysisChangeWriteCounter.Inc(1)
	}
	return log.Errore(err)
}
Developer: rlowe; Project: orchestrator; Lines: 55; Source: analysis_dao.go
Example 16: pollAgent
func pollAgent(hostname string) error {
	polledAgent, err := agent.GetAgent(hostname)
	agent.UpdateAgentLastChecked(hostname)
	if err != nil {
		return log.Errore(err)
	}
	err = agent.UpdateAgentInfo(hostname, polledAgent)
	if err != nil {
		return log.Errore(err)
	}
	return nil
}
Developer: rlowe; Project: orchestrator; Lines: 15; Source: orchestrator.go
Example 17: MasterPosWait
// MasterPosWait issues a MASTER_POS_WAIT() on a given instance according to given coordinates.
func MasterPosWait(instanceKey *InstanceKey, binlogCoordinates *BinlogCoordinates) (*Instance, error) {
	instance, err := ReadTopologyInstance(instanceKey)
	if err != nil {
		return instance, log.Errore(err)
	}
	_, err = ExecInstance(instanceKey, `select master_pos_wait(?, ?)`, binlogCoordinates.LogFile, binlogCoordinates.LogPos)
	if err != nil {
		return instance, log.Errore(err)
	}
	log.Infof("Instance %+v has reached coordinates: %+v", instanceKey, binlogCoordinates)
	instance, err = ReadTopologyInstance(instanceKey)
	return instance, err
}
Developer: rlowe; Project: orchestrator; Lines: 16; Source: instance_topology_dao.go
Example 18: StopSlaveNicely
// StopSlaveNicely stops a slave such that SQL_thread and IO_thread are aligned (i.e.
// SQL_thread consumes all relay log entries)
// It will actually START the sql_thread even if the slave is completely stopped.
func StopSlaveNicely(instanceKey *InstanceKey, timeout time.Duration) (*Instance, error) {
	instance, err := ReadTopologyInstance(instanceKey)
	if err != nil {
		return instance, log.Errore(err)
	}
	if !instance.IsSlave() {
		return instance, fmt.Errorf("instance is not a slave: %+v", instanceKey)
	}
	_, err = ExecInstanceNoPrepare(instanceKey, `stop slave io_thread`)
	_, err = ExecInstanceNoPrepare(instanceKey, `start slave sql_thread`)
	if instance.SQLDelay == 0 {
		// Otherwise we don't bother.
		startTime := time.Now()
		for upToDate := false; !upToDate; {
			if timeout > 0 && time.Since(startTime) >= timeout {
				// timeout
				return nil, log.Errorf("StopSlaveNicely timeout on %+v", *instanceKey)
			}
			instance, err = ReadTopologyInstance(instanceKey)
			if err != nil {
				return instance, log.Errore(err)
			}
			if instance.SQLThreadUpToDate() {
				upToDate = true
			} else {
				time.Sleep(sqlThreadPollDuration)
			}
		}
	}
	_, err = ExecInstanceNoPrepare(instanceKey, `stop slave`)
	if err != nil {
		// Patch; current MaxScale behavior for STOP SLAVE is to throw an error if slave already stopped.
		if instance.isMaxScale() && err.Error() == "Error 1199: Slave connection is not running" {
			err = nil
		}
	}
	if err != nil {
		return instance, log.Errore(err)
	}
	instance, err = ReadTopologyInstance(instanceKey)
	log.Infof("Stopped slave nicely on %+v, Self:%+v, Exec:%+v", *instanceKey, instance.SelfBinlogCoordinates, instance.ExecBinlogCoordinates)
	return instance, err
}
Developer: rlowe; Project: orchestrator; Lines: 51; Source: instance_topology_dao.go
Example 19: UpdateClusterAliases
func UpdateClusterAliases() error {
	writeFunc := func() error {
		_, err := db.ExecOrchestrator(`
			replace into
				cluster_alias (alias, cluster_name, last_registered)
			select
				suggested_cluster_alias,
				substring_index(group_concat(cluster_name order by cluster_name), ',', 1) as cluster_name,
				NOW()
			from
				database_instance
				left join database_instance_downtime using (hostname, port)
			where
				suggested_cluster_alias!=''
				and not (
					(hostname, port) in (select hostname, port from topology_recovery where start_active_period >= now() - interval 11111 day)
					and (
						database_instance_downtime.downtime_active IS NULL
						or database_instance_downtime.end_timestamp < NOW()
					) is false
				)
			group by
				suggested_cluster_alias
			`)
		if err == nil {
			err = ReadClusterAliases()
		}
		return log.Errore(err)
	}
	return ExecDBWriteFunc(writeFunc)
}
Developer: rlowe; Project: orchestrator; Lines: 31; Source: cluster_alias_dao.go
Example 20: ReadAgents
// ReadAgents returns a list of all known agents
func ReadAgents() ([]Agent, error) {
	res := []Agent{}
	query := `
		select
			hostname,
			port,
			token,
			last_submitted,
			mysql_port
		from
			host_agent
		order by
			hostname
		`
	err := db.QueryOrchestratorRowsMap(query, func(m sqlutils.RowMap) error {
		agent := Agent{}
		agent.Hostname = m.GetString("hostname")
		agent.Port = m.GetInt("port")
		agent.MySQLPort = m.GetInt64("mysql_port")
		agent.Token = ""
		agent.LastSubmitted = m.GetString("last_submitted")
		res = append(res, agent)
		return nil
	})
	if err != nil {
		log.Errore(err)
	}
	return res, err
}
Developer: rlowe; Project: orchestrator; Lines: 33; Source: agent_dao.go
Note: the github.com/outbrain/orchestrator/Godeps/_workspace/src/github.com/outbrain/golib/log.Errore examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub/MSDocs. The snippets are drawn from open-source projects contributed by their authors; copyright of the source code remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.