This article collects typical usage examples of the Int32Ptr function from the Go package github.com/Azure/go-autorest/autorest/to. If you have been wondering what Int32Ptr does, how to call it, or what real-world usage looks like, the curated code samples below should help.
A total of 19 code examples of Int32Ptr are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the site recommend better Go code samples.
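Before the examples, a brief note on what Int32Ptr does: it returns a pointer to the int32 value it is given, which is needed because the Azure SDK models optional numeric fields as *int32. The short sketch below is not taken from any of the projects listed here; it is a minimal standalone program (an assumed usage, for illustration only) showing Int32Ptr together with its counterpart to.Int32, which dereferences a *int32 and returns 0 for nil.

package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// Optional numeric fields in the Azure SDK are *int32, so literal
	// values must be converted to pointers before being assigned.
	port := to.Int32Ptr(443)

	// to.Int32 is the inverse helper: it dereferences the pointer and
	// returns the zero value when the pointer is nil.
	fmt.Println(to.Int32(port)) // prints 443
	fmt.Println(to.Int32(nil))  // prints 0
}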
Example 1: getTestLoadBalancer
func getTestLoadBalancer(services ...api.Service) network.LoadBalancer {
	rules := []network.LoadBalancingRule{}
	probes := []network.Probe{}
	for _, service := range services {
		for _, port := range service.Spec.Ports {
			ruleName := getRuleName(&service, port)
			rules = append(rules, network.LoadBalancingRule{
				Name: to.StringPtr(ruleName),
				Properties: &network.LoadBalancingRulePropertiesFormat{
					FrontendPort: to.Int32Ptr(port.Port),
					BackendPort: to.Int32Ptr(port.Port),
				},
			})
			probes = append(probes, network.Probe{
				Name: to.StringPtr(ruleName),
				Properties: &network.ProbePropertiesFormat{
					Port: to.Int32Ptr(port.NodePort),
				},
			})
		}
	}
	lb := network.LoadBalancer{
		Properties: &network.LoadBalancerPropertiesFormat{
			LoadBalancingRules: &rules,
			Probes: &probes,
		},
	}
	return lb
}
Author: maisem, Project: kubernetes, Lines: 32, Source: azure_test.go
Example 2: createVolume
// createVolume updates the provided VirtualMachine's StorageProfile with the
// parameters for creating a new data disk. We don't actually interact with
// the Azure API until after all changes to the VirtualMachine are made.
func (v *azureVolumeSource) createVolume(
	vm *compute.VirtualMachine,
	p storage.VolumeParams,
	storageAccount *armstorage.Account,
) (*storage.Volume, *storage.VolumeAttachment, error) {
	lun, err := nextAvailableLUN(vm)
	if err != nil {
		return nil, nil, errors.Annotate(err, "choosing LUN")
	}
	dataDisksRoot := dataDiskVhdRoot(storageAccount)
	dataDiskName := p.Tag.String()
	vhdURI := dataDisksRoot + dataDiskName + vhdExtension
	sizeInGib := mibToGib(p.Size)
	dataDisk := compute.DataDisk{
		Lun: to.Int32Ptr(lun),
		DiskSizeGB: to.Int32Ptr(int32(sizeInGib)),
		Name: to.StringPtr(dataDiskName),
		Vhd: &compute.VirtualHardDisk{URI: to.StringPtr(vhdURI)},
		Caching: compute.ReadWrite,
		CreateOption: compute.Empty,
	}
	var dataDisks []compute.DataDisk
	if vm.Properties.StorageProfile.DataDisks != nil {
		dataDisks = *vm.Properties.StorageProfile.DataDisks
	}
	dataDisks = append(dataDisks, dataDisk)
	vm.Properties.StorageProfile.DataDisks = &dataDisks
	// Data disks associate VHDs to machines. In Juju's storage model,
	// the VHD is the volume and the disk is the volume attachment.
	volume := storage.Volume{
		p.Tag,
		storage.VolumeInfo{
			VolumeId: dataDiskName,
			Size: gibToMib(sizeInGib),
			// We don't currently support persistent volumes in
			// Azure, as it requires removal of "comp=media" when
			// deleting VMs, complicating cleanup.
			Persistent: true,
		},
	}
	volumeAttachment := storage.VolumeAttachment{
		p.Tag,
		p.Attachment.Machine,
		storage.VolumeAttachmentInfo{
			BusAddress: diskBusAddress(lun),
		},
	}
	return &volume, &volumeAttachment, nil
}
Author: bac, Project: juju, Lines: 57, Source: storage.go
Example 3: getSecurityRules
// getSecurityRules creates network security group rules based on driver
// configuration such as SSH port, docker port and swarm port.
func (d *Driver) getSecurityRules(extraPorts []string) (*[]network.SecurityRule, error) {
	mkRule := func(priority int, name, description, srcPort, dstPort string, proto network.SecurityRuleProtocol) network.SecurityRule {
		return network.SecurityRule{
			Name: to.StringPtr(name),
			Properties: &network.SecurityRulePropertiesFormat{
				Description: to.StringPtr(description),
				SourceAddressPrefix: to.StringPtr("*"),
				DestinationAddressPrefix: to.StringPtr("*"),
				SourcePortRange: to.StringPtr(srcPort),
				DestinationPortRange: to.StringPtr(dstPort),
				Access: network.Allow,
				Direction: network.Inbound,
				Protocol: proto,
				Priority: to.Int32Ptr(int32(priority)),
			},
		}
	}
	log.Debugf("Docker port is configured as %d", d.DockerPort)
	// Base ports to be opened for any machine
	rl := []network.SecurityRule{
		mkRule(100, "SSHAllowAny", "Allow ssh from public Internet", "*", fmt.Sprintf("%d", d.BaseDriver.SSHPort), network.TCP),
		mkRule(300, "DockerAllowAny", "Allow docker engine access (TLS-protected)", "*", fmt.Sprintf("%d", d.DockerPort), network.TCP),
	}
	// Open swarm port if configured
	if d.BaseDriver.SwarmMaster {
		swarmHost := d.BaseDriver.SwarmHost
		log.Debugf("Swarm host is configured as %q", swarmHost)
		u, err := url.Parse(swarmHost)
		if err != nil {
			return nil, fmt.Errorf("Cannot parse URL %q: %v", swarmHost, err)
		}
		_, swarmPort, err := net.SplitHostPort(u.Host)
		if err != nil {
			return nil, fmt.Errorf("Could not parse swarm port in %q: %v", u.Host, err)
		}
		rl = append(rl, mkRule(500, "DockerSwarmAllowAny", "Allow swarm manager access (TLS-protected)", "*", swarmPort, network.TCP))
	} else {
		log.Debug("Swarm host is not configured.")
	}
	// extra port numbers requested by user
	basePri := 1000
	for i, p := range extraPorts {
		port, protocol := driverutil.SplitPortProto(p)
		proto, err := parseSecurityRuleProtocol(protocol)
		if err != nil {
			return nil, fmt.Errorf("cannot parse security rule protocol: %v", err)
		}
		log.Debugf("User-requested port to be opened on NSG: %v/%s", port, proto)
		r := mkRule(basePri+i, fmt.Sprintf("Port%s%sAllowAny", port, proto), "User requested port to be accessible from Internet via docker-machine", "*", port, proto)
		rl = append(rl, r)
	}
	log.Debugf("Total NSG rules: %d", len(rl))
	return &rl, nil
}
Author: bgokden, Project: machine, Lines: 61, Source: util.go
Example 4: SetOSDiskSizeGB
func (s *TemplateBuilder) SetOSDiskSizeGB(diskSizeGB int32) error {
	resource, err := s.getResourceByType(resourceVirtualMachine)
	if err != nil {
		return err
	}
	profile := resource.Properties.StorageProfile
	profile.OsDisk.DiskSizeGB = to.Int32Ptr(diskSizeGB)
	return nil
}
Author: jtopper, Project: packer, Lines: 11, Source: template_builder.go
Example 5: makeSecurityRule
func makeSecurityRule(name, ipAddress, ports string) network.SecurityRule {
	return network.SecurityRule{
		Name: to.StringPtr(name),
		Properties: &network.SecurityRulePropertiesFormat{
			Protocol: network.TCP,
			DestinationAddressPrefix: to.StringPtr(ipAddress),
			DestinationPortRange: to.StringPtr(ports),
			Access: network.Allow,
			Priority: to.Int32Ptr(200),
			Direction: network.Inbound,
		},
	}
}
Author: bac, Project: juju, Lines: 13, Source: instance_test.go
Example 6: TestSecurityRulePriorityFailsIfExhausted
func TestSecurityRulePriorityFailsIfExhausted(t *testing.T) {
	rules := []network.SecurityRule{}
	var i int32
	for i = loadBalancerMinimumPriority; i < loadBalancerMaximumPriority; i++ {
		rules = append(rules, network.SecurityRule{
			Properties: &network.SecurityRulePropertiesFormat{
				Priority: to.Int32Ptr(i),
			},
		})
	}
	_, err := getNextAvailablePriority(rules)
	if err == nil {
		t.Error("Expected an error. There are no priority levels left.")
	}
}
Author: maisem, Project: kubernetes, Lines: 17, Source: azure_test.go
Example 7: newStorageProfile
// newStorageProfile creates the storage profile for a virtual machine,
// based on the series and chosen instance spec.
func newStorageProfile(
	vmName string,
	storageAccountName string,
	instanceSpec *instances.InstanceSpec,
) (*compute.StorageProfile, error) {
	logger.Debugf("creating storage profile for %q", vmName)
	urnParts := strings.SplitN(instanceSpec.Image.Id, ":", 4)
	if len(urnParts) != 4 {
		return nil, errors.Errorf("invalid image ID %q", instanceSpec.Image.Id)
	}
	publisher := urnParts[0]
	offer := urnParts[1]
	sku := urnParts[2]
	version := urnParts[3]
	osDisksRoot := fmt.Sprintf(
		`reference(resourceId('Microsoft.Storage/storageAccounts', '%s'), '%s').primaryEndpoints.blob`,
		storageAccountName, storage.APIVersion,
	)
	osDiskName := vmName
	osDiskURI := fmt.Sprintf(
		`[concat(%s, '%s/%s%s')]`,
		osDisksRoot, osDiskVHDContainer, osDiskName, vhdExtension,
	)
	osDiskSizeGB := mibToGB(instanceSpec.InstanceType.RootDisk)
	osDisk := &compute.OSDisk{
		Name: to.StringPtr(osDiskName),
		CreateOption: compute.FromImage,
		Caching: compute.ReadWrite,
		Vhd: &compute.VirtualHardDisk{URI: to.StringPtr(osDiskURI)},
		DiskSizeGB: to.Int32Ptr(int32(osDiskSizeGB)),
	}
	return &compute.StorageProfile{
		ImageReference: &compute.ImageReference{
			Publisher: to.StringPtr(publisher),
			Offer: to.StringPtr(offer),
			Sku: to.StringPtr(sku),
			Version: to.StringPtr(version),
		},
		OsDisk: osDisk,
	}, nil
}
Author: bac, Project: juju, Lines: 45, Source: environ.go
Example 8: TestSecurityRulePriorityPicksNextAvailablePriority
func TestSecurityRulePriorityPicksNextAvailablePriority(t *testing.T) {
	rules := []network.SecurityRule{}
	var expectedPriority int32 = loadBalancerMinimumPriority + 50
	var i int32
	for i = loadBalancerMinimumPriority; i < expectedPriority; i++ {
		rules = append(rules, network.SecurityRule{
			Properties: &network.SecurityRulePropertiesFormat{
				Priority: to.Int32Ptr(i),
			},
		})
	}
	priority, err := getNextAvailablePriority(rules)
	if err != nil {
		t.Errorf("Unexpected error: %q", err)
	}
	if priority != expectedPriority {
		t.Errorf("Expected priority %d. Got priority %d.", expectedPriority, priority)
	}
}
Author: maisem, Project: kubernetes, Lines: 23, Source: azure_test.go
Example 9: TestAttachVolumes
func (s *storageSuite) TestAttachVolumes(c *gc.C) {
	// machine-1 has a single data disk with LUN 0.
	machine1DataDisks := []compute.DataDisk{{
		Lun: to.Int32Ptr(0),
		Name: to.StringPtr("volume-1"),
		Vhd: &compute.VirtualHardDisk{
			URI: to.StringPtr(fmt.Sprintf(
				"https://%s.blob.storage.azurestack.local/datavhds/volume-1.vhd",
				storageAccountName,
			)),
		},
	}}
	// machine-2 has 32 data disks; no LUNs free.
	machine2DataDisks := make([]compute.DataDisk, 32)
	for i := range machine2DataDisks {
		machine2DataDisks[i].Lun = to.Int32Ptr(int32(i))
		machine2DataDisks[i].Name = to.StringPtr(fmt.Sprintf("volume-%d", i))
		machine2DataDisks[i].Vhd = &compute.VirtualHardDisk{
			URI: to.StringPtr(fmt.Sprintf(
				"https://%s.blob.storage.azurestack.local/datavhds/volume-%d.vhd",
				storageAccountName, i,
			)),
		}
	}
	// volume-0 and volume-2 are attached to machine-0
	// volume-1 is attached to machine-1
	// volume-3 is attached to machine-42, but machine-42 is missing
	// volume-42 is attached to machine-2, but machine-2 has no free LUNs
	makeParams := func(volume, machine string, size uint64) storage.VolumeAttachmentParams {
		return storage.VolumeAttachmentParams{
			AttachmentParams: storage.AttachmentParams{
				Provider: "azure",
				Machine: names.NewMachineTag(machine),
				InstanceId: instance.Id("machine-" + machine),
			},
			Volume: names.NewVolumeTag(volume),
			VolumeId: "volume-" + volume,
		}
	}
	params := []storage.VolumeAttachmentParams{
		makeParams("0", "0", 1),
		makeParams("1", "1", 1025),
		makeParams("2", "0", 1024),
		makeParams("3", "42", 40),
		makeParams("42", "2", 50),
	}
	virtualMachines := []compute.VirtualMachine{{
		Name: to.StringPtr("machine-0"),
		Properties: &compute.VirtualMachineProperties{
			StorageProfile: &compute.StorageProfile{},
		},
	}, {
		Name: to.StringPtr("machine-1"),
		Properties: &compute.VirtualMachineProperties{
			StorageProfile: &compute.StorageProfile{DataDisks: &machine1DataDisks},
		},
	}, {
		Name: to.StringPtr("machine-2"),
		Properties: &compute.VirtualMachineProperties{
			StorageProfile: &compute.StorageProfile{DataDisks: &machine2DataDisks},
		},
	}}
	// There should be one API call to list VMs, and one update per modified instance.
	virtualMachinesSender := azuretesting.NewSenderWithValue(compute.VirtualMachineListResult{
		Value: &virtualMachines,
	})
	virtualMachinesSender.PathPattern = `.*/Microsoft\.Compute/virtualMachines`
	updateVirtualMachine0Sender := azuretesting.NewSenderWithValue(&compute.VirtualMachine{})
	updateVirtualMachine0Sender.PathPattern = `.*/Microsoft\.Compute/virtualMachines/machine-0`
	volumeSource := s.volumeSource(c)
	s.sender = azuretesting.Senders{
		virtualMachinesSender,
		s.accountSender(),
		updateVirtualMachine0Sender,
	}
	results, err := volumeSource.AttachVolumes(params)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(results, gc.HasLen, len(params))
	c.Check(results[0].Error, jc.ErrorIsNil)
	c.Check(results[1].Error, jc.ErrorIsNil)
	c.Check(results[2].Error, jc.ErrorIsNil)
	c.Check(results[3].Error, gc.ErrorMatches, "instance machine-42 not found")
	c.Check(results[4].Error, gc.ErrorMatches, "choosing LUN: all LUNs are in use")
	// Validate HTTP request bodies.
	c.Assert(s.requests, gc.HasLen, 3)
	c.Assert(s.requests[0].Method, gc.Equals, "GET") // list virtual machines
	c.Assert(s.requests[1].Method, gc.Equals, "GET") // list storage accounts
	c.Assert(s.requests[2].Method, gc.Equals, "PUT") // update machine-0
	machine0DataDisks := []compute.DataDisk{{
		Lun: to.Int32Ptr(0),
		Name: to.StringPtr("volume-0"),
		Vhd: &compute.VirtualHardDisk{URI: to.StringPtr(fmt.Sprintf(
			"https://%s.blob.storage.azurestack.local/datavhds/volume-0.vhd",
//......... remainder of this example omitted .........
Author: bac, Project: juju, Lines: 101, Source: storage_test.go
Example 10: assertStartInstanceRequests
func (s *environSuite) assertStartInstanceRequests(
	c *gc.C,
	requests []*http.Request,
	args assertStartInstanceRequestsParams,
) startInstanceRequests {
	nsgId := `[resourceId('Microsoft.Network/networkSecurityGroups', 'juju-internal-nsg')]`
	securityRules := []network.SecurityRule{{
		Name: to.StringPtr("SSHInbound"),
		Properties: &network.SecurityRulePropertiesFormat{
			Description: to.StringPtr("Allow SSH access to all machines"),
			Protocol: network.TCP,
			SourceAddressPrefix: to.StringPtr("*"),
			SourcePortRange: to.StringPtr("*"),
			DestinationAddressPrefix: to.StringPtr("*"),
			DestinationPortRange: to.StringPtr("22"),
			Access: network.Allow,
			Priority: to.Int32Ptr(100),
			Direction: network.Inbound,
		},
	}, {
		Name: to.StringPtr("JujuAPIInbound"),
		Properties: &network.SecurityRulePropertiesFormat{
			Description: to.StringPtr("Allow API connections to controller machines"),
			Protocol: network.TCP,
			SourceAddressPrefix: to.StringPtr("*"),
			SourcePortRange: to.StringPtr("*"),
			DestinationAddressPrefix: to.StringPtr("192.168.16.0/20"),
			DestinationPortRange: to.StringPtr("17777"),
			Access: network.Allow,
			Priority: to.Int32Ptr(101),
			Direction: network.Inbound,
		},
	}}
	subnets := []network.Subnet{{
		Name: to.StringPtr("juju-internal-subnet"),
		Properties: &network.SubnetPropertiesFormat{
			AddressPrefix: to.StringPtr("192.168.0.0/20"),
			NetworkSecurityGroup: &network.SecurityGroup{
				ID: to.StringPtr(nsgId),
			},
		},
	}, {
		Name: to.StringPtr("juju-controller-subnet"),
		Properties: &network.SubnetPropertiesFormat{
			AddressPrefix: to.StringPtr("192.168.16.0/20"),
			NetworkSecurityGroup: &network.SecurityGroup{
				ID: to.StringPtr(nsgId),
			},
		},
	}}
	subnetName := "juju-internal-subnet"
	privateIPAddress := "192.168.0.4"
	if args.availabilitySetName == "juju-controller" {
		subnetName = "juju-controller-subnet"
		privateIPAddress = "192.168.16.4"
	}
	subnetId := fmt.Sprintf(
		`[concat(resourceId('Microsoft.Network/virtualNetworks', 'juju-internal-network'), '/subnets/%s')]`,
		subnetName,
	)
	publicIPAddressId := `[resourceId('Microsoft.Network/publicIPAddresses', 'machine-0-public-ip')]`
	ipConfigurations := []network.InterfaceIPConfiguration{{
		Name: to.StringPtr("primary"),
		Properties: &network.InterfaceIPConfigurationPropertiesFormat{
			Primary: to.BoolPtr(true),
			PrivateIPAddress: to.StringPtr(privateIPAddress),
			PrivateIPAllocationMethod: network.Static,
			Subnet: &network.Subnet{ID: to.StringPtr(subnetId)},
			PublicIPAddress: &network.PublicIPAddress{
				ID: to.StringPtr(publicIPAddressId),
			},
		},
	}}
	nicId := `[resourceId('Microsoft.Network/networkInterfaces', 'machine-0-primary')]`
	nics := []compute.NetworkInterfaceReference{{
		ID: to.StringPtr(nicId),
		Properties: &compute.NetworkInterfaceReferenceProperties{
			Primary: to.BoolPtr(true),
		},
	}}
	vmDependsOn := []string{
		nicId,
		`[resourceId('Microsoft.Storage/storageAccounts', '` + storageAccountName + `')]`,
	}
	addressPrefixes := []string{"192.168.0.0/20", "192.168.16.0/20"}
	templateResources := []armtemplates.Resource{{
		APIVersion: network.APIVersion,
		Type: "Microsoft.Network/networkSecurityGroups",
		Name: "juju-internal-nsg",
		Location: "westus",
		Tags: to.StringMap(s.envTags),
		Properties: &network.SecurityGroupPropertiesFormat{
			SecurityRules: &securityRules,
		},
	}, {
//......... remainder of this example omitted .........
Author: bac, Project: juju, Lines: 101, Source: environ_test.go
Example 11: SetUpTest
func (s *environSuite) SetUpTest(c *gc.C) {
	s.BaseSuite.SetUpTest(c)
	s.storageClient = azuretesting.MockStorageClient{}
	s.sender = nil
	s.requests = nil
	s.retryClock = mockClock{Clock: gitjujutesting.NewClock(time.Time{})}
	s.provider = newProvider(c, azure.ProviderConfig{
		Sender: azuretesting.NewSerialSender(&s.sender),
		RequestInspector: azuretesting.RequestRecorder(&s.requests),
		NewStorageClient: s.storageClient.NewClient,
		RetryClock: &gitjujutesting.AutoAdvancingClock{
			&s.retryClock, s.retryClock.Advance,
		},
		RandomWindowsAdminPassword: func() string { return "sorandom" },
		InteractiveCreateServicePrincipal: azureauth.InteractiveCreateServicePrincipal,
	})
	s.controllerUUID = testing.ControllerTag.Id()
	s.envTags = map[string]*string{
		"juju-model-uuid": to.StringPtr(testing.ModelTag.Id()),
		"juju-controller-uuid": to.StringPtr(s.controllerUUID),
	}
	s.vmTags = map[string]*string{
		"juju-model-uuid": to.StringPtr(testing.ModelTag.Id()),
		"juju-controller-uuid": to.StringPtr(s.controllerUUID),
		"juju-machine-name": to.StringPtr("machine-0"),
	}
	s.group = &resources.ResourceGroup{
		Location: to.StringPtr("westus"),
		Tags: &s.envTags,
		Properties: &resources.ResourceGroupProperties{
			ProvisioningState: to.StringPtr("Succeeded"),
		},
	}
	vmSizes := []compute.VirtualMachineSize{{
		Name: to.StringPtr("Standard_D1"),
		NumberOfCores: to.Int32Ptr(1),
		OsDiskSizeInMB: to.Int32Ptr(1047552),
		ResourceDiskSizeInMB: to.Int32Ptr(51200),
		MemoryInMB: to.Int32Ptr(3584),
		MaxDataDiskCount: to.Int32Ptr(2),
	}}
	s.vmSizes = &compute.VirtualMachineSizeListResult{Value: &vmSizes}
	s.storageAccount = &storage.Account{
		Name: to.StringPtr("my-storage-account"),
		Type: to.StringPtr("Standard_LRS"),
		Tags: &s.envTags,
		Properties: &storage.AccountProperties{
			PrimaryEndpoints: &storage.Endpoints{
				Blob: to.StringPtr(fmt.Sprintf("https://%s.blob.storage.azurestack.local/", storageAccountName)),
			},
			ProvisioningState: "Succeeded",
		},
	}
	keys := []storage.AccountKey{{
		KeyName: to.StringPtr("key-1-name"),
		Value: to.StringPtr("key-1"),
		Permissions: storage.FULL,
	}}
	s.storageAccountKeys = &storage.AccountListKeysResult{
		Keys: &keys,
	}
	s.ubuntuServerSKUs = []compute.VirtualMachineImageResource{
		{Name: to.StringPtr("12.04-LTS")},
		{Name: to.StringPtr("12.10")},
		{Name: to.StringPtr("14.04-LTS")},
		{Name: to.StringPtr("15.04")},
		{Name: to.StringPtr("15.10")},
		{Name: to.StringPtr("16.04-LTS")},
	}
	s.deployment = nil
}
Author: bac, Project: juju, Lines: 79, Source: environ_test.go
Example 12: TestDetachVolumes
func (s *storageSuite) TestDetachVolumes(c *gc.C) {
	// machine-0 has three data disks: volume-0, volume-1 and volume-2
	machine0DataDisks := []compute.DataDisk{{
		Lun: to.Int32Ptr(0),
		Name: to.StringPtr("volume-0"),
		Vhd: &compute.VirtualHardDisk{
			URI: to.StringPtr(fmt.Sprintf(
				"https://%s.blob.storage.azurestack.local/datavhds/volume-0.vhd",
				storageAccountName,
			)),
		},
	}, {
		Lun: to.Int32Ptr(1),
		Name: to.StringPtr("volume-1"),
		Vhd: &compute.VirtualHardDisk{
			URI: to.StringPtr(fmt.Sprintf(
				"https://%s.blob.storage.azurestack.local/datavhds/volume-1.vhd",
				storageAccountName,
			)),
		},
	}, {
		Lun: to.Int32Ptr(2),
		Name: to.StringPtr("volume-2"),
		Vhd: &compute.VirtualHardDisk{
			URI: to.StringPtr(fmt.Sprintf(
				"https://%s.blob.storage.azurestack.local/datavhds/volume-2.vhd",
				storageAccountName,
			)),
		},
	}}
	makeParams := func(volume, machine string) storage.VolumeAttachmentParams {
		return storage.VolumeAttachmentParams{
			AttachmentParams: storage.AttachmentParams{
				Provider: "azure",
				Machine: names.NewMachineTag(machine),
				InstanceId: instance.Id("machine-" + machine),
			},
			Volume: names.NewVolumeTag(volume),
			VolumeId: "volume-" + volume,
		}
	}
	params := []storage.VolumeAttachmentParams{
		makeParams("1", "0"),
		makeParams("1", "0"),
		makeParams("42", "1"),
		makeParams("2", "42"),
	}
	virtualMachines := []compute.VirtualMachine{{
		Name: to.StringPtr("machine-0"),
		Properties: &compute.VirtualMachineProperties{
			StorageProfile: &compute.StorageProfile{DataDisks: &machine0DataDisks},
		},
	}, {
		Name: to.StringPtr("machine-1"),
		Properties: &compute.VirtualMachineProperties{
			StorageProfile: &compute.StorageProfile{},
		},
	}}
	// There should be one API call to list VMs, and one update per modified instance.
	virtualMachinesSender := azuretesting.NewSenderWithValue(compute.VirtualMachineListResult{
		Value: &virtualMachines,
	})
	virtualMachinesSender.PathPattern = `.*/Microsoft\.Compute/virtualMachines`
	updateVirtualMachine0Sender := azuretesting.NewSenderWithValue(&compute.VirtualMachine{})
	updateVirtualMachine0Sender.PathPattern = `.*/Microsoft\.Compute/virtualMachines/machine-0`
	volumeSource := s.volumeSource(c)
	s.sender = azuretesting.Senders{
		virtualMachinesSender,
		s.accountSender(),
		updateVirtualMachine0Sender,
	}
	results, err := volumeSource.DetachVolumes(params)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(results, gc.HasLen, len(params))
	c.Check(results[0], jc.ErrorIsNil)
	c.Check(results[1], jc.ErrorIsNil)
	c.Check(results[2], jc.ErrorIsNil)
	c.Check(results[3], gc.ErrorMatches, "instance machine-42 not found")
	// Validate HTTP request bodies.
	c.Assert(s.requests, gc.HasLen, 3)
	c.Assert(s.requests[0].Method, gc.Equals, "GET") // list virtual machines
	c.Assert(s.requests[1].Method, gc.Equals, "GET") // list storage accounts
	c.Assert(s.requests[2].Method, gc.Equals, "PUT") // update machine-0
	machine0DataDisks = []compute.DataDisk{
		machine0DataDisks[0],
		machine0DataDisks[2],
	}
	virtualMachines[0].Properties.StorageProfile.DataDisks = &machine0DataDisks
	assertRequestBody(c, s.requests[2], &virtualMachines[0])
}
Author: bac, Project: juju, Lines: 97, Source: storage_test.go
Example 13: TestInstanceOpenPortsAlreadyOpen
func (s *instanceSuite) TestInstanceOpenPortsAlreadyOpen(c *gc.C) {
	internalSubnetId := path.Join(
		"/subscriptions", fakeSubscriptionId,
		"resourceGroups/juju-testenv-model-deadbeef-0bad-400d-8000-4b1d0d06f00d",
		"providers/Microsoft.Network/virtualnetworks/juju-internal-network/subnets/juju-internal-subnet",
	)
	ipConfiguration := network.InterfaceIPConfiguration{
		Properties: &network.InterfaceIPConfigurationPropertiesFormat{
			Primary: to.BoolPtr(true),
			PrivateIPAddress: to.StringPtr("10.0.0.4"),
			Subnet: &network.Subnet{
				ID: to.StringPtr(internalSubnetId),
			},
		},
	}
	s.networkInterfaces = []network.Interface{
		makeNetworkInterface("nic-0", "machine-0", ipConfiguration),
	}
	inst := s.getInstance(c)
	okSender := mocks.NewSender()
	okSender.AppendResponse(mocks.NewResponseWithContent("{}"))
	nsgSender := networkSecurityGroupSender([]network.SecurityRule{{
		Name: to.StringPtr("machine-0-tcp-1000"),
		Properties: &network.SecurityRulePropertiesFormat{
			Protocol: network.Asterisk,
			DestinationPortRange: to.StringPtr("1000"),
			Access: network.Allow,
			Priority: to.Int32Ptr(202),
			Direction: network.Inbound,
		},
	}})
	s.sender = azuretesting.Senders{nsgSender, okSender, okSender}
	err := inst.OpenPorts("0", []jujunetwork.PortRange{{
		Protocol: "tcp",
		FromPort: 1000,
		ToPort: 1000,
	}, {
		Protocol: "udp",
		FromPort: 1000,
		ToPort: 2000,
	}})
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(s.requests, gc.HasLen, 2)
	c.Assert(s.requests[0].Method, gc.Equals, "GET")
	c.Assert(s.requests[0].URL.Path, gc.Equals, internalSecurityGroupPath)
	c.Assert(s.requests[1].Method, gc.Equals, "PUT")
	c.Assert(s.requests[1].URL.Path, gc.Equals, securityRulePath("machine-0-udp-1000-2000"))
	assertRequestBody(c, s.requests[1], &network.SecurityRule{
		Properties: &network.SecurityRulePropertiesFormat{
			Description: to.StringPtr("1000-2000/udp"),
			Protocol: network.UDP,
			SourcePortRange: to.StringPtr("*"),
			SourceAddressPrefix: to.StringPtr("*"),
			DestinationPortRange: to.StringPtr("1000-2000"),
			DestinationAddressPrefix: to.StringPtr("10.0.0.4"),
			Access: network.Allow,
			Priority: to.Int32Ptr(200),
			Direction: network.Inbound,
		},
	})
}
Author: bac, Project: juju, Lines: 64, Source: instance_test.go
Example 14: OpenPorts
// OpenPorts is specified in the Instance interface.
func (inst *azureInstance) OpenPorts(machineId string, ports []jujunetwork.PortRange) error {
	nsgClient := network.SecurityGroupsClient{inst.env.network}
	securityRuleClient := network.SecurityRulesClient{inst.env.network}
	primaryNetworkAddress, err := inst.primaryNetworkAddress()
	if err != nil {
		return errors.Trace(err)
	}
	securityGroupName := internalSecurityGroupName
	var nsg network.SecurityGroup
	if err := inst.env.callAPI(func() (autorest.Response, error) {
		var err error
		nsg, err = nsgClient.Get(inst.env.resourceGroup, securityGroupName, "")
		return nsg.Response, err
	}); err != nil {
		return errors.Annotate(err, "querying network security group")
	}
	var securityRules []network.SecurityRule
	if nsg.Properties.SecurityRules != nil {
		securityRules = *nsg.Properties.SecurityRules
	} else {
		nsg.Properties.SecurityRules = &securityRules
	}
	// Create rules one at a time; this is necessary to avoid trampling
	// on changes made by the provisioner. We still record rules in the
	// NSG in memory, so we can easily tell which priorities are available.
	vmName := resourceName(names.NewMachineTag(machineId))
	prefix := instanceNetworkSecurityRulePrefix(instance.Id(vmName))
	for _, ports := range ports {
		ruleName := securityRuleName(prefix, ports)
		// Check if the rule already exists; OpenPorts must be idempotent.
		var found bool
		for _, rule := range securityRules {
			if to.String(rule.Name) == ruleName {
				found = true
				break
			}
		}
		if found {
			logger.Debugf("security rule %q already exists", ruleName)
			continue
		}
		logger.Debugf("creating security rule %q", ruleName)
		priority, err := nextSecurityRulePriority(nsg, securityRuleInternalMax+1, securityRuleMax)
		if err != nil {
			return errors.Annotatef(err, "getting security rule priority for %s", ports)
		}
		var protocol network.SecurityRuleProtocol
		switch ports.Protocol {
		case "tcp":
			protocol = network.TCP
		case "udp":
			protocol = network.UDP
		default:
			return errors.Errorf("invalid protocol %q", ports.Protocol)
		}
		var portRange string
		if ports.FromPort != ports.ToPort {
			portRange = fmt.Sprintf("%d-%d", ports.FromPort, ports.ToPort)
		} else {
			portRange = fmt.Sprint(ports.FromPort)
		}
		rule := network.SecurityRule{
			Properties: &network.SecurityRulePropertiesFormat{
				Description: to.StringPtr(ports.String()),
				Protocol: protocol,
				SourcePortRange: to.StringPtr("*"),
				DestinationPortRange: to.StringPtr(portRange),
				SourceAddressPrefix: to.StringPtr("*"),
				DestinationAddressPrefix: to.StringPtr(primaryNetworkAddress.Value),
				Access: network.Allow,
				Priority: to.Int32Ptr(priority),
				Direction: network.Inbound,
			},
		}
		if err := inst.env.callAPI(func() (autorest.Response, error) {
			return securityRuleClient.CreateOrUpdate(
				inst.env.resourceGroup, securityGroupName, ruleName, rule,
				nil, // abort channel
			)
		}); err != nil {
			return errors.Annotatef(err, "creating security rule for %s", ports)
		}
		securityRules = append(securityRules, rule)
	}
	return nil
}
Author: bac, Project: juju, Lines: 95, Source: instance.go
Example 15: reconcileLoadBalancer
// This ensures load balancer exists and the frontend ip config is setup.
// This also reconciles the Service's Ports with the LoadBalancer config.
// This entails adding rules/probes for expected Ports and removing stale rules/ports.
func (az *Cloud) reconcileLoadBalancer(lb network.LoadBalancer, pip *network.PublicIPAddress, clusterName string, service *v1.Service, nodeNames []string) (network.LoadBalancer, bool, error) {
	lbName := getLoadBalancerName(clusterName)
	serviceName := getServiceName(service)
	lbFrontendIPConfigName := getFrontendIPConfigName(service)
	lbFrontendIPConfigID := az.getFrontendIPConfigID(lbName, lbFrontendIPConfigName)
	lbBackendPoolName := getBackendPoolName(clusterName)
	lbBackendPoolID := az.getBackendPoolID(lbName, lbBackendPoolName)
	wantLb := len(service.Spec.Ports) > 0
	dirtyLb := false
	// Ensure LoadBalancer's Backend Pool Configuration
	if wantLb {
		if lb.Properties.BackendAddressPools == nil ||
			len(*lb.Properties.BackendAddressPools) == 0 {
			lb.Properties.BackendAddressPools = &[]network.BackendAddressPool{
				{
					Name: to.StringPtr(lbBackendPoolName),
				},
			}
			glog.V(10).Infof("reconcile(%s)(%t): lb backendpool - adding", serviceName, wantLb)
			dirtyLb = true
		} else if len(*lb.Properties.BackendAddressPools) != 1 ||
			!strings.EqualFold(*(*lb.Properties.BackendAddressPools)[0].Name, lbBackendPoolName) {
			return lb, false, fmt.Errorf("loadbalancer is misconfigured with a different backend pool")
		}
	}
	// Ensure LoadBalancer's Frontend IP Configurations
	dirtyConfigs := false
	newConfigs := []network.FrontendIPConfiguration{}
	if lb.Properties.FrontendIPConfigurations != nil {
		newConfigs = *lb.Properties.FrontendIPConfigurations
	}
	if !wantLb {
		for i := len(newConfigs) - 1; i >= 0; i-- {
			config := newConfigs[i]
			if strings.EqualFold(*config.Name, lbFrontendIPConfigName) {
				glog.V(3).Infof("reconcile(%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, lbFrontendIPConfigName)
				newConfigs = append(newConfigs[:i], newConfigs[i+1:]...)
				dirtyConfigs = true
			}
		}
	} else {
		foundConfig := false
		for _, config := range newConfigs {
			if strings.EqualFold(*config.Name, lbFrontendIPConfigName) {
				foundConfig = true
				break
			}
		}
		if !foundConfig {
			newConfigs = append(newConfigs,
				network.FrontendIPConfiguration{
					Name: to.StringPtr(lbFrontendIPConfigName),
					Properties: &network.FrontendIPConfigurationPropertiesFormat{
						PublicIPAddress: &network.PublicIPAddress{
							ID: pip.ID,
						},
					},
				})
			glog.V(10).Infof("reconcile(%s)(%t): lb frontendconfig(%s) - adding", serviceName, wantLb, lbFrontendIPConfigName)
			dirtyConfigs = true
		}
	}
	if dirtyConfigs {
		dirtyLb = true
		lb.Properties.FrontendIPConfigurations = &newConfigs
	}
	// update probes/rules
	expectedProbes := make([]network.Probe, len(service.Spec.Ports))
	expectedRules := make([]network.LoadBalancingRule, len(service.Spec.Ports))
	for i, port := range service.Spec.Ports {
		lbRuleName := getRuleName(service, port)
		transportProto, _, probeProto, err := getProtocolsFromKubernetesProtocol(port.Protocol)
		if err != nil {
			return lb, false, err
		}
		if serviceapi.NeedsHealthCheck(service) {
			podPresencePath, podPresencePort := serviceapi.GetServiceHealthCheckPathPort(service)
			expectedProbes[i] = network.Probe{
				Name: &lbRuleName,
				Properties: &network.ProbePropertiesFormat{
					RequestPath: to.StringPtr(podPresencePath),
					Protocol: network.ProbeProtocolHTTP,
					Port: to.Int32Ptr(podPresencePort),
					IntervalInSeconds: to.Int32Ptr(5),
					NumberOfProbes: to.Int32Ptr(2),
				},
			}
		} else {
			expectedProbes[i] = network.Probe{
				Name: &lbRuleName,
//......... remainder of this example omitted .........
Author: nak3, Project: kubernetes, Lines: 101, Source: azure_loadbalancer.go
Example 16:
	// controller machines
	securityRuleInternalAPIInbound
)
var (
	sshSecurityRule = network.SecurityRule{
		Name: to.StringPtr("SSHInbound"),
		Properties: &network.SecurityRulePropertiesFormat{
			Description: to.StringPtr("Allow SSH access to all machines"),
			Protocol: network.TCP,
			SourceAddressPrefix: to.StringPtr("*"),
			SourcePortRange: to.StringPtr("*"),
			DestinationAddressPrefix: to.StringPtr("*"),
			DestinationPortRange: to.StringPtr("22"),
			Access: network.Allow,
			Priority: to.Int32Ptr(securityRuleInternalSSHInbound),
			Direction: network.Inbound,
		},
	}
	apiSecurityRule = network.SecurityRule{
		Name: to.StringPtr("JujuAPIInbound"),
		Properties: &network.SecurityRulePropertiesFormat{
			Description: to.StringPtr("Allow API connections to controller machines"),
			Protocol: network.TCP,
			SourceAddressPrefix: to.StringPtr("*"),
			SourcePortRange: to.StringPtr("*"),
			DestinationAddressPrefix: to.StringPtr(controllerSubnetPrefix),
			// DestinationPortRange is set by createInternalNetworkSecurityGroup.
			Access: network.Allow,
			Priority: to.Int32Ptr(securityRuleInternalAPIInbound),
Author: bac, Project: juju, Lines: 31, Source: networking.go
Example 17: attachVolume
func (v *azureVolumeSource) attachVolume(
	vm *compute.VirtualMachine,
	p storage.VolumeAttachmentParams,
	storageAccount *armstorage.Account,
) (_ *storage.VolumeAttachment, updated bool, _ error) {
	storageAccount, err := v.env.getStorageAccount(false)
	if err != nil {
		return nil, false, errors.Trace(err)
	}
	dataDisksRoot := dataDiskVhdRoot(storageAccount)
	dataDiskName := p.VolumeId
	vhdURI := dataDisksRoot + dataDiskName + vhdExtension
	var dataDisks []compute.DataDisk
	if vm.Properties.StorageProfile.DataDisks != nil {
		dataDisks = *vm.Properties.StorageProfile.DataDisks
	}
	for _, disk := range dataDisks {
		if to.String(disk.Name) != p.VolumeId {
			continue
		}
		if to.String(disk.Vhd.URI) != vhdURI {
			continue
		}
		// Disk is already attached.
		volumeAttachment := &storage.VolumeAttachment{
			p.Volume,
			p.Machine,
			storage.VolumeAttachmentInfo{
				BusAddress: diskBusAddress(to.Int32(disk.Lun)),
			},
		}
		return volumeAttachment, false, nil
	}
	lun, err := nextAvailableLUN(vm)
	if err != nil {
		return nil, false, errors.Annotate(err, "choosing LUN")
	}
	dataDisk := compute.DataDisk{
		Lun: to.Int32Ptr(lun),
		Name: to.StringPtr(dataDiskName),
		Vhd: &compute.VirtualHardDisk{URI: to.StringPtr(vhdURI)},
		Caching: compute.ReadWrite,
		CreateOption: compute.Attach,
	}
	dataDisks = append(dataDisks, dataDisk)
	vm.Properties.StorageProfile.DataDisks = &dataDisks
	volumeAttachment := storage.VolumeAttachment{
		p.Volume,
		p.Machine,
		storage.VolumeAttachmentInfo{
			BusAddress: diskBusAddress(lun),
		},
	}
	return &volumeAttachment, true, nil
}
Author: bac, Project: juju, Lines: 61, Source: storage.go
Example 18: reconcileSecurityGroup
// This reconciles the Network Security Group similar to how the LB is reconciled.
// This entails adding required, missing SecurityRules and removing stale rules.
func (az *Cloud) reconcileSecurityGroup(sg network.SecurityGroup, clusterName string, service *v1.Service) (network.SecurityGroup, bool, error) {
	serviceName := getServiceName(service)
	wantLb := len(service.Spec.Ports) > 0
	sourceRanges, err := serviceapi.GetLoadBalancerSourceRanges(service)
	if err != nil {
		return sg, false, err
	}
	var sourceAddressPrefixes []string
	if sourceRanges == nil || serviceapi.IsAllowAll(sourceRanges) {
		sourceAddressPrefixes = []string{"Internet"}
	} else {
		for _, ip := range sourceRanges {
			sourceAddressPrefixes = append(sourceAddressPrefixes, ip.String())
		}
	}
	expectedSecurityRules := make([]network.SecurityRule, len(service.Spec.Ports)*len(sourceAddressPrefixes))
	for i, port := range service.Spec.Ports {
		securityRuleName := getRuleName(service, port)
		_, securityProto, _, err := getProtocolsFromKubernetesProtocol(port.Protocol)
		if err != nil {
			return sg, false, err
		}
		for j := range sourceAddressPrefixes {
			ix := i*len(sourceAddressPrefixes) + j
			expectedSecurityRules[ix] = network.SecurityRule{
				Name: to.StringPtr(securityRuleName),
				Properties: &network.SecurityRulePropertiesFormat{
					Protocol: securityProto,
					SourcePortRange: to.StringPtr("*"),
					DestinationPortRange: to.StringPtr(strconv.Itoa(int(port.Port))),
					SourceAddressPrefix: to.StringPtr(sourceAddressPrefixes[j]),
					DestinationAddressPrefix: to.StringPtr("*"),
					Access: network.Allow,
					Direction: network.Inbound,
				},
			}
		}
	}
	// update security rules
	dirtySg := false
	var updatedRules []network.SecurityRule
	if sg.Properties.SecurityRules != nil {
		updatedRules = *sg.Properties.SecurityRules
	}
	// update security rules: remove unwanted
	for i := len(updatedRules) - 1; i >= 0; i-- {
		existingRule := updatedRules[i]
		if serviceOwnsRule(service, *existingRule.Name) {
			glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name)
			keepRule := false
			if findSecurityRule(expectedSecurityRules, existingRule) {
				glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - keeping", serviceName, wantLb, *existingRule.Name)
				keepRule = true
			}
			if !keepRule {
				glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - dropping", serviceName, wantLb, *existingRule.Name)
				updatedRules = append(updatedRules[:i], updatedRules[i+1:]...)
				dirtySg = true
			}
		}
	}
	// update security rules: add needed
	for _, expectedRule := range expectedSecurityRules {
		foundRule := false
		if findSecurityRule(updatedRules, expectedRule) {
			glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - already exists", serviceName, wantLb, *expectedRule.Name)
			foundRule = true
		}
		if !foundRule {
			glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - adding", serviceName, wantLb, *expectedRule.Name)
			nextAvailablePriority, err := getNextAvailablePriority(updatedRules)
			if err != nil {
				return sg, false, err
//......... remainder of this example omitted .........