Kubernetes desired_state_of_world_populator source code
File path: /pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package populator implements interfaces that monitor and keep the states of the
caches in sync with the "ground truth".
*/
package populator
import (
"context"
"errors"
"fmt"
"sync"
"time"
"k8s.io/klog/v2"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/component-helpers/storage/ephemeral"
"k8s.io/kubernetes/pkg/kubelet/config"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/pod"
"k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/csimigration"
"k8s.io/kubernetes/pkg/volume/util"
volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
)
// DesiredStateOfWorldPopulator periodically loops through the list of active
// pods and ensures that each one exists in the desired state of the world cache
// if it has volumes. It also verifies that the pods in the desired state of the
// world cache still exist; if not, it removes them.
type DesiredStateOfWorldPopulator interface {
Run(sourcesReady config.SourcesReady, stopCh <-chan struct{})
// ReprocessPod sets the value for the specified pod in processedPods
// to false, forcing it to be reprocessed. This is required to enable
// remounting volumes on pod updates (volumes like Downward API volumes
// depend on this behavior to ensure volume content is updated).
ReprocessPod(podName volumetypes.UniquePodName)
// HasAddedPods returns whether the populator has looped through the list
// of active pods and added them to the desired state of the world cache,
// at a time after sources are all ready, at least once. It does not
// return true before sources are all ready because before then, there is
// a chance many or all pods are missing from the list of active pods and
// so few to none will have been added.
HasAddedPods() bool
}
// podStateProvider can determine if a pod is going to be terminated.
type podStateProvider interface {
ShouldPodContainersBeTerminating(types.UID) bool
ShouldPodRuntimeBeRemoved(types.UID) bool
}
// NewDesiredStateOfWorldPopulator returns a new instance of
// DesiredStateOfWorldPopulator.
//
// kubeClient - used to fetch PV and PVC objects from the API server
// loopSleepDuration - the amount of time the populator loop sleeps between
//   successive executions
// podManager - the kubelet podManager that is the source of truth for the pods
//   that exist on this host
// desiredStateOfWorld - the cache to populate
func NewDesiredStateOfWorldPopulator(
kubeClient clientset.Interface,
loopSleepDuration time.Duration,
getPodStatusRetryDuration time.Duration,
podManager pod.Manager,
podStateProvider podStateProvider,
desiredStateOfWorld cache.DesiredStateOfWorld,
actualStateOfWorld cache.ActualStateOfWorld,
kubeContainerRuntime kubecontainer.Runtime,
keepTerminatedPodVolumes bool,
csiMigratedPluginManager csimigration.PluginManager,
intreeToCSITranslator csimigration.InTreeToCSITranslator,
volumePluginMgr *volume.VolumePluginMgr) DesiredStateOfWorldPopulator {
return &desiredStateOfWorldPopulator{
kubeClient: kubeClient,
loopSleepDuration: loopSleepDuration,
getPodStatusRetryDuration: getPodStatusRetryDuration,
podManager: podManager,
podStateProvider: podStateProvider,
desiredStateOfWorld: desiredStateOfWorld,
actualStateOfWorld: actualStateOfWorld,
pods: processedPods{
processedPods: make(map[volumetypes.UniquePodName]bool)},
kubeContainerRuntime: kubeContainerRuntime,
keepTerminatedPodVolumes: keepTerminatedPodVolumes,
hasAddedPods: false,
hasAddedPodsLock: sync.RWMutex{},
csiMigratedPluginManager: csiMigratedPluginManager,
intreeToCSITranslator: intreeToCSITranslator,
volumePluginMgr: volumePluginMgr,
}
}
type desiredStateOfWorldPopulator struct {
kubeClient clientset.Interface
loopSleepDuration time.Duration
getPodStatusRetryDuration time.Duration
podManager pod.Manager
podStateProvider podStateProvider
desiredStateOfWorld cache.DesiredStateOfWorld
actualStateOfWorld cache.ActualStateOfWorld
pods processedPods
kubeContainerRuntime kubecontainer.Runtime
timeOfLastGetPodStatus time.Time
keepTerminatedPodVolumes bool
hasAddedPods bool
hasAddedPodsLock sync.RWMutex
csiMigratedPluginManager csimigration.PluginManager
intreeToCSITranslator csimigration.InTreeToCSITranslator
volumePluginMgr *volume.VolumePluginMgr
}
type processedPods struct {
processedPods map[volumetypes.UniquePodName]bool
sync.RWMutex
}
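// The processedPods map is effectively tri-state: a missing key means the pod
// has never been seen by the populator (podHasBeenSeenOnce), false means it was
// seen but must be (re)processed (markPodProcessingFailed, via ReprocessPod),
// and true means its volumes were fully processed (markPodProcessed).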
func (dswp *desiredStateOfWorldPopulator) Run(sourcesReady config.SourcesReady, stopCh <-chan struct{}) {
// Wait for the completion of a loop that started after sources are all ready, then set hasAddedPods accordingly
klog.InfoS("Desired state populator starts to run")
wait.PollUntil(dswp.loopSleepDuration, func() (bool, error) {
done := sourcesReady.AllReady()
dswp.populatorLoop()
return done, nil
}, stopCh)
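// A full populator pass has now completed with all pod sources ready, so
// consumers of HasAddedPods() can rely on the desired state being populated.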
dswp.hasAddedPodsLock.Lock()
dswp.hasAddedPods = true
dswp.hasAddedPodsLock.Unlock()
wait.Until(dswp.populatorLoop, dswp.loopSleepDuration, stopCh)
}
func (dswp *desiredStateOfWorldPopulator) ReprocessPod(
podName volumetypes.UniquePodName) {
dswp.markPodProcessingFailed(podName)
}
func (dswp *desiredStateOfWorldPopulator) HasAddedPods() bool {
dswp.hasAddedPodsLock.RLock()
defer dswp.hasAddedPodsLock.RUnlock()
return dswp.hasAddedPods
}
func (dswp *desiredStateOfWorldPopulator) populatorLoop() {
dswp.findAndAddNewPods()
// findAndRemoveDeletedPods() calls out to the container runtime to
// determine if the containers for a given pod are terminated. This is
// an expensive operation, therefore we limit the rate that
// findAndRemoveDeletedPods() is called independently of the main
// populator loop.
if time.Since(dswp.timeOfLastGetPodStatus) < dswp.getPodStatusRetryDuration {
klog.V(5).InfoS("Skipping findAndRemoveDeletedPods(). ", "nextRetryTime", dswp.timeOfLastGetPodStatus.Add(dswp.getPodStatusRetryDuration), "retryDuration", dswp.getPodStatusRetryDuration)
return
}
dswp.findAndRemoveDeletedPods()
}
// Iterate through all pods and add to desired state of world if they don't
// exist but should
func (dswp *desiredStateOfWorldPopulator) findAndAddNewPods() {
// Map unique pod name to outer volume name to MountedVolume.
mountedVolumesForPod := make(map[volumetypes.UniquePodName]map[string]cache.MountedVolume)
for _, mountedVolume := range dswp.actualStateOfWorld.GetMountedVolumes() {
mountedVolumes, exist := mountedVolumesForPod[mountedVolume.PodName]
if !exist {
mountedVolumes = make(map[string]cache.MountedVolume)
mountedVolumesForPod[mountedVolume.PodName] = mountedVolumes
}
mountedVolumes[mountedVolume.OuterVolumeSpecName] = mountedVolume
}
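// This index is consulted later by checkVolumeFSResize (via getUniqueVolumeName)
// to find the unique volume name of an already-mounted volume.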
for _, pod := range dswp.podManager.GetPods() {
if dswp.podStateProvider.ShouldPodContainersBeTerminating(pod.UID) {
// Do not (re)add volumes for pods that can't also be starting containers
continue
}
dswp.processPodVolumes(pod, mountedVolumesForPod)
}
}
// Iterate through all pods in desired state of world, and remove if they no
// longer exist
func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {
for _, volumeToMount := range dswp.desiredStateOfWorld.GetVolumesToMount() {
pod, podExists := dswp.podManager.GetPodByUID(volumeToMount.Pod.UID)
if podExists {
// check if the attachability has changed for this volume
if volumeToMount.PluginIsAttachable {
attachableVolumePlugin, err := dswp.volumePluginMgr.FindAttachablePluginBySpec(volumeToMount.VolumeSpec)
// only when err == nil and attachableVolumePlugin == nil is the plugin truly non-attachable
if err == nil && attachableVolumePlugin == nil {
// It is not possible right now for a CSI plugin to be both attachable and non-deviceMountable
// So the uniqueVolumeName should remain the same after the attachability change
dswp.desiredStateOfWorld.MarkVolumeAttachability(volumeToMount.VolumeName, false)
klog.InfoS("Volume changes from attachable to non-attachable", "volumeName", volumeToMount.VolumeName)
continue
}
}
// Exclude known pods that we expect to be running
if !dswp.podStateProvider.ShouldPodRuntimeBeRemoved(pod.UID) {
continue
}
if dswp.keepTerminatedPodVolumes {
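// The kubelet was started with --keep-terminated-pod-volumes; leave the
// volumes of terminated pods in the desired state while the pod exists.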
continue
}
}
// Once a pod has been deleted from kubelet pod manager, do not delete
// it immediately from volume manager. Instead, check the kubelet
// pod state provider to verify that all containers in the pod have been
// terminated.
if !dswp.podStateProvider.ShouldPodRuntimeBeRemoved(volumeToMount.Pod.UID) {
klog.V(4).InfoS("Pod still has one or more containers in the non-exited state and will not be removed from desired state", "pod", klog.KObj(volumeToMount.Pod))
continue
}
var volumeToMountSpecName string
if volumeToMount.VolumeSpec != nil {
volumeToMountSpecName = volumeToMount.VolumeSpec.Name()
}
removed := dswp.actualStateOfWorld.PodRemovedFromVolume(volumeToMount.PodName, volumeToMount.VolumeName)
if removed && podExists {
klog.V(4).InfoS("Actual state does not yet have volume mount information and pod still exists in pod manager, skip removing volume from desired state", "pod", klog.KObj(volumeToMount.Pod), "podUID", volumeToMount.Pod.UID, "volumeName", volumeToMountSpecName)
continue
}
klog.V(4).InfoS("Removing volume from desired state", "pod", klog.KObj(volumeToMount.Pod), "podUID", volumeToMount.Pod.UID, "volumeName", volumeToMountSpecName)
dswp.desiredStateOfWorld.DeletePodFromVolume(
volumeToMount.PodName, volumeToMount.VolumeName)
dswp.deleteProcessedPod(volumeToMount.PodName)
}
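// Clean up stored processing errors for pods that are no longer known to the
// pod manager.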
podsWithError := dswp.desiredStateOfWorld.GetPodsWithErrors()
for _, podName := range podsWithError {
if _, podExists := dswp.podManager.GetPodByUID(types.UID(podName)); !podExists {
dswp.desiredStateOfWorld.PopPodErrors(podName)
}
}
}
// processPodVolumes processes the volumes in the given pod and adds them to the
// desired state of the world.
func (dswp *desiredStateOfWorldPopulator) processPodVolumes(
pod *v1.Pod,
mountedVolumesForPod map[volumetypes.UniquePodName]map[string]cache.MountedVolume) {
if pod == nil {
return
}
uniquePodName := util.GetUniquePodName(pod)
if dswp.podPreviouslyProcessed(uniquePodName) {
return
}
allVolumesAdded := true
mounts, devices, seLinuxContainerContexts := util.GetPodVolumeNames(pod)
// Process volume spec for each volume defined in pod
for _, podVolume := range pod.Spec.Volumes {
if !mounts.Has(podVolume.Name) && !devices.Has(podVolume.Name) {
// Volume is not used in the pod, ignore it.
klog.V(4).InfoS("Skipping unused volume", "pod", klog.KObj(pod), "volumeName", podVolume.Name)
continue
}
pvc, volumeSpec, volumeGidValue, err :=
dswp.createVolumeSpec(podVolume, pod, mounts, devices)
if err != nil {
klog.ErrorS(err, "Error processing volume", "pod", klog.KObj(pod), "volumeName", podVolume.Name)
dswp.desiredStateOfWorld.AddErrorToPod(uniquePodName, err.Error())
allVolumesAdded = false
continue
}
// Add volume to desired state of world
uniqueVolumeName, err := dswp.desiredStateOfWorld.AddPodToVolume(
uniquePodName, pod, volumeSpec, podVolume.Name, volumeGidValue, seLinuxContainerContexts[podVolume.Name])
if err != nil {
klog.ErrorS(err, "Failed to add volume to desiredStateOfWorld", "pod", klog.KObj(pod), "volumeName", podVolume.Name, "volumeSpecName", volumeSpec.Name())
dswp.desiredStateOfWorld.AddErrorToPod(uniquePodName, err.Error())
allVolumesAdded = false
} else {
klog.V(4).InfoS("Added volume to desired state", "pod", klog.KObj(pod), "volumeName", podVolume.Name, "volumeSpecName", volumeSpec.Name())
}
// Sync reconstructed volume: if this volume was reconstructed from disk after
// a kubelet restart, align its record in the actual state of the world with
// the pod's outer volume spec name resolved here.
dswp.actualStateOfWorld.SyncReconstructedVolume(uniqueVolumeName, uniquePodName, podVolume.Name)
dswp.checkVolumeFSResize(pod, podVolume, pvc, volumeSpec, uniquePodName, mountedVolumesForPod)
}
// Some of the volume additions may have failed; do not mark this pod as fully processed.
if allVolumesAdded {
dswp.markPodProcessed(uniquePodName)
// New pod has been synced. Re-mount all volumes that need it
// (e.g. DownwardAPI)
dswp.actualStateOfWorld.MarkRemountRequired(uniquePodName)
// Remove any stored errors for the pod, everything went well in this processPodVolumes
dswp.desiredStateOfWorld.PopPodErrors(uniquePodName)
} else if dswp.podHasBeenSeenOnce(uniquePodName) {
// For the Pod which has been processed at least once, even though some volumes
// may not have been reprocessed successfully this round, we still mark it as processed to avoid
// processing it at a very high frequency. The pod will be reprocessed when volume manager calls
// ReprocessPod() which is triggered by SyncPod.
dswp.markPodProcessed(uniquePodName)
}
}
// checkVolumeFSResize records desired PVC size for a volume mounted by the pod.
// It is used for comparison with actual size(coming from pvc.Status.Capacity) and calling
// volume expansion on the node if needed.
func (dswp *desiredStateOfWorldPopulator) checkVolumeFSResize(
pod *v1.Pod,
podVolume v1.Volume,
pvc *v1.PersistentVolumeClaim,
volumeSpec *volume.Spec,
uniquePodName volumetypes.UniquePodName,
mountedVolumesForPod map[volumetypes.UniquePodName]map[string]cache.MountedVolume) {
// If the volumeSpec does not have a PV, or has InlineVolumeSpecForCSIMigration set,
// or the PVC is nil, the volume can't be resized and resizing should be skipped.
if volumeSpec.PersistentVolume == nil || volumeSpec.InlineVolumeSpecForCSIMigration || pvc == nil {
// Only PVC supports resize operation.
return
}
uniqueVolumeName, exist := getUniqueVolumeName(uniquePodName, podVolume.Name, mountedVolumesForPod)
if !exist {
// The volume does not exist in the ASW, so we assume it hasn't been mounted yet.
// If it needs resizing, it will be handled as an offline resize (if it indeed
// hasn't been mounted yet), or as an online resize in a subsequent loop (after
// we confirm it has been mounted).
return
}
// volumeSpec.ReadOnly is the value that determines if volume could be formatted when being mounted.
// This is the same flag that determines filesystem resizing behaviour for offline resizing and hence
// we should use it here. This value comes from Pod.spec.volumes.persistentVolumeClaim.readOnly.
if volumeSpec.ReadOnly {
// This volume is used as read only by this pod, we don't perform resize for read only volumes.
klog.V(5).InfoS("Skip file system resize check for the volume, as the volume is mounted as readonly", "pod", klog.KObj(pod), "volumeName", podVolume.Name)
return
}
pvCap := volumeSpec.PersistentVolume.Spec.Capacity.Storage()
pvcStatusCap := pvc.Status.Capacity.Storage()
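// Record the PV's desired size in the desired state of the world; the
// reconciler compares it against the claim's actual size and triggers a
// node-side expansion when the desired size is larger.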
dswp.desiredStateOfWorld.UpdatePersistentVolumeSize(uniqueVolumeName, pvCap)
// In case the actualStateOfWorld was rebuilt after a kubelet restart, ensure that claimSize is set to an accurate value.
dswp.actualStateOfWorld.InitializeClaimSize(uniqueVolumeName, pvcStatusCap)
}
func getUniqueVolumeName(
podName volumetypes.UniquePodName,
outerVolumeSpecName string,
mountedVolumesForPod map[volumetypes.UniquePodName]map[string]cache.MountedVolume) (v1.UniqueVolumeName, bool) {
mountedVolumes, exist := mountedVolumesForPod[podName]
if !exist {
return "", false
}
mountedVolume, exist := mountedVolumes[outerVolumeSpecName]
if !exist {
return "", false
}
return mountedVolume.VolumeName, true
}
// podPreviouslyProcessed returns true if the volumes for this pod have already
// been processed/reprocessed by the populator. Otherwise, the volumes for this pod need to
// be reprocessed.
func (dswp *desiredStateOfWorldPopulator) podPreviouslyProcessed(
podName volumetypes.UniquePodName) bool {
dswp.pods.RLock()
defer dswp.pods.RUnlock()
return dswp.pods.processedPods[podName]
}
// markPodProcessingFailed marks the specified pod from processedPods as false to indicate that it failed processing
func (dswp *desiredStateOfWorldPopulator) markPodProcessingFailed(
podName volumetypes.UniquePodName) {
dswp.pods.Lock()
dswp.pods.processedPods[podName] = false
dswp.pods.Unlock()
}
// podHasBeenSeenOnce returns true if the pod has been seen by the populator
// at least once.
func (dswp *desiredStateOfWorldPopulator) podHasBeenSeenOnce(
podName volumetypes.UniquePodName) bool {
dswp.pods.RLock()
_, exist := dswp.pods.processedPods[podName]
dswp.pods.RUnlock()
return exist
}
// markPodProcessed records that the volumes for the specified pod have been
// processed by the populator
func (dswp *desiredStateOfWorldPopulator) markPodProcessed(
podName volumetypes.UniquePodName) {
dswp.pods.Lock()
defer dswp.pods.Unlock()
dswp.pods.processedPods[podName] = true
}
// deleteProcessedPod removes the specified pod from processedPods
func (dswp *desiredStateOfWorldPopulator) deleteProcessedPod(
podName volumetypes.UniquePodName) {
dswp.pods.Lock()
defer dswp.pods.Unlock()
delete(dswp.pods.processedPods, podName)
}
// createVolumeSpec creates and returns a mutable volume.Spec object for the
// specified volume. It dereferences any PVC to get the PV object, if needed.
// Returns an error if unable to obtain the volume at this time.
func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(
podVolume v1.Volume, pod *v1.Pod, mounts, devices sets.String) (*v1.PersistentVolumeClaim, *volume.Spec, string, error) {
pvcSource := podVolume.VolumeSource.PersistentVolumeClaim
isEphemeral := pvcSource == nil && podVolume.VolumeSource.Ephemeral != nil
if isEphemeral {
// Generic ephemeral inline volumes are handled the
// same way as a PVC reference. The only additional
// constraint (checked below) is that the PVC must be
// owned by the pod.
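// The claim name is deterministic: "<pod name>-<volume name>".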
pvcSource = &v1.PersistentVolumeClaimVolumeSource{
ClaimName: ephemeral.VolumeClaimName(pod, &podVolume),
}
}
if pvcSource != nil {
klog.V(5).InfoS("Found PVC", "PVC", klog.KRef(pod.Namespace, pvcSource.ClaimName))
// If podVolume is a PVC, fetch the real PV behind the claim
pvc, err := dswp.getPVCExtractPV(
pod.Namespace, pvcSource.ClaimName)
if err != nil {
return nil, nil, "", fmt.Errorf(
"error processing PVC %s/%s: %v",
pod.Namespace,
pvcSource.ClaimName,
err)
}
if isEphemeral {
if err := ephemeral.VolumeIsForPod(pod, pvc); err != nil {
return nil, nil, "", err
}
}
pvName, pvcUID := pvc.Spec.VolumeName, pvc.UID
klog.V(5).InfoS("Found bound PV for PVC", "PVC", klog.KRef(pod.Namespace, pvcSource.ClaimName), "PVCUID", pvcUID, "PVName", pvName)
// Fetch actual PV object
volumeSpec, volumeGidValue, err :=
dswp.getPVSpec(pvName, pvcSource.ReadOnly, pvcUID)
if err != nil {
return nil, nil, "", fmt.Errorf(
"error processing PVC %s/%s: %v",
pod.Namespace,
pvcSource.ClaimName,
err)
}
klog.V(5).InfoS("Extracted volumeSpec from bound PV and PVC", "PVC", klog.KRef(pod.Namespace, pvcSource.ClaimName), "PVCUID", pvcUID, "PVName", pvName, "volumeSpecName", volumeSpec.Name())
migratable, err := dswp.csiMigratedPluginManager.IsMigratable(volumeSpec)
if err != nil {
return nil, nil, "", err
}
if migratable {
volumeSpec, err = csimigration.TranslateInTreeSpecToCSI(volumeSpec, pod.Namespace, dswp.intreeToCSITranslator)
if err != nil {
return nil, nil, "", err
}
}
// TODO: replace this with util.GetVolumeMode() when features.BlockVolume is removed.
// The function will return the right value then.
volumeMode := v1.PersistentVolumeFilesystem
if volumeSpec.PersistentVolume != nil && volumeSpec.PersistentVolume.Spec.VolumeMode != nil {
volumeMode = *volumeSpec.PersistentVolume.Spec.VolumeMode
}
// TODO: remove features.BlockVolume checks / comments after no longer needed
// Error if a container has volumeMounts but the volumeMode of PVC isn't Filesystem.
// The feature gate is deliberately not checked here: even when the feature is
// disabled in this kubelet, the controller-manager / API server can already
// contain block PVs / PVCs, so the volumeMode check must always run.
if mounts.Has(podVolume.Name) && volumeMode != v1.PersistentVolumeFilesystem {
return nil, nil, "", fmt.Errorf(
"volume %s has volumeMode %s, but is specified in volumeMounts",
podVolume.Name,
volumeMode)
}
// Error if a container has volumeDevices but the volumeMode of PVC isn't Block
if devices.Has(podVolume.Name) && volumeMode != v1.PersistentVolumeBlock {
return nil, nil, "", fmt.Errorf(
"volume %s has volumeMode %s, but is specified in volumeDevices",
podVolume.Name,
volumeMode)
}
return pvc, volumeSpec, volumeGidValue, nil
}
// Do not return the original volume object, since the source could mutate it
clonedPodVolume := podVolume.DeepCopy()
spec := volume.NewSpecFromVolume(clonedPodVolume)
migratable, err := dswp.csiMigratedPluginManager.IsMigratable(spec)
if err != nil {
return nil, nil, "", err
}
if migratable {
spec, err = csimigration.TranslateInTreeSpecToCSI(spec, pod.Namespace, dswp.intreeToCSITranslator)
if err != nil {
return nil, nil, "", err
}
}
return nil, spec, "", nil
}
// getPVCExtractPV fetches the PVC object with the given namespace and name from
// the API server, checks whether PVC is being deleted, extracts the name of the PV
// it is pointing to and returns it.
// An error is returned if the PVC object's phase is not "Bound".
func (dswp *desiredStateOfWorldPopulator) getPVCExtractPV(
namespace string, claimName string) (*v1.PersistentVolumeClaim, error) {
pvc, err :=
dswp.kubeClient.CoreV1().PersistentVolumeClaims(namespace).Get(context.TODO(), claimName, metav1.GetOptions{})
if err != nil || pvc == nil {
return nil, fmt.Errorf("failed to fetch PVC from API server: %v", err)
}
// Pods that use a PVC that is being deleted must not be started.
//
// In case an old kubelet is running without this check, or some kubelets
// have this feature disabled, the worst that can happen is that such a
// pod is scheduled. This was the default behavior in 1.8 and earlier
// and users should not be that surprised.
// It should happen only in the very rare case when the scheduler schedules
// a pod and the user deletes the PVC it uses at the same time.
if pvc.ObjectMeta.DeletionTimestamp != nil {
return nil, errors.New("PVC is being deleted")
}
if pvc.Status.Phase != v1.ClaimBound {
return nil, errors.New("PVC is not bound")
}
if pvc.Spec.VolumeName == "" {
return nil, errors.New("PVC has empty pvc.Spec.VolumeName")
}
return pvc, nil
}
// getPVSpec fetches the PV object with the given name from the API server
// and returns a volume.Spec representing it.
// An error is returned if the call to fetch the PV object fails.
func (dswp *desiredStateOfWorldPopulator) getPVSpec(
name string,
pvcReadOnly bool,
expectedClaimUID types.UID) (*volume.Spec, string, error) {
pv, err := dswp.kubeClient.CoreV1().PersistentVolumes().Get(context.TODO(), name, metav1.GetOptions{})
if err != nil || pv == nil {
return nil, "", fmt.Errorf(
"failed to fetch PV %s from API server: %v", name, err)
}
if pv.Spec.ClaimRef == nil {
return nil, "", fmt.Errorf(
"found PV object %s but it has a nil pv.Spec.ClaimRef indicating it is not yet bound to the claim",
name)
}
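// Guard against the PV having been re-bound to a different (e.g. recreated)
// claim: the claim UID recorded on the PV must match the PVC resolved above.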
if pv.Spec.ClaimRef.UID != expectedClaimUID {
return nil, "", fmt.Errorf(
"found PV object %s but its pv.Spec.ClaimRef.UID %s does not point to claim.UID %s",
name,
pv.Spec.ClaimRef.UID,
expectedClaimUID)
}
volumeGidValue := getPVVolumeGidAnnotationValue(pv)
return volume.NewSpecFromPersistentVolume(pv, pvcReadOnly), volumeGidValue, nil
}
func getPVVolumeGidAnnotationValue(pv *v1.PersistentVolume) string {
if volumeGid, ok := pv.Annotations[util.VolumeGidAnnotationKey]; ok {
return volumeGid
}
return ""
}
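To make the two-phase startup in Run() concrete, here is a minimal, self-contained sketch of the same PollUntil-then-Until pattern using the real k8s.io/apimachinery/pkg/util/wait helpers. The names allReady and loop are illustrative stand-ins for sourcesReady.AllReady and populatorLoop, not kubelet code:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	stopCh := make(chan struct{})
	ready := make(chan struct{})

	// Stand-in for the kubelet's pod sources becoming ready after a delay.
	go func() {
		time.Sleep(300 * time.Millisecond)
		close(ready)
	}()
	allReady := func() bool {
		select {
		case <-ready:
			return true
		default:
			return false
		}
	}

	// Stand-in for populatorLoop.
	loop := func() { fmt.Println("populator pass") }

	// Phase 1: run a pass on every tick, and stop polling only after a pass
	// has completed with all sources ready (mirrors Run before hasAddedPods).
	wait.PollUntil(100*time.Millisecond, func() (bool, error) {
		done := allReady()
		loop()
		return done, nil
	}, stopCh)
	fmt.Println("hasAddedPods = true")

	// Phase 2: keep running passes until stopCh is closed.
	go wait.Until(loop, 100*time.Millisecond, stopCh)
	time.Sleep(350 * time.Millisecond)
	close(stopCh)
}

Note how a pass always runs before the readiness result is returned: even the pass during which sources become ready counts, which is exactly why HasAddedPods() only flips to true after such a pass completes.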