	HMM patches for 5.3-rc
Fix the locking around nouveau's use of the hmm_range_* APIs. It works
 correctly in the success case, but many of the edge cases have missing
 unlocks or double unlocks.
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEfB7FMLh+8QxL+6i3OG33FX4gmxoFAl07BG0ACgkQOG33FX4g
 mxr/RA//b1t3rTjyYlzEGpCFouDAJrV8mRrmPZtywzSxhiyKgylWiQ9D5HyAZ8ZG
 evEF1xFe0PcTKiieqnZCJBPh864t+yt9Mm45MpWamBNoHx7WPSdeOMbSDUNvQR+H
 8aWTGBZvdKlqpwD63yvk7C6jkZ6vXDNYROnM395gzlfmaVGBeLygXqcKUkiW1x+D
 1CK+KsBldacxH/gE2X966mXxG46/5VL8KDVoo4VVnpLMDRdRs6zbIBRj7l9+hWbh
 2HABQyvDJW4tYmUW5iHAoLV2fAIE/nJMprEabXvd6rFAPwbryBroguXffGqkIaa0
 Ce1LIhiakCUniK2XgP2W/+KwJQBNp3hQjJr+ip7hgQCtzcD8zRYSxDt5gUtbjpGd
 4JfXrRVrfa08/hBe4adPfE5W5mW3oyEyRHldToT0SrywIY8sTLjN7RdCMwOqrxoR
 QkgqDISLqJab1OQEPHr7QgsgO2c2k19yPpckSZJ+IIldpNtLa9V+eif85NZ/esOd
 2GTWph3UQiACp9fLgEIAvJUnZ0blZpYq9TYshWWYkO34M+KgBdqOn1cQhZH+4rWb
 0Ed/jGdIaPZZ7XaLDgz5e7jl+t+kmSBdqSQtunF4bbu7AwR/zt3es0jq2vFoD451
 syF2vSVKyoBZMESX8X0O2cv+HHpN5oqH1XLI1ABOO09X9lxAPl4=
 =ZdrW
 -----END PGP SIGNATURE-----
Merge tag 'for-linus-hmm' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull HMM fixes from Jason Gunthorpe:
 "Fix the locking around nouveau's use of the hmm_range_* APIs. It works
  correctly in the success case, but many of the edge cases have
  missing unlocks or double unlocks.
  The diffstat is a bit big as Christoph did a comprehensive job to move
  the obsolete API from the core header and into the driver before
  fixing its flow, but the risk of regression from this code motion is
  low"
* tag 'for-linus-hmm' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  nouveau: unlock mmap_sem on all errors from nouveau_range_fault
  nouveau: remove the block parameter to nouveau_range_fault
  mm/hmm: move hmm_vma_range_done and hmm_vma_fault to nouveau
  mm/hmm: always return EBUSY for invalid ranges in hmm_range_{fault,snapshot}
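As a reading aid, here is a minimal sketch of the locking contract these fixes establish. The wrapper function and its surrounding flow are illustrative only, not code from this merge: the caller takes mmap_sem for read, the fault helper drops it on every error path, and only the success path unlocks in the caller, so each path releases the lock exactly once.

/*
 * Illustrative sketch only (not from this merge): the caller holds
 * mmap_sem for read; nouveau_range_fault() in the diff below releases
 * it on every error path, so the caller unlocks only on success --
 * no missing or double unlocks.
 */
static int example_fault_window(struct hmm_mirror *mirror,
				struct hmm_range *range,
				struct mm_struct *mm)
{
	int ret;

	down_read(&mm->mmap_sem);
	ret = nouveau_range_fault(mirror, range); /* drops mmap_sem on error */
	if (ret)
		return ret;	/* lock already released by the helper */

	/* ... mirror range->pfns[] into the device page tables ... */

	up_read(&mm->mmap_sem);	/* success path: the single unlock */
	return 0;
}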
			
			
commit 515f12b9ee
4 changed files with 49 additions and 64 deletions

@@ -237,7 +237,7 @@ The usage pattern is::
       ret = hmm_range_snapshot(&range);
       if (ret) {
           up_read(&mm->mmap_sem);
-          if (ret == -EAGAIN) {
+          if (ret == -EBUSY) {
             /*
              * No need to check hmm_range_wait_until_valid() return value
              * on retry we will get proper error with hmm_range_snapshot()
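For context, the fragment above sits inside the retry loop of the documented usage pattern. Below is a minimal sketch of that loop under the new -EBUSY convention; TIMEOUT_IN_MSEC, take_lock()/release_lock() and the driver "update" lock are placeholders, and setup of range, mirror, start and end plus the device page-table update are elided.

	/* Sketch of the documented snapshot flow, placeholders as noted above. */
	hmm_range_register(&range, mirror, start, end, PAGE_SHIFT);
	hmm_range_wait_until_valid(&range, TIMEOUT_IN_MSEC);

again:
	down_read(&mm->mmap_sem);
	ret = hmm_range_snapshot(&range);
	if (ret) {
		up_read(&mm->mmap_sem);
		if (ret == -EBUSY) {
			/* the range was invalidated; wait, then retry */
			hmm_range_wait_until_valid(&range, TIMEOUT_IN_MSEC);
			goto again;
		}
		hmm_range_unregister(&range);
		return ret;
	}

	take_lock(driver->update);		/* placeholder driver lock */
	if (!hmm_range_valid(&range)) {
		release_lock(driver->update);
		up_read(&mm->mmap_sem);
		goto again;
	}

	/* ... use range.pfns[] to fill the device page table ... */

	hmm_range_unregister(&range);
	release_lock(driver->update);
	up_read(&mm->mmap_sem);
	return 0;
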
@@ -475,6 +475,47 @@ nouveau_svm_fault_cache(struct nouveau_svm *svm,
 		fault->inst, fault->addr, fault->access);
 }
 
+static inline bool
+nouveau_range_done(struct hmm_range *range)
+{
+	bool ret = hmm_range_valid(range);
+
+	hmm_range_unregister(range);
+	return ret;
+}
+
+static int
+nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range)
+{
+	long ret;
+
+	range->default_flags = 0;
+	range->pfn_flags_mask = -1UL;
+
+	ret = hmm_range_register(range, mirror,
+				 range->start, range->end,
+				 PAGE_SHIFT);
+	if (ret) {
+		up_read(&range->vma->vm_mm->mmap_sem);
+		return (int)ret;
+	}
+
+	if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
+		up_read(&range->vma->vm_mm->mmap_sem);
+		return -EAGAIN;
+	}
+
+	ret = hmm_range_fault(range, true);
+	if (ret <= 0) {
+		if (ret == 0)
+			ret = -EBUSY;
+		up_read(&range->vma->vm_mm->mmap_sem);
+		hmm_range_unregister(range);
+		return ret;
+	}
+	return 0;
+}
+
 static int
 nouveau_svm_fault(struct nvif_notify *notify)
 {
@@ -649,10 +690,10 @@ nouveau_svm_fault(struct nvif_notify *notify)
 		range.values = nouveau_svm_pfn_values;
 		range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT;
 again:
-		ret = hmm_vma_fault(&svmm->mirror, &range, true);
+		ret = nouveau_range_fault(&svmm->mirror, &range);
 		if (ret == 0) {
 			mutex_lock(&svmm->mutex);
-			if (!hmm_vma_range_done(&range)) {
+			if (!nouveau_range_done(&range)) {
 				mutex_unlock(&svmm->mutex);
 				goto again;
 			}
@@ -666,8 +707,8 @@ again:
 						NULL);
 			svmm->vmm->vmm.object.client->super = false;
 			mutex_unlock(&svmm->mutex);
-			up_read(&svmm->mm->mmap_sem);
 		}
+		up_read(&svmm->mm->mmap_sem);
 
 		/* Cancel any faults in the window whose pages didn't manage
 		 * to keep their valid bit, or stay writeable when required.
@@ -484,60 +484,6 @@ long hmm_range_dma_unmap(struct hmm_range *range,
  */
 #define HMM_RANGE_DEFAULT_TIMEOUT 1000
 
-/* This is a temporary helper to avoid merge conflict between trees. */
-static inline bool hmm_vma_range_done(struct hmm_range *range)
-{
-	bool ret = hmm_range_valid(range);
-
-	hmm_range_unregister(range);
-	return ret;
-}
-
-/* This is a temporary helper to avoid merge conflict between trees. */
-static inline int hmm_vma_fault(struct hmm_mirror *mirror,
-				struct hmm_range *range, bool block)
-{
-	long ret;
-
-	/*
-	 * With the old API the driver must set each individual entries with
-	 * the requested flags (valid, write, ...). So here we set the mask to
-	 * keep intact the entries provided by the driver and zero out the
-	 * default_flags.
-	 */
-	range->default_flags = 0;
-	range->pfn_flags_mask = -1UL;
-
-	ret = hmm_range_register(range, mirror,
-				 range->start, range->end,
-				 PAGE_SHIFT);
-	if (ret)
-		return (int)ret;
-
-	if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
-		/*
-		 * The mmap_sem was taken by driver we release it here and
-		 * returns -EAGAIN which correspond to mmap_sem have been
-		 * drop in the old API.
-		 */
-		up_read(&range->vma->vm_mm->mmap_sem);
-		return -EAGAIN;
-	}
-
-	ret = hmm_range_fault(range, block);
-	if (ret <= 0) {
-		if (ret == -EBUSY || !ret) {
-			/* Same as above, drop mmap_sem to match old API. */
-			up_read(&range->vma->vm_mm->mmap_sem);
-			ret = -EBUSY;
-		} else if (ret == -EAGAIN)
-			ret = -EBUSY;
-		hmm_range_unregister(range);
-		return ret;
-	}
-	return 0;
-}
-
 /* Below are for HMM internal use only! Not to be used by device driver! */
 static inline void hmm_mm_init(struct mm_struct *mm)
 {
mm/hmm.c (10 changed lines)
@@ -946,7 +946,7 @@ EXPORT_SYMBOL(hmm_range_unregister);
  * @range: range
  * Return: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
  *          permission (for instance asking for write and range is read only),
- *          -EAGAIN if you need to retry, -EFAULT invalid (ie either no valid
+ *          -EBUSY if you need to retry, -EFAULT invalid (ie either no valid
  *          vma or it is illegal to access that range), number of valid pages
  *          in range->pfns[] (from range start address).
  *
@@ -967,7 +967,7 @@ long hmm_range_snapshot(struct hmm_range *range)
 	do {
 		/* If range is no longer valid force retry. */
 		if (!range->valid)
-			return -EAGAIN;
+			return -EBUSY;
 
 		vma = find_vma(hmm->mm, start);
 		if (vma == NULL || (vma->vm_flags & device_vma))
@@ -1062,10 +1062,8 @@ long hmm_range_fault(struct hmm_range *range, bool block)
 
 	do {
 		/* If range is no longer valid force retry. */
-		if (!range->valid) {
-			up_read(&hmm->mm->mmap_sem);
-			return -EAGAIN;
-		}
+		if (!range->valid)
+			return -EBUSY;
 
 		vma = find_vma(hmm->mm, start);
 		if (vma == NULL || (vma->vm_flags & device_vma))