diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 8b731d53e9f48e422e3e9c1c26aadc2f746cdd77..afff3ac8706732be09fd50a69d70f3930f30ca39 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2331,7 +2331,7 @@ static void remap_page(struct page *page, unsigned int nr)
 {
 	int i;
 
-	/* If TTU_SPLIT_FREEZE is ever extended to file, remove this check */
+	/* If unmap_page() uses try_to_migrate() on file, remove this check */
 	if (!PageAnon(page))
 		return;
 	if (PageTransHuge(page)) {
diff --git a/mm/rmap.c b/mm/rmap.c
index 37c24672125ccd650d8342f075f44d1cc3c181d6..746013e282c3649e0fe0d01104b79b66f80c1045 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1439,8 +1439,6 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	while (page_vma_mapped_walk(&pvmw)) {
 		/*
 		 * If the page is mlock()d, we cannot swap it out.
-		 * If it's recently referenced (perhaps page_referenced
-		 * skipped over this mm) then we should reactivate it.
 		 */
 		if (!(flags & TTU_IGNORE_MLOCK)) {
 			if (vma->vm_flags & VM_LOCKED) {
@@ -1687,8 +1685,7 @@ void try_to_unmap(struct page *page, enum ttu_flags flags)
  * @arg: enum ttu_flags will be passed to this argument.
  *
  * If TTU_SPLIT_HUGE_PMD is specified any PMD mappings will be split into PTEs
- * containing migration entries. This and TTU_RMAP_LOCKED are the only supported
- * flags.
+ * containing migration entries.
  */
 static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
 		     unsigned long address, void *arg)
@@ -1928,8 +1925,6 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
  *
  * Tries to remove all the page table entries which are mapping this page and
  * replace them with special swap entries. Caller must hold the page lock.
- *
- * If is successful, return true. Otherwise, false.
  */
 void try_to_migrate(struct page *page, enum ttu_flags flags)
 {
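
For reference, a minimal sketch (not part of the patch) of the calling convention the updated kernel-doc describes: the caller holds the page lock, passes TTU_SPLIT_HUGE_PMD so any PMD mapping is first split into PTEs, and, since try_to_migrate() now returns void, checks page_mapped() afterwards to see whether every mapping was replaced with a migration entry. The helper name below is hypothetical.

/*
 * Illustrative sketch only -- not part of this patch. The helper name is
 * hypothetical; try_to_migrate(), TTU_SPLIT_HUGE_PMD and page_mapped()
 * are the interfaces documented in the kernel-doc above.
 */
static void example_freeze_for_migration(struct page *page)
{
	/* Caller must hold the page lock (see the kernel-doc above). */
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	/*
	 * Replace the page's mappings with migration entries; a huge PMD
	 * mapping is split into PTEs first because of TTU_SPLIT_HUGE_PMD.
	 */
	try_to_migrate(page, TTU_SPLIT_HUGE_PMD);

	/* try_to_migrate() no longer reports success; probe the mapcount. */
	if (page_mapped(page))
		pr_debug("page still mapped after try_to_migrate()\n");
}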