From 6ee0fedb3df2fdfbe737ed5718931cbc90bcc746 Mon Sep 17 00:00:00 2001
From: Wei-Chiu Chuang
Date: Fri, 6 Jul 2018 20:07:37 -0700
Subject: [PATCH] HBASE-20859 Backup and incremental load could fail in secure
 clusters.

Signed-off-by: tedyu
---
 .../hadoop/hbase/backup/util/BackupUtils.java |  2 +-
 .../hadoop/hbase/backup/TestBackupUtils.java  | 80 +++++++++++++++++++
 .../org/apache/hadoop/hbase/HConstants.java   |  4 +
 .../hbase/mapreduce/HFileOutputFormat2.java   |  2 +-
 .../mapreduce/TestHFileOutputFormat2.java     | 54 +++++++++++++
 5 files changed, 140 insertions(+), 2 deletions(-)
 create mode 100644 hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupUtils.java
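
Note for reviewers: the root cause is that HConstants.DEFAULT_TEMPORARY_HDFS_DIRECTORY
is built from System.getProperty("user.name"), the operating-system user of the JVM
process, while a secure (Kerberized) HDFS resolves the caller's home directory from the
authenticated principal. When the two differ, the default staging path points into another
user's space and the backup or incremental load fails. An explicitly set hbase.fs.tmp.dir
still wins; only the fallback default changes. Below is a minimal sketch of the mismatch,
not part of the patch; the OS user "hbase" and principal "alice" in the comments are
hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.security.UserGroupInformation;

    public class StagingDirSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Old default: derived from the OS user owning the JVM process,
        // e.g. "/user/hbase/hbase-staging" no matter who authenticated.
        String oldDefault = "/user/" + System.getProperty("user.name") + "/hbase-staging";
        // Fixed default: derived by the FileSystem from the authenticated
        // identity, e.g. "/user/alice/hbase-staging" for principal alice.
        FileSystem fs = FileSystem.get(conf);
        String fixedDefault = fs.getHomeDirectory() + "/hbase-staging";
        System.out.println(oldDefault + " vs " + fixedDefault);
        // The identity the fixed default follows:
        System.out.println(UserGroupInformation.getCurrentUser().getShortUserName());
      }
    }
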
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java
index e01849ae1038..12f2f2f846bc 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java
@@ -695,7 +695,7 @@ public static Path getBulkOutputDir(String tableName, Configuration conf, boolea
       throws IOException {
     FileSystem fs = FileSystem.get(conf);
     String tmp = conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY,
-        HConstants.DEFAULT_TEMPORARY_HDFS_DIRECTORY);
+        fs.getHomeDirectory() + "/hbase-staging");
     Path path = new Path(tmp + Path.SEPARATOR + "bulk_output-" + tableName + "-"
         + EnvironmentEdgeManager.currentTime());
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupUtils.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupUtils.java
new file mode 100644
index 000000000000..01481a0f65b3
--- /dev/null
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupUtils.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+import java.security.PrivilegedAction;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.Assert;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@Category(SmallTests.class)
+public class TestBackupUtils {
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+      HBaseClassTestRule.forClass(TestBackupUtils.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TestBackupUtils.class);
+
+  protected static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  protected static Configuration conf = TEST_UTIL.getConfiguration();
+
+  @Test
+  public void TestGetBulkOutputDir() {
+    // Create a user who is not the current user
+    String fooUserName = "foo1234";
+    String fooGroupName = "group1";
+    UserGroupInformation
+        ugi = UserGroupInformation.createUserForTesting(fooUserName, new String[]{fooGroupName});
+    // Get user's home directory
+    Path fooHomeDirectory = ugi.doAs(new PrivilegedAction<Path>() {
+      @Override public Path run() {
+        try (FileSystem fs = FileSystem.get(conf)) {
+          return fs.getHomeDirectory();
+        } catch (IOException ioe) {
+          LOG.error("Failed to get foo's home directory", ioe);
+        }
+        return null;
+      }
+    });
+
+    Path bulkOutputDir = ugi.doAs(new PrivilegedAction<Path>() {
+      @Override public Path run() {
+        try {
+          return BackupUtils.getBulkOutputDir("test", conf, false);
+        } catch (IOException ioe) {
+          LOG.error("Failed to get bulk output dir path", ioe);
+        }
+        return null;
+      }
+    });
+    // Make sure the directory is in foo1234's home directory
+    Assert.assertTrue(bulkOutputDir.toString().startsWith(fooHomeDirectory.toString()));
+  }
+}
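
Note for reviewers: the new test needs no Kerberized cluster, because
UserGroupInformation.createUserForTesting fabricates an authenticated identity and doAs
runs the enclosed block as that user. A standalone sketch of the pattern, not part of the
patch; the names "bob" and "testers" are made up:

    import java.io.IOException;
    import java.security.PrivilegedAction;
    import org.apache.hadoop.security.UserGroupInformation;

    public class DoAsSketch {
      public static void main(String[] args) {
        UserGroupInformation ugi =
            UserGroupInformation.createUserForTesting("bob", new String[] { "testers" });
        // Inside run(), the current user is "bob", so identity-derived paths
        // resolve under /user/bob rather than under the JVM owner's home.
        String current = ugi.doAs(new PrivilegedAction<String>() {
          @Override public String run() {
            try {
              return UserGroupInformation.getCurrentUser().getShortUserName();
            } catch (IOException ioe) {
              throw new RuntimeException(ioe);
            }
          }
        });
        System.out.println(current); // prints "bob"
      }
    }
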
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index fa1a772c0f42..2e23484cb477 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1352,6 +1352,10 @@ public enum OperationStatusCode {

   /** Config key for hbase temporary directory in hdfs */
   public static final String TEMPORARY_FS_DIRECTORY_KEY = "hbase.fs.tmp.dir";
+
+  /** Don't use it! This'll get you the wrong path in a secure cluster.
+   * Use FileSystem.getHomeDirectory() or
+   * "/user/" + UserGroupInformation.getCurrentUser().getShortUserName() */
   public static final String DEFAULT_TEMPORARY_HDFS_DIRECTORY = "/user/" +
       System.getProperty("user.name") + "/hbase-staging";
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
index a403455d6d9c..0063c56a993f 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
@@ -820,7 +820,7 @@ static void configurePartitioner(Job job, List<ImmutableBytesWritable> splitPoin
     FileSystem fs = FileSystem.get(conf);
     String hbaseTmpFsDir = conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY,
-        HConstants.DEFAULT_TEMPORARY_HDFS_DIRECTORY);
+        fs.getHomeDirectory() + "/hbase-staging");
     Path partitionsPath = new Path(hbaseTmpFsDir, "partitions_" + UUID.randomUUID());
     fs.makeQualified(partitionsPath);
     writePartitions(conf, partitionsPath, splitPoints, writeMultipleTables);
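
Note for reviewers: configurePartitioner writes the split points to a partitions file
under hbase.fs.tmp.dir (now defaulting to the caller's home directory) and registers that
file with TotalOrderPartitioner; "mapreduce.totalorderpartitioner.path", asserted on by
the test below, is the key TotalOrderPartitioner.setPartitionFile stores the path under.
A sketch of that round trip, not part of the patch; "partitions_example" is a made-up
file name:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;

    public class PartitionFileSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // With the fix, the default staging dir tracks the authenticated user.
        Path staging = new Path(fs.getHomeDirectory() + "/hbase-staging");
        Path partitions = fs.makeQualified(new Path(staging, "partitions_example"));
        // configurePartitioner() records the file under
        // "mapreduce.totalorderpartitioner.path" ...
        TotalOrderPartitioner.setPartitionFile(conf, partitions);
        // ... which is where the partitioner (and the test) read it back.
        System.out.println(conf.get(TotalOrderPartitioner.PARTITIONER_PATH));
      }
    }
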
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
index 09444acc4ed4..286610373d4a 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
@@ -24,12 +24,15 @@
 import static org.junit.Assert.assertNotSame;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import static org.mockito.Mockito.verify;

 import java.io.IOException;
 import java.lang.reflect.Field;
+import java.security.PrivilegedAction;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -100,6 +103,9 @@
 import org.apache.hadoop.mapreduce.RecordWriter;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.Assert;
 import org.junit.ClassRule;
 import org.junit.Ignore;
 import org.junit.Test;
@@ -1494,5 +1500,53 @@ private String getStoragePolicyNameForOldHDFSVersion(FileSystem fs, Path path) {
     return null;
   }
+
+  @Test
+  public void TestConfigurePartitioner() throws IOException {
+    Configuration conf = util.getConfiguration();
+    // Create a user who is not the current user
+    String fooUserName = "foo1234";
+    String fooGroupName = "group1";
+    UserGroupInformation
+        ugi = UserGroupInformation.createUserForTesting(fooUserName, new String[]{fooGroupName});
+    // Get user's home directory
+    Path fooHomeDirectory = ugi.doAs(new PrivilegedAction<Path>() {
+      @Override public Path run() {
+        try (FileSystem fs = FileSystem.get(conf)) {
+          return fs.makeQualified(fs.getHomeDirectory());
+        } catch (IOException ioe) {
+          LOG.error("Failed to get foo's home directory", ioe);
+        }
+        return null;
+      }
+    });
+
+    Job job = Mockito.mock(Job.class);
+    Mockito.doReturn(conf).when(job).getConfiguration();
+    ImmutableBytesWritable writable = new ImmutableBytesWritable();
+    List<ImmutableBytesWritable> splitPoints = new LinkedList<ImmutableBytesWritable>();
+    splitPoints.add(writable);
+
+    ugi.doAs(new PrivilegedAction<Void>() {
+      @Override public Void run() {
+        try {
+          HFileOutputFormat2.configurePartitioner(job, splitPoints, false);
+        } catch (IOException ioe) {
+          LOG.error("Failed to configure partitioner", ioe);
+        }
+        return null;
+      }
+    });
+    FileSystem fs = FileSystem.get(conf);
+    // verify that the job uses TotalOrderPartitioner
+    verify(job).setPartitionerClass(TotalOrderPartitioner.class);
+    // verify that TotalOrderPartitioner.setPartitionFile() is called.
+    String partitionPathString = conf.get("mapreduce.totalorderpartitioner.path");
+    Assert.assertNotNull(partitionPathString);
+    // Make sure the partition file is in foo1234's home directory, and that
+    // the file exists.
+    Assert.assertTrue(partitionPathString.startsWith(fooHomeDirectory.toString()));
+    Assert.assertTrue(fs.exists(new Path(partitionPathString)));
+  }
 }
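
Note for reviewers: both new tests run against the local/insecure filesystem for the same
reason as above, since createUserForTesting controls what getCurrentUser() returns.
Assuming a standard Maven checkout, something like the following should exercise them:

    mvn test -pl hbase-backup -Dtest=TestBackupUtils
    mvn test -pl hbase-mapreduce -Dtest=TestHFileOutputFormat2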