@Override
public QueueInfo getQueueInfo(
    boolean includeChildQueues, boolean recursive) {
  QueueInfo queueInfo = recordFactory.newRecordInstance(QueueInfo.class);
  queueInfo.setQueueName(DEFAULT_QUEUE.getQueueName());
  queueInfo.setCapacity(1.0f);
  if (clusterResource.getMemory() == 0) {
    queueInfo.setCurrentCapacity(0.0f);
  } else {
    queueInfo.setCurrentCapacity((float) usedResource.getMemory()
        / clusterResource.getMemory());
  }
  queueInfo.setMaximumCapacity(1.0f);
  queueInfo.setChildQueues(new ArrayList<QueueInfo>());
  queueInfo.setQueueState(QueueState.RUNNING);
  return queueInfo;
}
public void verifyClusterSchedulerFifoGeneric(String type, String state,
    float capacity, float usedCapacity, int minQueueCapacity,
    int maxQueueCapacity, int numNodes, int usedNodeCapacity,
    int availNodeCapacity, int totalNodeCapacity, int numContainers)
    throws JSONException, Exception {
  assertEquals("type doesn't match", "fifoScheduler", type);
  assertEquals("qstate doesn't match", QueueState.RUNNING.toString(), state);
  assertEquals("capacity doesn't match", 1.0, capacity, 0.0);
  assertEquals("usedCapacity doesn't match", 0.0, usedCapacity, 0.0);
  assertEquals("minQueueMemoryCapacity doesn't match",
      YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
      minQueueCapacity);
  assertEquals("maxQueueMemoryCapacity doesn't match",
      YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
      maxQueueCapacity);
  assertEquals("numNodes doesn't match", 0, numNodes);
  assertEquals("usedNodeCapacity doesn't match", 0, usedNodeCapacity);
  assertEquals("availNodeCapacity doesn't match", 0, availNodeCapacity);
  assertEquals("totalNodeCapacity doesn't match", 0, totalNodeCapacity);
  assertEquals("numContainers doesn't match", 0, numContainers);
}
@Test
public void testGetQueueInfo() throws Exception {
  QueueCLI cli = createAndGetQueueCLI();
  Set<String> nodeLabels = new HashSet<String>();
  nodeLabels.add("GPU");
  nodeLabels.add("JDK_7");
  QueueInfo queueInfo = QueueInfo.newInstance("queueA", 0.4f, 0.8f, 0.5f,
      null, null, QueueState.RUNNING, nodeLabels, "GPU");
  when(client.getQueueInfo(any(String.class))).thenReturn(queueInfo);
  int result = cli.run(new String[] { "-status", "queueA" });
  assertEquals(0, result);
  verify(client).getQueueInfo("queueA");
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  PrintWriter pw = new PrintWriter(baos);
  pw.println("Queue Information : ");
  pw.println("Queue Name : " + "queueA");
  pw.println("\tState : " + "RUNNING");
  pw.println("\tCapacity : " + "40.0%");
  pw.println("\tCurrent Capacity : " + "50.0%");
  pw.println("\tMaximum Capacity : " + "80.0%");
  pw.println("\tDefault Node Label expression : " + "GPU");
  pw.println("\tAccessible Node Labels : " + "JDK_7,GPU");
  pw.close();
  String queueInfoStr = baos.toString("UTF-8");
  Assert.assertEquals(queueInfoStr, sysOutStream.toString());
}
@Test
public void testGetQueueInfoWithEmptyNodeLabel() throws Exception {
  QueueCLI cli = createAndGetQueueCLI();
  QueueInfo queueInfo = QueueInfo.newInstance("queueA", 0.4f, 0.8f, 0.5f,
      null, null, QueueState.RUNNING, null, null);
  when(client.getQueueInfo(any(String.class))).thenReturn(queueInfo);
  int result = cli.run(new String[] { "-status", "queueA" });
  assertEquals(0, result);
  verify(client).getQueueInfo("queueA");
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  PrintWriter pw = new PrintWriter(baos);
  pw.println("Queue Information : ");
  pw.println("Queue Name : " + "queueA");
  pw.println("\tState : " + "RUNNING");
  pw.println("\tCapacity : " + "40.0%");
  pw.println("\tCurrent Capacity : " + "50.0%");
  pw.println("\tMaximum Capacity : " + "80.0%");
  pw.println("\tDefault Node Label expression : ");
  pw.println("\tAccessible Node Labels : ");
  pw.close();
  String queueInfoStr = baos.toString("UTF-8");
  Assert.assertEquals(queueInfoStr, sysOutStream.toString());
}
@Test
public void testEnums() throws Exception {
  for (YarnApplicationState applicationState : YarnApplicationState.values()) {
    TypeConverter.fromYarn(applicationState, FinalApplicationStatus.FAILED);
  }
  // ad hoc test of NEW_SAVING, which is newly added
  Assert.assertEquals(State.PREP, TypeConverter.fromYarn(
      YarnApplicationState.NEW_SAVING, FinalApplicationStatus.FAILED));

  for (TaskType taskType : TaskType.values()) {
    TypeConverter.fromYarn(taskType);
  }
  for (JobState jobState : JobState.values()) {
    TypeConverter.fromYarn(jobState);
  }
  for (QueueState queueState : QueueState.values()) {
    TypeConverter.fromYarn(queueState);
  }
  for (TaskState taskState : TaskState.values()) {
    TypeConverter.fromYarn(taskState);
  }
}
/**
 * Test that child queues are converted too during conversion of the parent
 * queue.
 */
@Test
public void testFromYarnQueue() {
  // Define child queue
  org.apache.hadoop.yarn.api.records.QueueInfo child =
      Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
  Mockito.when(child.getQueueState()).thenReturn(QueueState.RUNNING);

  // Define parent queue
  org.apache.hadoop.yarn.api.records.QueueInfo queueInfo =
      Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
  List<org.apache.hadoop.yarn.api.records.QueueInfo> children =
      new ArrayList<org.apache.hadoop.yarn.api.records.QueueInfo>();
  children.add(child); // Add one child
  Mockito.when(queueInfo.getChildQueues()).thenReturn(children);
  Mockito.when(queueInfo.getQueueState()).thenReturn(QueueState.RUNNING);

  // Call the function we're testing
  org.apache.hadoop.mapreduce.QueueInfo returned =
      TypeConverter.fromYarn(queueInfo, new Configuration());

  // Verify that the converted queue has the one child we added
  Assert.assertEquals("QueueInfo children weren't properly converted",
      1, returned.getQueueChildren().size());
}
@Test
public void testGetQueueInfoWithEmptyNodeLabel() throws Exception {
  QueueCLI cli = createAndGetQueueCLI();
  QueueInfo queueInfo = QueueInfo.newInstance("queueA", 0.4f, 0.8f, 0.5f,
      null, null, QueueState.RUNNING, null, null, null, true);
  when(client.getQueueInfo(any(String.class))).thenReturn(queueInfo);
  int result = cli.run(new String[] { "-status", "queueA" });
  assertEquals(0, result);
  verify(client).getQueueInfo("queueA");
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  PrintWriter pw = new PrintWriter(baos);
  pw.println("Queue Information : ");
  pw.println("Queue Name : " + "queueA");
  pw.println("\tState : " + "RUNNING");
  pw.println("\tCapacity : " + "40.0%");
  pw.println("\tCurrent Capacity : " + "50.0%");
  pw.println("\tMaximum Capacity : " + "80.0%");
  pw.println("\tDefault Node Label expression : "
      + NodeLabel.DEFAULT_NODE_LABEL_PARTITION);
  pw.println("\tAccessible Node Labels : ");
  pw.println("\tPreemption : " + "disabled");
  pw.close();
  String queueInfoStr = baos.toString("UTF-8");
  Assert.assertEquals(queueInfoStr, sysOutStream.toString());
}
@Override
public QueueInfo getQueueInfo(boolean includeChildQueues, boolean recursive) {
  QueueInfo queueInfo = recordFactory.newRecordInstance(QueueInfo.class);
  queueInfo.setQueueName(getQueueName());
  // TODO: we might change these queue metrics around a little bit
  // to match the semantics of the fair scheduler.
  queueInfo.setCapacity((float) getFairShare().getMemory() /
      scheduler.getClusterCapacity().getMemory());
  queueInfo.setCurrentCapacity((float) getResourceUsage().getMemory() /
      scheduler.getClusterCapacity().getMemory());

  ArrayList<QueueInfo> childQueueInfos = new ArrayList<QueueInfo>();
  if (includeChildQueues) {
    Collection<FSQueue> childQueues = getChildQueues();
    for (FSQueue child : childQueues) {
      childQueueInfos.add(child.getQueueInfo(recursive, recursive));
    }
  }
  queueInfo.setChildQueues(childQueueInfos);
  queueInfo.setQueueState(QueueState.RUNNING);
  return queueInfo;
}
@Override
public QueueInfo getQueueInfo(
    boolean includeChildQueues, boolean recursive) {
  QueueInfo queueInfo = recordFactory.newRecordInstance(QueueInfo.class);
  queueInfo.setQueueName(DEFAULT_QUEUE.getQueueName());
  queueInfo.setCapacity(1.0f);
  if (clusterResource.getMemorySize() == 0) {
    queueInfo.setCurrentCapacity(0.0f);
  } else {
    queueInfo.setCurrentCapacity((float) usedResource.getMemorySize()
        / clusterResource.getMemorySize());
  }
  queueInfo.setMaximumCapacity(1.0f);
  queueInfo.setChildQueues(new ArrayList<QueueInfo>());
  queueInfo.setQueueState(QueueState.RUNNING);
  return queueInfo;
}