@Override
public MappingWorksheet.Mapping map(Queue.Task task, MappingWorksheet worksheet) {
    LOG.log(FINER, "map({0}, {1})", new Object[]{task.getFullDisplayName(), worksheet.works.size()});
    if (task instanceof AbstractProject) {
        return map((AbstractProject) task, worksheet);
    } else {
        return _fallback.map(task, worksheet);
    }
}
private MappingWorksheet.ExecutorChunk findExecutor(MappingWorksheet worksheet, DockerJobSlave slave) {
    for (MappingWorksheet.ExecutorChunk executor : worksheet.executors) {
        if (executor.node == slave) {
            return executor;
        }
    }
    return null;
}
/**
 * Decides which nodes to run tasks on.
 *
 * @param task the root task.
 * @param worksheet an object containing information about the subtasks to execute
 *                  and the nodes they can execute on.
 *
 * @return mapping from subtasks to nodes, or null if no valid assignment was found.
 *
 * @see hudson.model.LoadBalancer#map(hudson.model.Queue.Task, hudson.model.queue.MappingWorksheet)
 */
@Override
public Mapping map(Task task, MappingWorksheet worksheet) {
    Mapping m = worksheet.new Mapping();

    // Retrieve the scoring rule list once so behavior stays consistent
    // even if the configuration is updated while mapping is in progress.
    List<ScoringRule> scoringRuleList = getScoringRuleList();

    if (isEnabled()) {
        try {
            if (assignGreedily(m, task, worksheet, scoringRuleList)) {
                return m;
            } else {
                return null;
            }
        } catch (Exception e) {
            LOGGER.log(Level.SEVERE, "Failed to load balance with scores: fallback to preconfigured LoadBalancer", e);
        }
    }

    if (getFallback() != null) {
        return getFallback().map(task, worksheet);
    }

    LOGGER.severe("No LoadBalancer to fall back is defined: Builds are NEVER launched.");
    return null;
}
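/*
 * Hypothetical wiring sketch, not part of the snippets above: it shows how a
 * balancer like the one implementing map() here could be installed on the
 * Jenkins queue, keeping the stock consistent-hash balancer as the fallback
 * that map() delegates to when scoring is disabled or fails. The class name
 * ScoringLoadBalancer and the setters setFallback()/setEnabled() are
 * assumptions mirroring the getFallback()/isEnabled() calls seen above.
 */
import hudson.model.LoadBalancer;
import jenkins.model.Jenkins;

public final class ScoringLoadBalancerInstaller {
    private ScoringLoadBalancerInstaller() {}

    public static void install(ScoringLoadBalancer balancer) {
        // Keep Jenkins' default balancer around so map() has something to fall back to.
        balancer.setFallback(LoadBalancer.CONSISTENT_HASH);
        balancer.setEnabled(true);
        Jenkins.getInstance().getQueue().setLoadBalancer(balancer);
    }
}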
public ProvisionResult provisionJob(final String jobName, AbstractProject job, MappingWorksheet.WorkChunk task) throws Exception {
    JobValidationResult result = validateJob(task.assignedLabel).orNull();
    if (result == null) {
        return ProvisionResult.NOT_SUPPORTED;
    }

    DockerJobProperty jobConfig = (DockerJobProperty) job.getProperty(DockerJobProperty.class);
    final String imageName = getImageName(jobConfig, result);

    boolean resetJob = false;
    Map<String, String> jobEnv = result.environment;
    if (jobConfig != null) {
        resetJob = jobConfig.resetJobEnabled();

        Map<String, String> newEnv = newHashMap(jobEnv);
        newEnv.putAll(jobConfig.getEnvironmentVars());
        jobEnv = ImmutableMap.copyOf(newEnv);
    }

    if (isNullOrEmpty(imageName)) {
        throw new RuntimeException(format("Unable to find docker image for job %s", jobName));
    }

    if (availableCapacity() <= 0) {
        return ProvisionResult.NO_CAPACITY;
    }

    // Provision DockerJobSlave
    SlaveOptions options = new SlaveOptions(jobName, imageName);
    options.setCleanEnvironment(resetJob);
    options.setEnvironment(jobEnv);
    options.setDirectoryMappings(_directoryMappings);

    final DockerJobSlave slave = new DockerJobSlave(
            jobName + '-' + RandomStringUtils.random(6, true, true),
            "Job running in docker container",
            jobName,
            "/",
            ImmutableSet.<LabelAtom>builder()
                    .addAll(result.labels)
                    .add(new LabelAtom("image/" + imageName))
                    .build(),
            new DockerJobComputerLauncher(getDisplayName(), options));

    _jenkins.addNode(slave);

    Computer.threadPoolForRemoting.submit(new Runnable() {
        @Override
        public void run() {
            try {
                Computer slaveComputer = slave.toComputer();
                slaveComputer.connect(false).get();
            } catch (Exception ex) {
                LOG.log(SEVERE, format("Error provisioning docker slave: job=%s image=%s", jobName, imageName), ex);
                throw Throwables.propagate(ex);
            }
        }
    });

    return ProvisionResult.SUCCESS;
}
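/*
 * Hypothetical caller sketch, not taken from the code above: it illustrates how
 * the three ProvisionResult values returned by provisionJob() might be handled.
 * The names provisioner, project, and chunk are placeholders for illustration.
 */
private void tryProvision(AbstractProject project, MappingWorksheet.WorkChunk chunk) throws Exception {
    switch (provisioner.provisionJob(project.getName(), project, chunk)) {
        case NOT_SUPPORTED:
            // No docker image is mapped to this job's label; leave it for other provisioners.
            break;
        case NO_CAPACITY:
            // No container capacity right now; keep the item queued and retry later.
            break;
        case SUCCESS:
            // A DockerJobSlave was added and is connecting asynchronously;
            // the load balancer can map the work chunk to it once it is online.
            break;
    }
}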
@Override
public MappingWorksheet.Mapping map(Queue.Task task, MappingWorksheet worksheet) {
    return null;
}
private boolean assignGreedily(Mapping m, Task task, MappingWorksheet worksheet, List<ScoringRule> scoringRuleList) throws Exception {
    return assignGreedily(m, task, worksheet, scoringRuleList, 0);
}
/**
 * Decide which nodes to execute subtasks on.
 *
 * Works greedily and recursively, as follows:
 * <ol>
 *   <li>Pick the first subtask.</li>
 *   <li>Score all nodes by calling every enabled {@link ScoringRule}.</li>
 *   <li>Pick the node with the highest score and assign it to the current subtask.</li>
 *   <li>Pick the next subtask and go back to step 2. Scoring is performed per subtask,
 *       since scores may differ from one subtask to another.</li>
 *   <li>If an assignment fails (e.g. some constraint is violated), pick the next node and go back to step 3.</li>
 *   <li>If an assignment succeeds, return that assignment.</li>
 * </ol>
 *
 * @param m the mapping being built.
 * @param task the root task.
 * @param worksheet information about the subtasks and the nodes they can execute on.
 * @param scoringRuleList the scoring rules to apply, captured once in {@link #map(Task, MappingWorksheet)}.
 * @param targetWorkChunk index of the work chunk (subtask) to assign next.
 * @return whether a proper assignment was found.
 *
 * @throws Exception
 */
private boolean assignGreedily(Mapping m, Task task, MappingWorksheet worksheet, List<ScoringRule> scoringRuleList, int targetWorkChunk) throws Exception {
    if (targetWorkChunk >= worksheet.works.size()) {
        return m.isCompletelyValid();
    }

    // Current target work chunk (subtask).
    WorkChunk wc = worksheet.works(targetWorkChunk);

    // Initialize the nodes-to-scores map.
    List<ExecutorChunk> executors = new ArrayList<ExecutorChunk>(wc.applicableExecutorChunks());
    NodesScore nodesScore = new NodesScore(executors);

    // Score nodes by calling the enabled ScoringRules. Use the list captured in map()
    // rather than re-reading the configuration, so scoring stays consistent.
    for (ScoringRule scoringRule : scoringRuleList) {
        if (!scoringRule.updateScores(task, wc, m, nodesScore)) {
            break;
        }
    }

    sortExecutors(executors, nodesScore);

    if (isReportScoresEnabled()) {
        reportScores(wc, executors, nodesScore);
    }

    for (ExecutorChunk ec : executors) {
        if (nodesScore.isInvalid(ec)) {
            continue;
        }
        m.assign(targetWorkChunk, ec);
        if (m.isPartiallyValid()
                && assignGreedily(m, task, worksheet, scoringRuleList, targetWorkChunk + 1)) {
            return true;
        }
    }

    m.assign(targetWorkChunk, null); // Reset the assignment before backtracking.
    return false;
}
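/*
 * Minimal ScoringRule sketch, not part of the plugin code above: it biases the
 * greedy assignment toward nodes with more idle executors. It assumes that
 * ScoringRule can be subclassed this directly and that NodesScore exposes
 * getExecutorChunks() and addScore(ExecutorChunk, int); adapt it to the actual
 * extension point (and its Descriptor requirements) as needed.
 */
public class IdleExecutorsScoringRule extends ScoringRule {
    @Override
    public boolean updateScores(Task task, WorkChunk wc, Mapping m, NodesScore nodesScore) {
        for (ExecutorChunk ec : nodesScore.getExecutorChunks()) {
            // One point per idle executor on the node backing this chunk.
            nodesScore.addScore(ec, ec.computer.countIdle());
        }
        // Returning true tells assignGreedily() to keep applying the remaining rules.
        return true;
    }
}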