/**
 * Builds the SQL plan for a multi-table (joined/union subclass) bulk HQL delete.
 * Walks the delete AST, generates the id-insert-select that captures matching ids
 * into the temp id table, then one DELETE per join table / entity table, all keyed
 * off the temp-table id subselect.
 */
public TableBasedDeleteHandlerImpl( SessionFactoryImplementor factory, HqlSqlWalker walker, String catalog, String schema) {
	super( factory, walker, catalog, schema );

	final DeleteStatement statement = (DeleteStatement) walker.getAST();
	final FromElement deleteTarget = statement.getFromClause().getFromElement();
	this.targetedPersister = deleteTarget.getQueryable();

	final ProcessedWhereClause whereClause = processWhereClause( statement.getWhereClause() );
	this.idSelectParameterSpecifications = whereClause.getIdSelectParameterSpecifications();
	this.idInsertSelect = generateIdInsertSelect( targetedPersister, deleteTarget.getTableAlias(), whereClause );
	log.tracev( "Generated ID-INSERT-SELECT SQL (multi-table delete) : {0}", idInsertSelect );

	final String idSubselect = generateIdSubselect( targetedPersister );
	deletes = new ArrayList<String>();

	// Clean up many-to-many join tables first. This partially overlaps with
	// DeleteExecutor, but here the id restriction goes through the temp table
	// via idSubselect instead of an inline subquery.
	for ( Type propertyType : targetedPersister.getPropertyTypes() ) {
		if ( !propertyType.isCollectionType() ) {
			continue;
		}
		final CollectionType collectionType = (CollectionType) propertyType;
		final AbstractCollectionPersister collectionPersister =
				(AbstractCollectionPersister) factory.getCollectionPersister( collectionType.getRole() );
		if ( collectionPersister.isManyToMany() ) {
			deletes.add( generateDelete( collectionPersister.getTableName(),
					collectionPersister.getKeyColumnNames(), idSubselect, "bulk delete - m2m join table cleanup") );
		}
	}

	// One DELETE per table in constraint order so FK constraints are not violated.
	final String[] tableNames = targetedPersister.getConstraintOrderedTableNameClosure();
	final String[][] keyColumns = targetedPersister.getContraintOrderedTableKeyColumnClosure();
	// TODO : an optimization here would be to consider cascade deletes and not gen those
	// delete statements; the difficulty is the ordering of the tables here vs the cascade
	// attributes on the persisters -> the table info gotten here should really be
	// self-contained (i.e., a class representation defining all the needed attributes),
	// then we could then get an array of those
	for ( int table = 0; table < tableNames.length; table++ ) {
		deletes.add( generateDelete( tableNames[table], keyColumns[table], idSubselect, "bulk delete") );
	}
}
/**
 * Prepares the bulk-delete plan for a single-table entity delete: when the entity
 * owns many-to-many associations, generates DELETE statements that remove the FK
 * rows from the collection (join) tables before the entity rows themselves are
 * deleted. The join-table rows are matched via an inline id subselect built from
 * the original statement's WHERE clause.
 *
 * @param walker the HQL->SQL walker holding the parsed delete AST
 * @param persister persister for the entity being deleted
 * @throws HibernateException if the WHERE clause cannot be rendered to SQL
 */
public DeleteExecutor(HqlSqlWalker walker, Queryable persister) {
	super( walker, persister );
	final SessionFactoryImplementor factory = walker.getSessionFactoryHelper().getFactory();
	final Dialect dialect = factory.getDialect();
	try {
		final DeleteStatement deleteStatement = (DeleteStatement) walker.getAST();
		// SQL fragment (" where ...") restricting the id subselect; empty when there is
		// no usable WHERE clause.
		final String idSubselectWhere;
		if ( deleteStatement.hasWhereClause() ) {
			final AST whereClause = deleteStatement.getWhereClause();
			final SqlGenerator gen = new SqlGenerator( factory );
			gen.whereClause( whereClause );
			// Collect JDBC parameter positions so bind values line up at execution time.
			parameterSpecifications = gen.getCollectedParameters();
			// NOTE(review): the "> 7" presumably filters out a bare/empty " where "
			// fragment produced by SqlGenerator — TODO confirm against SqlGenerator output.
			idSubselectWhere = gen.getSQL().length() > 7 ? gen.getSQL() : "";
		}
		else {
			parameterSpecifications = new ArrayList<ParameterSpecification>();
			idSubselectWhere = "";
		}
		// If many-to-many, delete the FK row in the collection table.
		for ( Type type : persister.getPropertyTypes() ) {
			if ( type.isCollectionType() ) {
				final CollectionType cType = (CollectionType) type;
				final AbstractCollectionPersister cPersister = (AbstractCollectionPersister) factory
						.getCollectionPersister( cType.getRole() );
				if ( cPersister.isManyToMany() ) {
					// Composite-key entities need tuple IN-subqueries ("(a, b) in (select ...)");
					// if the dialect cannot express that, warn and skip rather than emit invalid SQL.
					if ( persister.getIdentifierColumnNames().length > 1
							&& !dialect.supportsTuplesInSubqueries() ) {
						LOG.warn(
								"This dialect is unable to cascade the delete into the many-to-many join table" +
								" when the entity has multiple primary keys. Either properly setup cascading on" +
								" the constraints or manually clear the associations prior to deleting the entities."
						);
					}
					else {
						// "(select <id cols> from <entity table>[ where ...])"
						final String idSubselect = "(select "
								+ StringHelper.join( ", ", persister.getIdentifierColumnNames() )
								+ " from " + persister.getTableName() + idSubselectWhere + ")";
						// "(<key cols>) in <id subselect>" restricts the join-table delete
						// to rows referencing the entities being removed.
						final String where = "(" + StringHelper.join( ", ", cPersister.getKeyColumnNames() )
								+ ") in " + idSubselect;
						final Delete delete = new Delete().setTableName( cPersister.getTableName() ).setWhere( where );
						if ( factory.getSettings().isCommentsEnabled() ) {
							delete.setComment( "delete FKs in join table" );
						}
						deletes.add( delete.toStatementString() );
					}
				}
			}
		}
	}
	catch (RecognitionException e) {
		// SqlGenerator.whereClause() can fail to recognize the AST; surface with cause preserved.
		throw new HibernateException( "Unable to delete the FKs in the join table!", e );
	}
}
/**
 * Builds the SQL plan for a CTE-based bulk HQL delete. Generates the id-select
 * that feeds the CTE, then one DELETE per join table / entity table in constraint
 * order, each restricted by the id subselect.
 */
public CTEBasedDeleteHandlerImpl(SessionFactoryImplementor factory,
		HqlSqlWalker walker, String catalog, String schema) {
	super(factory, walker, catalog, schema);

	final DeleteStatement statement = (DeleteStatement) walker.getAST();
	final FromElement deleteTarget = statement.getFromClause().getFromElement();
	this.targetedPersister = deleteTarget.getQueryable();

	final ProcessedWhereClause whereClause = processWhereClause(statement.getWhereClause());
	this.idSelectParameterSpecifications = whereClause.getIdSelectParameterSpecifications();
	this.idSelect = generateIdSelect(targetedPersister, deleteTarget.getTableAlias(), whereClause);

	final String idSubselect = generateIdSubselect(targetedPersister);
	deletes = new ArrayList<String>();

	// Clean up many-to-many join tables first. This partially overlaps with
	// DeleteExecutor, but here the id restriction goes through the temp table
	// via idSubselect instead of an inline subquery.
	for (Type propertyType : targetedPersister.getPropertyTypes()) {
		if (!propertyType.isCollectionType()) {
			continue;
		}
		final CollectionType collectionType = (CollectionType) propertyType;
		final AbstractCollectionPersister collectionPersister =
				(AbstractCollectionPersister) factory.getCollectionPersister(collectionType.getRole());
		if (collectionPersister.isManyToMany()) {
			deletes.add(generateDelete(collectionPersister.getTableName(),
					collectionPersister.getKeyColumnNames(), idSubselect,
					"bulk delete - m2m join table cleanup"));
		}
	}

	// One DELETE per table in constraint order so FK constraints are not violated.
	final String[] tableNames = targetedPersister.getConstraintOrderedTableNameClosure();
	final String[][] keyColumns = targetedPersister.getContraintOrderedTableKeyColumnClosure();
	// TODO : an optimization here would be to consider cascade deletes and not gen those
	// delete statements; the difficulty is the ordering of the tables here vs the cascade
	// attributes on the persisters -> the table info gotten here should really be
	// self-contained (i.e., a class representation defining all the needed attributes),
	// then we could then get an array of those
	for (int table = 0; table < tableNames.length; table++) {
		deletes.add(generateDelete(tableNames[table], keyColumns[table], idSubselect, "bulk delete"));
	}
}
/**
 * Builds the SQL plan for a CTE-based bulk HQL delete: an id-select feeding the
 * CTE, plus one DELETE per join/entity table restricted by the id subselect.
 *
 * @param factory the session factory
 * @param walker the HQL->SQL walker holding the parsed delete AST
 * @param catalog target catalog (passed through to the base handler)
 * @param schema target schema (passed through to the base handler)
 */
public CTEBasedDeleteHandlerImpl(SessionFactoryImplementor factory,
		HqlSqlWalker walker, String catalog, String schema) {
	super(factory, walker, catalog, schema);
	DeleteStatement deleteStatement = (DeleteStatement) walker.getAST();
	FromElement fromElement = deleteStatement.getFromClause()
			.getFromElement();
	this.targetedPersister = fromElement.getQueryable();
	final ProcessedWhereClause processedWhereClause = processWhereClause(deleteStatement
			.getWhereClause());
	// Parameter positions collected while rendering the WHERE clause; bound when
	// the id-select is executed.
	this.idSelectParameterSpecifications = processedWhereClause
			.getIdSelectParameterSpecifications();
	final String bulkTargetAlias = fromElement.getTableAlias();
	this.idSelect = generateIdSelect(targetedPersister, bulkTargetAlias,
			processedWhereClause);
	final String idSubselect = generateIdSubselect(targetedPersister);
	deletes = new ArrayList<String>();
	// If many-to-many, delete the FK row in the collection table.
	// This partially overlaps with DeleteExecutor, but it instead uses the
	// temp table in the idSubselect.
	for (Type type : targetedPersister.getPropertyTypes()) {
		if (type.isCollectionType()) {
			CollectionType cType = (CollectionType) type;
			AbstractCollectionPersister cPersister = (AbstractCollectionPersister) factory
					.getCollectionPersister(cType.getRole());
			if (cPersister.isManyToMany()) {
				deletes.add(generateDelete(cPersister.getTableName(),
						cPersister.getKeyColumnNames(), idSubselect,
						"bulk delete - m2m join table cleanup"));
			}
		}
	}
	// Tables are visited in constraint order so the generated DELETEs do not
	// violate FK constraints between the entity hierarchy's tables.
	String[] tableNames = targetedPersister
			.getConstraintOrderedTableNameClosure();
	String[][] columnNames = targetedPersister
			.getContraintOrderedTableKeyColumnClosure();
	for (int i = 0; i < tableNames.length; i++) {
		// TODO : an optimization here would be to consider cascade deletes
		// and not gen those delete statements;
		// the difficulty is the ordering of the tables here vs the cascade
		// attributes on the persisters ->
		// the table info gotten here should really be self-contained (i.e.,
		// a class representation
		// defining all the needed attributes), then we could then get an
		// array of those
		deletes.add(generateDelete(tableNames[i], columnNames[i],
				idSubselect, "bulk delete"));
	}
}