spring、hibernate源码分析一

扫码关注公众号:Java 技术驿站

发送:vip
将链接复制到本浏览器,永久解锁本站全部文章

【公众号:Java 技术驿站】 【加作者微信交流技术,拉技术群】
    <!-- C3P0 pooled DataSource; ${...} values are resolved from a JDBC
         properties file by a PropertyPlaceholderConfigurer. -->
    <bean id="dataSource" class="com.mchange.v2.c3p0.ComboPooledDataSource"
            destroy-method="close">
            <property name="driverClass" value="${jdbc.driver}"></property>
            <property name="jdbcUrl" value="${jdbc.url}"></property>
            <property name="user" value="${jdbc.username}"></property>
            <property name="password" value="${jdbc.password}"></property>
        </bean>

        <bean id="sessionFactory"
            class="org.springframework.orm.hibernate3.annotation.AnnotationSessionFactoryBean">
            <property name="dataSource" ref="dataSource" />

            <property name="hibernateProperties">
                <props>
                    <prop key="hibernate.dialect">${hibernate.dialect}</prop>
                    <prop key="hibernate.show_sql">${hibernate.show_sql}</prop>
                    <!-- FIX: key needs the "hibernate." prefix. Unlike
                         hibernate.cfg.xml, properties passed through Spring are
                         not auto-prefixed, so the bare key was silently ignored. -->
                    <prop key="hibernate.current_session_context_class">thread</prop>
                    <prop key="hibernate.hbm2ddl.auto">${hibernate.auto}</prop>
                    <!-- FIX: placeholder was missing the leading '$'; the literal
                         text "{hibernate.connection.provider_class}" would have
                         been handed to Hibernate as the provider class name. -->
                    <prop key="hibernate.connection.provider_class">${hibernate.connection.provider_class}</prop>
                    <prop key="hibernate.search.default.directory_provider">org.hibernate.search.store.FSDirectoryProvider</prop>
                    <prop key="hibernate.search.default.indexBase">F:/temp/index</prop>
                </props>
            </property>

            <!-- Annotated entity classes registered with AnnotationConfiguration. -->
            <property name="annotatedClasses">
                <list>
                    <value>com.zyn.ssh.pojo.Student</value>
                    <value>com.zyn.ssh.pojo.Teacher</value>
                    <value>com.zyn.ssh.pojo.Course</value>
                    <value>com.zyn.ssh.pojo.StudentInfo</value>
                </list>
            </property>
        </bean>

进入AnnotationSessionFactoryBean

    // Excerpt of Spring's AnnotationSessionFactoryBean: a LocalSessionFactoryBean
    // variant that feeds annotated entity classes/packages into Hibernate's
    // AnnotationConfiguration instead of hbm.xml mapping files.
    public class AnnotationSessionFactoryBean extends LocalSessionFactoryBean implements ResourceLoaderAware {

    // Classpath pattern appended when scanning "packagesToScan" for classes.
    private static final String RESOURCE_PATTERN = "/**/*.class";
            //entity classes (injected via the "annotatedClasses" bean property)
    private Class[] annotatedClasses;

    // Packages whose package-level annotations should be registered.
    private String[] annotatedPackages;

    // Packages to scan for annotated entity classes.
    private String[] packagesToScan;

    // Annotation filters applied during package scanning: a class matching any
    // of @Entity, @Embeddable, @MappedSuperclass, or Hibernate's own @Entity
    // is registered as a mapped class.
    private TypeFilter[] entityTypeFilters = new TypeFilter[] {
    new AnnotationTypeFilter(Entity.class, false),
    new AnnotationTypeFilter(Embeddable.class, false),
    new AnnotationTypeFilter(MappedSuperclass.class, false),
    new AnnotationTypeFilter(org.hibernate.annotations.Entity.class, false)};

    // Switches the superclass to build Hibernate's annotation-aware
    // AnnotationConfiguration rather than the plain Configuration.
    public AnnotationSessionFactoryBean() {
    setConfigurationClass(AnnotationConfiguration.class);
    }

    }

注入实体列表,初始化使用hibernate的AnnotationConfiguration
进入LocalSessionFactoryBean

    // Excerpt of Spring's LocalSessionFactoryBean: collects DataSource, mapping
    // locations and Hibernate properties, assembles them into a Hibernate
    // Configuration, and builds the SessionFactory from it.
    public class LocalSessionFactoryBean extends AbstractSessionFactoryBean implements BeanClassLoaderAware {

    // Locations of hibernate.cfg.xml style configuration files.
    private Resource[] configLocations;

    private String[] mappingResources;

    private Resource[] mappingLocations;

    private Resource[] cacheableMappingLocations;

    private Resource[] mappingJarLocations;

    private Resource[] mappingDirectoryLocations;

    // Raw properties handed to Hibernate (the "hibernateProperties" bean property).
    private Properties hibernateProperties;

    private TransactionManager jtaTransactionManager;

    private Object cacheRegionFactory;

    private CacheProvider cacheProvider;

    private LobHandler lobHandler;

    private Interceptor entityInterceptor;

    private NamingStrategy namingStrategy;

    private TypeDefinitionBean[] typeDefinitions;

    private FilterDefinition[] filterDefinitions;

    private Properties entityCacheStrategies;

    private Properties collectionCacheStrategies;

    private boolean schemaUpdate = false;

    // The assembled Hibernate Configuration built from the fields above.
    private Configuration configuration;

    // Assembles a Configuration from the injected settings and turns it into
    // a SessionFactory.
    protected SessionFactory buildSessionFactory() throws Exception {
         Configuration config = newConfiguration();
         //(assembly steps omitted here) the configured resources and properties
         //are merged into the Configuration object
         return newSessionFactory(config);
    }

    // Hands the fully assembled Configuration to Hibernate.
    protected SessionFactory newSessionFactory(Configuration config) throws HibernateException {
    return config.buildSessionFactory();
    }

    }

接收datasource配置、hibernateProperties配置,并通过AnnotationConfiguration组装成Configuration对象
进入Configuration,核心方法是buildSessionFactory

    /**
     * Excerpt of Hibernate Configuration#buildSessionFactory: finishes mapping
     * compilation, validates, resolves property placeholders on a copy, builds
     * the immutable Settings and finally instantiates the SessionFactoryImpl.
     */
    public SessionFactory buildSessionFactory() throws HibernateException {
    // Second compile pass over queued mapping metadata.
    secondPassCompile();
    if ( ! metadataSourceQueue.isEmpty() ) {
    log.warn( "mapping metadata cache was not completely processed" );
    }

    // Enable optional integrations (legacy validator, Bean Validation, Search).
    enableLegacyHibernateValidator();
    enableBeanValidation();
    enableHibernateSearch();

    validate();
    Environment.verifyProperties( properties );
    // Resolve placeholders on a copy so the original properties stay untouched.
    Properties copy = new Properties();
    copy.putAll( properties );
    PropertiesHelper.resolvePlaceHolders( copy );
    // Session-level defaults (flush/auto-close/batching/caching) are fixed here.
    Settings settings = buildSettings( copy );
    return new SessionFactoryImpl(
    this,
    mapping,
    settings,
    getInitializedEventListeners(),
    sessionFactoryObserver
    );
    }

重点注意:
Settings settings = buildSettings( copy );这里是以后创建session需要的默认参数,跟踪进入到SettingsFactory

    /**
     * Excerpt of Hibernate SettingsFactory#buildSettings: translates the
     * resolved Hibernate properties into a Settings object. These Settings are
     * the defaults that every Session opened by the SessionFactory inherits.
     */
    public Settings buildSettings(Properties props) {
    Settings settings = new Settings();

    //SessionFactory name:

    String sessionFactoryName = props.getProperty(Environment.SESSION_FACTORY_NAME);
    settings.setSessionFactoryName(sessionFactoryName);

    //JDBC and connection settings:

    ConnectionProvider connections = createConnectionProvider(props);
    settings.setConnectionProvider(connections);

    //Interrogate JDBC metadata

    boolean metaSupportsScrollable = false;
    boolean metaSupportsGetGeneratedKeys = false;
    boolean metaSupportsBatchUpdates = false;
    boolean metaReportsDDLCausesTxnCommit = false;
    boolean metaReportsDDLInTxnSupported = true;
    Dialect dialect = null;
    JdbcSupport jdbcSupport = null;

    // 'hibernate.temp.use_jdbc_metadata_defaults' is a temporary magic value.
    // The need for it is intended to be alleviated with future development, thus it is
    // not defined as an Environment constant...
    //
    // it is used to control whether we should consult the JDBC metadata to determine
    // certain Settings default values; it is useful to *not* do this when the database
    // may not be available (mainly in tools usage).
    boolean useJdbcMetadata = PropertiesHelper.getBoolean( "hibernate.temp.use_jdbc_metadata_defaults", props, true );
    if ( useJdbcMetadata ) {
    try {
    // Opens a real connection just to probe driver/database capabilities.
    Connection conn = connections.getConnection();
    try {
    DatabaseMetaData meta = conn.getMetaData();

    dialect = DialectFactory.buildDialect( props, conn );
    jdbcSupport = JdbcSupportLoader.loadJdbcSupport( conn );

    metaSupportsScrollable = meta.supportsResultSetType( ResultSet.TYPE_SCROLL_INSENSITIVE );
    metaSupportsBatchUpdates = meta.supportsBatchUpdates();
    metaReportsDDLCausesTxnCommit = meta.dataDefinitionCausesTransactionCommit();
    metaReportsDDLInTxnSupported = !meta.dataDefinitionIgnoredInTransactions();
    metaSupportsGetGeneratedKeys = meta.supportsGetGeneratedKeys();

    log.info( "Database ->\n" +
    "       name : " + meta.getDatabaseProductName() + '\n' +
    "    version : " +  meta.getDatabaseProductVersion() + '\n' +
    "      major : " + meta.getDatabaseMajorVersion() + '\n' +
    "      minor : " + meta.getDatabaseMinorVersion()
    );
    log.info( "Driver ->\n" +
    "       name : " + meta.getDriverName() + '\n' +
    "    version : " + meta.getDriverVersion() + '\n' +
    "      major : " + meta.getDriverMajorVersion() + '\n' +
    "      minor : " + meta.getDriverMinorVersion()
    );
    }
    catch ( SQLException sqle ) {
    log.warn( "Could not obtain connection metadata", sqle );
    }
    finally {
    connections.closeConnection( conn );
    }
    }
    catch ( SQLException sqle ) {
    log.warn( "Could not obtain connection to query metadata", sqle );
    dialect = DialectFactory.buildDialect( props );
    }
    catch ( UnsupportedOperationException uoe ) {
    // user supplied JDBC connections
    dialect = DialectFactory.buildDialect( props );
    }
    }
    else {
    dialect = DialectFactory.buildDialect( props );
    }

    settings.setDataDefinitionImplicitCommit( metaReportsDDLCausesTxnCommit );
    settings.setDataDefinitionInTransactionSupported( metaReportsDDLInTxnSupported );
    settings.setDialect( dialect );
    if ( jdbcSupport == null ) {
    jdbcSupport = JdbcSupportLoader.loadJdbcSupport( null );
    }
    settings.setJdbcSupport( jdbcSupport );

    //use dialect default properties
    final Properties properties = new Properties();
    properties.putAll( dialect.getDefaultProperties() );
    properties.putAll( props );

    // Transaction settings:

    TransactionFactory transactionFactory = createTransactionFactory(properties);
    settings.setTransactionFactory(transactionFactory);
    settings.setTransactionManagerLookup( createTransactionManagerLookup(properties) );

    // Defaults to false when the property is absent (two-arg getBoolean overload).
    boolean flushBeforeCompletion = PropertiesHelper.getBoolean(Environment.FLUSH_BEFORE_COMPLETION, properties);
    log.info("Automatic flush during beforeCompletion(): " + enabledDisabled(flushBeforeCompletion) );
    settings.setFlushBeforeCompletionEnabled(flushBeforeCompletion);

    // Defaults to false when the property is absent (two-arg getBoolean overload).
    boolean autoCloseSession = PropertiesHelper.getBoolean(Environment.AUTO_CLOSE_SESSION, properties);
    log.info("Automatic session close at end of transaction: " + enabledDisabled(autoCloseSession) );
    settings.setAutoCloseSessionEnabled(autoCloseSession);

    //JDBC and connection settings:

    int batchSize = PropertiesHelper.getInt(Environment.STATEMENT_BATCH_SIZE, properties, 0);
    if ( !metaSupportsBatchUpdates ) batchSize = 0;
    if (batchSize>0) log.info("JDBC batch size: " + batchSize);
    settings.setJdbcBatchSize(batchSize);
    boolean jdbcBatchVersionedData = PropertiesHelper.getBoolean(Environment.BATCH_VERSIONED_DATA, properties, false);
    if (batchSize>0) log.info("JDBC batch updates for versioned data: " + enabledDisabled(jdbcBatchVersionedData) );
    settings.setJdbcBatchVersionedData(jdbcBatchVersionedData);
    settings.setBatcherFactory( createBatcherFactory(properties, batchSize) );

    boolean useScrollableResultSets = PropertiesHelper.getBoolean(Environment.USE_SCROLLABLE_RESULTSET, properties, metaSupportsScrollable);
    log.info("Scrollable result sets: " + enabledDisabled(useScrollableResultSets) );
    settings.setScrollableResultSetsEnabled(useScrollableResultSets);

    boolean wrapResultSets = PropertiesHelper.getBoolean(Environment.WRAP_RESULT_SETS, properties, false);
    log.debug( "Wrap result sets: " + enabledDisabled(wrapResultSets) );
    settings.setWrapResultSetsEnabled(wrapResultSets);

    boolean useGetGeneratedKeys = PropertiesHelper.getBoolean(Environment.USE_GET_GENERATED_KEYS, properties, metaSupportsGetGeneratedKeys);
    log.info("JDBC3 getGeneratedKeys(): " + enabledDisabled(useGetGeneratedKeys) );
    settings.setGetGeneratedKeysEnabled(useGetGeneratedKeys);

    Integer statementFetchSize = PropertiesHelper.getInteger(Environment.STATEMENT_FETCH_SIZE, properties);
    if (statementFetchSize!=null) log.info("JDBC result set fetch size: " + statementFetchSize);
    settings.setJdbcFetchSize(statementFetchSize);

    // Release mode defaults to the string "auto", which delegates to the
    // transaction factory's own default release mode below.
    String releaseModeName = PropertiesHelper.getString( Environment.RELEASE_CONNECTIONS, properties, "auto" );
    log.info( "Connection release mode: " + releaseModeName );
    ConnectionReleaseMode releaseMode;
    if ( "auto".equals(releaseModeName) ) {
    releaseMode = transactionFactory.getDefaultReleaseMode();
    }
    else {
    releaseMode = ConnectionReleaseMode.parse( releaseModeName );
    if ( releaseMode == ConnectionReleaseMode.AFTER_STATEMENT && !connections.supportsAggressiveRelease() ) {
    log.warn( "Overriding release mode as connection provider does not support 'after_statement'" );
    releaseMode = ConnectionReleaseMode.AFTER_TRANSACTION;
    }
    }
    settings.setConnectionReleaseMode( releaseMode );

    //SQL Generation settings:

    String defaultSchema = properties.getProperty(Environment.DEFAULT_SCHEMA);
    String defaultCatalog = properties.getProperty(Environment.DEFAULT_CATALOG);
    if (defaultSchema!=null) log.info("Default schema: " + defaultSchema);
    if (defaultCatalog!=null) log.info("Default catalog: " + defaultCatalog);
    settings.setDefaultSchemaName(defaultSchema);
    settings.setDefaultCatalogName(defaultCatalog);

    Integer maxFetchDepth = PropertiesHelper.getInteger(Environment.MAX_FETCH_DEPTH, properties);
    if (maxFetchDepth!=null) log.info("Maximum outer join fetch depth: " + maxFetchDepth);
    settings.setMaximumFetchDepth(maxFetchDepth);
    int batchFetchSize = PropertiesHelper.getInt(Environment.DEFAULT_BATCH_FETCH_SIZE, properties, 1);
    log.info("Default batch fetch size: " + batchFetchSize);
    settings.setDefaultBatchFetchSize(batchFetchSize);

    boolean comments = PropertiesHelper.getBoolean(Environment.USE_SQL_COMMENTS, properties);
    log.info( "Generate SQL with comments: " + enabledDisabled(comments) );
    settings.setCommentsEnabled(comments);

    boolean orderUpdates = PropertiesHelper.getBoolean(Environment.ORDER_UPDATES, properties);
    log.info( "Order SQL updates by primary key: " + enabledDisabled(orderUpdates) );
    settings.setOrderUpdatesEnabled(orderUpdates);

    boolean orderInserts = PropertiesHelper.getBoolean(Environment.ORDER_INSERTS, properties);
    log.info( "Order SQL inserts for batching: " + enabledDisabled( orderInserts ) );
    settings.setOrderInsertsEnabled( orderInserts );

    //Query parser settings:

    settings.setQueryTranslatorFactory( createQueryTranslatorFactory(properties) );

    Map querySubstitutions = PropertiesHelper.toMap(Environment.QUERY_SUBSTITUTIONS, " ,=;:\n\t\r\f", properties);
    log.info("Query language substitutions: " + querySubstitutions);
    settings.setQuerySubstitutions(querySubstitutions);

    boolean jpaqlCompliance = PropertiesHelper.getBoolean( Environment.JPAQL_STRICT_COMPLIANCE, properties, false );
    settings.setStrictJPAQLCompliance( jpaqlCompliance );
    log.info( "JPA-QL strict compliance: " + enabledDisabled( jpaqlCompliance ) );

    // Second-level / query cache:

    boolean useSecondLevelCache = PropertiesHelper.getBoolean(Environment.USE_SECOND_LEVEL_CACHE, properties, true);
    log.info( "Second-level cache: " + enabledDisabled(useSecondLevelCache) );
    settings.setSecondLevelCacheEnabled(useSecondLevelCache);

    boolean useQueryCache = PropertiesHelper.getBoolean(Environment.USE_QUERY_CACHE, properties);
    log.info( "Query cache: " + enabledDisabled(useQueryCache) );
    settings.setQueryCacheEnabled(useQueryCache);

    // The cache provider is needed when we either have second-level cache enabled
    // or query cache enabled.  Note that useSecondLevelCache is enabled by default
    settings.setRegionFactory( createRegionFactory( properties, ( useSecondLevelCache || useQueryCache ) ) );

    boolean useMinimalPuts = PropertiesHelper.getBoolean(
    Environment.USE_MINIMAL_PUTS, properties, settings.getRegionFactory().isMinimalPutsEnabledByDefault()
    );
    log.info( "Optimize cache for minimal puts: " + enabledDisabled(useMinimalPuts) );
    settings.setMinimalPutsEnabled(useMinimalPuts);

    String prefix = properties.getProperty(Environment.CACHE_REGION_PREFIX);
    if ( StringHelper.isEmpty(prefix) ) prefix=null;
    if (prefix!=null) log.info("Cache region prefix: "+ prefix);
    settings.setCacheRegionPrefix(prefix);

    boolean useStructuredCacheEntries = PropertiesHelper.getBoolean(Environment.USE_STRUCTURED_CACHE, properties, false);
    log.info( "Structured second-level cache entries: " + enabledDisabled(useStructuredCacheEntries) );
    settings.setStructuredCacheEntriesEnabled(useStructuredCacheEntries);

    if (useQueryCache) settings.setQueryCacheFactory( createQueryCacheFactory(properties) );

    //SQL Exception converter:

    SQLExceptionConverter sqlExceptionConverter;
    try {
    sqlExceptionConverter = SQLExceptionConverterFactory.buildSQLExceptionConverter( dialect, properties );
    }
    catch(HibernateException e) {
    // Falls back to the minimal converter rather than failing startup.
    log.warn("Error building SQLExceptionConverter; using minimal converter");
    sqlExceptionConverter = SQLExceptionConverterFactory.buildMinimalSQLExceptionConverter();
    }
    settings.setSQLExceptionConverter(sqlExceptionConverter);

    //Statistics and logging:

    boolean showSql = PropertiesHelper.getBoolean(Environment.SHOW_SQL, properties);
    if (showSql) log.info("Echoing all SQL to stdout");
    //   settings.setShowSqlEnabled(showSql);

    boolean formatSql = PropertiesHelper.getBoolean(Environment.FORMAT_SQL, properties);
    //   settings.setFormatSqlEnabled(formatSql);

    settings.setSqlStatementLogger( new SQLStatementLogger( showSql, formatSql ) );

    boolean useStatistics = PropertiesHelper.getBoolean(Environment.GENERATE_STATISTICS, properties);
    log.info( "Statistics: " + enabledDisabled(useStatistics) );
    settings.setStatisticsEnabled(useStatistics);

    boolean useIdentifierRollback = PropertiesHelper.getBoolean(Environment.USE_IDENTIFIER_ROLLBACK, properties);
    log.info( "Deleted entity synthetic identifier rollback: " + enabledDisabled(useIdentifierRollback) );
    settings.setIdentifierRollbackEnabled(useIdentifierRollback);

    //Schema export:

    String autoSchemaExport = properties.getProperty(Environment.HBM2DDL_AUTO);
    if ( "validate".equals(autoSchemaExport) ) settings.setAutoValidateSchema(true);
    if ( "update".equals(autoSchemaExport) ) settings.setAutoUpdateSchema(true);
    if ( "create".equals(autoSchemaExport) ) settings.setAutoCreateSchema(true);
    if ( "create-drop".equals(autoSchemaExport) ) {
    settings.setAutoCreateSchema(true);
    settings.setAutoDropSchema(true);
    }
    settings.setImportFiles( properties.getProperty( Environment.HBM2DDL_IMPORT_FILES ) );

    EntityMode defaultEntityMode = EntityMode.parse( properties.getProperty( Environment.DEFAULT_ENTITY_MODE ) );
    log.info( "Default entity-mode: " + defaultEntityMode );
    settings.setDefaultEntityMode( defaultEntityMode );

    boolean namedQueryChecking = PropertiesHelper.getBoolean( Environment.QUERY_STARTUP_CHECKING, properties, true );
    log.info( "Named query checking : " + enabledDisabled( namedQueryChecking ) );
    settings.setNamedQueryStartupCheckingEnabled( namedQueryChecking );

    boolean checkNullability = PropertiesHelper.getBoolean(Environment.CHECK_NULLABILITY, properties, true);
    log.info( "Check Nullability in Core (should be disabled when Bean Validation is on): " + enabledDisabled(checkNullability) );
    settings.setCheckNullability(checkNullability);

    //   String provider = properties.getProperty( Environment.BYTECODE_PROVIDER );
    //   log.info( "Bytecode provider name : " + provider );
    //   BytecodeProvider bytecodeProvider = buildBytecodeProvider( provider );
    //   settings.setBytecodeProvider( bytecodeProvider );

    return settings;

    }

session的默认参数就是在这里创建的,重点注意

    boolean flushBeforeCompletion = PropertiesHelper.getBoolean(Environment.FLUSH_BEFORE_COMPLETION, properties);

    settings.setFlushBeforeCompletionEnabled(flushBeforeCompletion);

    boolean autoCloseSession = PropertiesHelper.getBoolean(Environment.AUTO_CLOSE_SESSION, properties);

    settings.setAutoCloseSessionEnabled(autoCloseSession);

继续跟踪:PropertiesHelper.getBoolean(Environment.AUTO_CLOSE_SESSION, properties);

    // Two-arg overload: an absent property defaults to false.
    public static boolean getBoolean(String propertyName, Properties properties) {
    return getBoolean( propertyName, properties, false );
    }
    String releaseModeName = PropertiesHelper.getString( Environment.RELEASE_CONNECTIONS, properties, "auto" );
    // Returns defaultValue when the property is absent; otherwise parses the
    // value via Boolean.valueOf (anything other than "true" yields false).
    public static boolean getBoolean(String propertyName, Properties properties, boolean defaultValue) {
    String value = extractPropertyValue( propertyName, properties );
    return value == null ? defaultValue : Boolean.valueOf( value ).booleanValue();
    }

走到这里就豁然开朗了
原来flushBeforeCompletion和autoCloseSession默认都是false;而releaseModeName在未配置时默认为"auto"(即getString的第三个参数),随后交由transactionFactory.getDefaultReleaseMode()决定实际的释放模式。


如果要将默认设置为true:在hibernate属性配置中加入
hibernate.transaction.auto_close_session=true
hibernate.transaction.flush_before_completion=true


来源:[]()

赞(0) 打赏
版权归原创作者所有,任何形式的转载请联系博主:daming_90:Java 技术驿站 » spring、hibernate源码分析一

评论 抢沙发

  • 昵称 (必填)
  • 邮箱 (必填)
  • 网址

觉得文章有用就打赏一下文章作者

支付宝扫一扫打赏

微信扫一扫打赏