
Saturday 12 September 2020

Faster way of creating ZIP file

A ZIP file is normally used for distributing files and directories. It serves two purposes: compressing data and creating an archive of multiple files and directories. Nowadays, with faster internet speeds and pre-compressed content such as JPG and MP4 files, it can be faster to disable compression altogether when creating the archive. I wrote a simple Java program to create a ZIP file without compression, using an MP4 file as the content. With compression enabled it took five times longer and saved only about 5%, which is not worth the time spent. Following is the source code:


package zipperformance;

import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.zip.Deflater;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;
import java.util.zip.CRC32;

public class ZipCreater {
	public static void main(String[] args) {
		
		long startTime = System.currentTimeMillis();
		zipDirectory("files", "test.zip");
		long endTime = System.currentTimeMillis();
		
		System.out.println(endTime-startTime);
		
		
	}
	
    public static void zipDirectory( String dirPath, String zipFilePath )
    {

        FileOutputStream fos = null;
        try
        {
            // Create the file output streams for both the file and the zip.

            File file = new File( zipFilePath );
            if(file.getParentFile()!=null) {
            	file.getParentFile().mkdirs();
            }
            fos = new FileOutputStream( zipFilePath );
            BufferedOutputStream bos = new BufferedOutputStream(fos);
            ZipOutputStream zos = new ZipOutputStream( bos ); // wrap the buffered stream, not the raw FileOutputStream
            zos.setLevel(Deflater.BEST_SPEED);
            dirFunc( dirPath, dirPath, zos );

            // Close the file output streams for both the file and the zip.
            zos.flush();
            zos.close();
            fos.close();
        }
        catch ( IOException e )
        {
        	e.printStackTrace();
        }
    }
    
    private static void dirFunc( String dirName, String baseExportPath, ZipOutputStream zos )
    {
        try
        {
            File dirObj = new File( dirName );
            if ( dirObj.exists() )
            {
                if ( dirObj.isDirectory() )
                {
                    // Create an array of File objects, one for each file or directory in dirObj.
                    File[] fileList = dirObj.listFiles();
                    // Loop through File array and display.
                    for ( int i = 0; i < fileList.length; i++ )
                    {
                        if ( fileList[i].isDirectory() )
                        {
                            dirFunc( fileList[i].getPath(), baseExportPath, zos );
                        }
                        else if ( fileList[i].isFile() )

                        {
                            // Call the zipFunc function
                            zipFunc( fileList[i].getPath(), baseExportPath, zos );
                        }
                    }
                }
            }
        }
        catch ( Exception e )
        {
            e.printStackTrace();
        }
    }

    private static void zipFunc( String filePath, String baseExportPath, ZipOutputStream zos )
    {
        try
        {
            String absolutePath = filePath;
            String prefixFilePath = baseExportPath;

            int index = prefixFilePath.length();

            filePath = filePath.substring( ++index );

            // Create a file input stream and a buffered input stream.
            long size = Files.size(Paths.get(absolutePath));
            FileInputStream fis = new FileInputStream( absolutePath );
            BufferedInputStream bis = new BufferedInputStream( fis );
            CRC32 crc32 = new CRC32();
            byte[] data = new byte[1024*128];
            int byteCount;

            // Create a loop that reads from the buffered input stream and writes
            // to the zip output stream until the bis has been entirely read.
            while ( ( byteCount = bis.read( data, 0, data.length ) ) > -1 )
            {
                crc32.update( data, 0, byteCount );
            }
            long crc=crc32.getValue();

            // Create a Zip Entry and put it into the archive (no data yet).
            ZipEntry fileEntry = new ZipEntry( filePath );
            fileEntry.setMethod(ZipEntry.STORED);
            fileEntry.setSize(size);
            fileEntry.setCrc(crc);
            zos.putNextEntry( fileEntry );

            bis.close();
            fis = new FileInputStream( absolutePath );
            bis = new BufferedInputStream( fis );
            
            // Create a loop that reads from the buffered input stream and writes
            // to the zip output stream until the bis has been entirely read.
            while ( ( byteCount = bis.read( data, 0, data.length ) ) > -1 )
            {
                zos.write( data, 0, byteCount );
            }

            // Close the entry and the input stream for this file.
            zos.closeEntry();
            bis.close();
        }
        catch ( IOException e )
        {
        	e.printStackTrace();
        }
    }
}


java.lang.ClassNotFoundException: Could not load requested class : org.postgresql.Driver in Wildfly

Recently I was trying to migrate our application from WildFly 10 to WildFly 20. We faced the following error:


Unable to create requested service [org.hibernate.engine.jdbc.env.spi.JdbcEnvironment]
        at org.jboss.as.jpa.service.PersistenceUnitServiceImpl$1$1.run(PersistenceUnitServiceImpl.java:198)
        at org.jboss.as.jpa.service.PersistenceUnitServiceImpl$1$1.run(PersistenceUnitServiceImpl.java:128)
        at org.wildfly.security.manager.WildFlySecurityManager.doChecked(WildFlySecurityManager.java:658)
        at org.jboss.as.jpa.service.PersistenceUnitServiceImpl$1.run(PersistenceUnitServiceImpl.java:212)
        at org.jboss.threads.ContextClassLoaderSavingRunnable.run(ContextClassLoaderSavingRunnable.java:35)
        at org.jboss.threads.EnhancedQueueExecutor.safeRun(EnhancedQueueExecutor.java:1982)
        at org.jboss.threads.EnhancedQueueExecutor$ThreadBody.doRunTask(EnhancedQueueExecutor.java:1486)
        at org.jboss.threads.EnhancedQueueExecutor$ThreadBody.run(EnhancedQueueExecutor.java:1377)
        at java.lang.Thread.run(Thread.java:748)
        at org.jboss.threads.JBossThread.run(JBossThread.java:485)
Caused by: org.hibernate.service.spi.ServiceException: Unable to create requested service [org.hibernate.engine.jdbc.env.spi.JdbcEnvironment]
        at org.hibernate.service.internal.AbstractServiceRegistryImpl.createService(AbstractServiceRegistryImpl.java:275)
        at org.hibernate.service.internal.AbstractServiceRegistryImpl.initializeService(AbstractServiceRegistryImpl.java:237)
        at org.hibernate.service.internal.AbstractServiceRegistryImpl.getService(AbstractServiceRegistryImpl.java:214)
        at org.hibernate.id.factory.internal.DefaultIdentifierGeneratorFactory.injectServices(DefaultIdentifierGeneratorFactory.java:152)
        at org.hibernate.service.internal.AbstractServiceRegistryImpl.injectDependencies(AbstractServiceRegistryImpl.java:286)
        at org.hibernate.service.internal.AbstractServiceRegistryImpl.initializeService(AbstractServiceRegistryImpl.java:243)
        at org.hibernate.service.internal.AbstractServiceRegistryImpl.getService(AbstractServiceRegistryImpl.java:214)
        at org.hibernate.boot.internal.InFlightMetadataCollectorImpl.<init>(InFlightMetadataCollectorImpl.java:179)
        at org.hibernate.boot.model.process.spi.MetadataBuildingProcess.complete(MetadataBuildingProcess.java:119)
        at org.hibernate.jpa.boot.internal.EntityManagerFactoryBuilderImpl.metadata(EntityManagerFactoryBuilderImpl.java:1215)
        at org.hibernate.jpa.boot.internal.EntityManagerFactoryBuilderImpl.build(EntityManagerFactoryBuilderImpl.java:1246)
        at org.jboss.as.jpa.hibernate5.TwoPhaseBootstrapImpl.build(TwoPhaseBootstrapImpl.java:44)
        at org.jboss.as.jpa.service.PersistenceUnitServiceImpl$1$1.run(PersistenceUnitServiceImpl.java:170)
        ... 9 more
Caused by: org.hibernate.boot.registry.classloading.spi.ClassLoadingException: HHH010003: JDBC Driver class not found: org.postgresql.Driver
        at org.hibernate.c3p0.internal.C3P0ConnectionProvider.configure(C3P0ConnectionProvider.java:130)
        at org.hibernate.boot.registry.internal.StandardServiceRegistryImpl.configureService(StandardServiceRegistryImpl.java:100)
        at org.hibernate.service.internal.AbstractServiceRegistryImpl.initializeService(AbstractServiceRegistryImpl.java:246)
        at org.hibernate.service.internal.AbstractServiceRegistryImpl.getService(AbstractServiceRegistryImpl.java:214)
        at org.hibernate.engine.jdbc.env.internal.JdbcEnvironmentInitiator.buildJdbcConnectionAccess(JdbcEnvironmentInitiator.java:145)
        at org.hibernate.engine.jdbc.env.internal.JdbcEnvironmentInitiator.initiateService(JdbcEnvironmentInitiator.java:66)
        at org.hibernate.engine.jdbc.env.internal.JdbcEnvironmentInitiator.initiateService(JdbcEnvironmentInitiator.java:35)
        at org.hibernate.boot.registry.internal.StandardServiceRegistryImpl.initiateService(StandardServiceRegistryImpl.java:94)
        at org.hibernate.service.internal.AbstractServiceRegistryImpl.createService(AbstractServiceRegistryImpl.java:263)
        ... 21 more
Caused by: org.hibernate.boot.registry.classloading.spi.ClassLoadingException: Unable to load class [org.postgresql.Driver]
        at org.hibernate.boot.registry.classloading.internal.ClassLoaderServiceImpl.classForName(ClassLoaderServiceImpl.java:134)
        at org.hibernate.c3p0.internal.C3P0ConnectionProvider.configure(C3P0ConnectionProvider.java:127)
        ... 29 more
Caused by: java.lang.ClassNotFoundException: Could not load requested class : org.postgresql.Driver
        at org.hibernate.boot.registry.classloading.internal.AggregatedClassLoader.findClass(AggregatedClassLoader.java:210)
        at java.lang.ClassLoader.loadClass(ClassLoader.java:424)
        at java.lang.ClassLoader.loadClass(ClassLoader.java:357)
        at java.lang.Class.forName0(Native Method)
        at java.lang.Class.forName(Class.java:348)
        at org.hibernate.boot.registry.classloading.internal.ClassLoaderServiceImpl.classForName(ClassLoaderServiceImpl.java:131)
        ... 30 more

Here I am going to provide the solution to this problem. To load the PostgreSQL classes you need to create a module for the PostgreSQL driver and reference that module from your application JAR.

The steps for creating the module are:

  1. Copy the PostgreSQL driver JAR to /home/wildfly/modules/system/layers/base/org/postgres/main/postgresql-42.2.2.jar
  2. Create the /home/wildfly/modules/system/layers/base/org/postgres/main/module.xml file with the following content:

<module xmlns="urn:jboss:module:1.7" name="org.postgres">
        <resources>
                <resource-root path="postgresql-42.2.2.jar" />
        </resources>
        <dependencies>
                <module name="javax.api"/>
                <module name="javax.transaction.api"/>
        </dependencies>
</module>

Now you need to reference the module from your application JAR. To do that, create a jboss-deployment-structure.xml file with the following content:


<?xml version="1.0" encoding="UTF-8"?>
  <jboss-deployment-structure>
      <ear-subdeployments-isolated>false</ear-subdeployments-isolated>
          <deployment>
                <resources>
                        <resource-root path="myapplication.jar" />
                </resources>
                <dependencies>
                    <module name="org.hibernate" export="true"/>
                    <module name="javax.api" />
                    <module name="org.postgres" export="true"/>
                    <module name="org.infinispan" />
                    <module name="org.apache.commons.lang" />
                </dependencies>
                <exclusions>
                        <module name="org.jboss.ejb-client"/>
                </exclusions>

          </deployment>
  </jboss-deployment-structure>

Please note that export="true" is required; without it the driver class will not be loaded.
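
A quick way to verify that the module is actually visible to your deployment is to load the driver class explicitly from any class inside the application. This is a throwaway sketch; a failure here throws the same ClassNotFoundException as in the stack trace above:

public class DriverCheck {
    // Call this from anywhere inside the deployed application, e.g. a startup bean.
    public static boolean isPostgresDriverVisible() {
        try {
            // Succeeds only if the org.postgres module is wired into the deployment's classloader.
            Class.forName("org.postgresql.Driver");
            return true;
        } catch (ClassNotFoundException e) {
            return false;
        }
    }
}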



Friday 11 September 2020

HHH010002: C3P0 using driver: null at URL: null

Recently I was working on porting our application from WildFly 10 to WildFly 20, which involved upgrading Hibernate to 5.3.17. We were using the C3P0 connection pool in our persistence.xml. We got the following error in our logs:


2020-09-11 19:28:35,785 INFO  [org.hibernate.engine.jdbc.connections.internal.ConnectionProviderInitiator] (ServerService Thread Pool -- 86) HHH000130: Instantiating explicit connection provider: org.hibernate.service.jdbc.connections.internal.C3P0ConnectionProvider
2020-09-11 19:28:35,790 INFO  [org.hibernate.c3p0.internal.C3P0ConnectionProvider] (ServerService Thread Pool -- 86) HHH010002: C3P0 using driver: null at URL: null
2020-09-11 19:28:35,790 INFO  [org.hibernate.c3p0.internal.C3P0ConnectionProvider] (ServerService Thread Pool -- 86) HHH10001001: Connection properties: {user=pgctools, password=****}
2020-09-11 19:28:35,790 INFO  [org.hibernate.c3p0.internal.C3P0ConnectionProvider] (ServerService Thread Pool -- 86) HHH10001003: Autocommit mode: false
2020-09-11 19:28:35,791 WARN  [org.hibernate.c3p0.internal.C3P0ConnectionProvider] (ServerService Thread Pool -- 86) HHH10001006: No JDBC Driver class was specified by property hibernate.connection.driver_class
2020-09-11 19:28:35,824 INFO  [com.mchange.v2.log.MLog] (MLog-Init-Reporter) MLog clients using slf4j logging.
2020-09-11 19:28:35,965 INFO  [com.mchange.v2.c3p0.C3P0Registry] (ServerService Thread Pool -- 86) Initializing c3p0-0.9.5.2 [built 08-December-2015 22:06:04 -0800; debug? true; trace: 10]
2020-09-11 19:28:36,021 INFO  [org.hibernate.c3p0.internal.C3P0ConnectionProvider] (ServerService Thread Pool -- 86) HHH10001007: JDBC isolation level: <unknown>
2020-09-11 19:28:36,050 INFO  [com.mchange.v2.c3p0.impl.AbstractPoolBackedDataSource] (ServerService Thread Pool -- 86) Initializing c3p0 pool... com.mchange.v2.c3p0.PoolBackedDataSource@8c1576a0 [ connectionPoolDataSource -> com.mchange.v2.c3p0.WrapperConnectionPoolDataSource@9df832bb [ acquireIncrement -> 3, acquireRetryAttempts -> 30, acquireRetryDelay -> 1000, autoCommitOnClose -> true, automaticTestTable -> null, breakAfterAcquireFailure -> false, checkoutTimeout -> 0, connectionCustomizerClassName -> null, connectionTesterClassName -> com.mchange.v2.c3p0.impl.DefaultConnectionTester, contextClassLoaderSource -> caller, debugUnreturnedConnectionStackTraces -> false, factoryClassLocation -> null, forceIgnoreUnresolvedTransactions -> false, forceSynchronousCheckins -> false, identityToken -> 1hge13bacyt6x751iz2mtg|5014cd2c, idleConnectionTestPeriod -> 0, initialPoolSize -> 20, maxAdministrativeTaskTime -> 0, maxConnectionAge -> 0, maxIdleTime -> 300, maxIdleTimeExcessConnections -> 0, maxPoolSize -> 200, maxStatements -> 50, maxStatementsPerConnection -> 0, minPoolSize -> 20, nestedDataSource -> com.mchange.v2.c3p0.DriverManagerDataSource@974ea785 [ description -> null, driverClass -> null, factoryClassLocation -> null, forceUseNamedDriverClass -> false, identityToken -> 1hge13bacyt6x751iz2mtg|5051daf8, jdbcUrl -> null, properties -> {user=******, password=******} ], preferredTestQuery -> SELECT 1;, privilegeSpawnedThreads -> false, propertyCycle -> 0, statementCacheNumDeferredCloseThreads -> 0, testConnectionOnCheckin -> true, testConnectionOnCheckout -> false, unreturnedConnectionTimeout -> 0, usesTraditionalReflectiveProxies -> false; userOverrides: {} ], dataSourceName -> null, extensions -> {}, factoryClassLocation -> null, identityToken -> 1hge13bacyt6x751iz2mtg|18755649, numHelperThreads -> 3 ]
2020-09-11 19:29:06,113 WARN  [com.mchange.v2.resourcepool.BasicResourcePool] (C3P0PooledConnectionPoolManager[identityToken->1hge13bacyt6x751iz2mtg|18755649]-HelperThread-#2) com.mchange.v2.resourcepool.BasicResourcePool$ScatteredAcquireTask@3c1f0be -- Acquisition Attempt Failed!!! Clearing pending acquires. While trying to acquire a needed new resource, we failed to succeed more than the maximum number of allowed acquisition attempts (30). Last acquisition attempt exception: : java.lang.NullPointerException
        at org.postgresql.Driver.parseURL(Driver.java:547)
        at org.postgresql.Driver.acceptsURL(Driver.java:466)
        at java.sql.DriverManager.getDriver(DriverManager.java:299)
        at com.mchange.v2.c3p0.DriverManagerDataSource.driver(DriverManagerDataSource.java:285)
        at com.mchange.v2.c3p0.DriverManagerDataSource.getConnection(DriverManagerDataSource.java:175)
        at com.mchange.v2.c3p0.WrapperConnectionPoolDataSource.getPooledConnection(WrapperConnectionPoolDataSource.java:220)
        at com.mchange.v2.c3p0.WrapperConnectionPoolDataSource.getPooledConnection(WrapperConnectionPoolDataSource.java:206)
        at com.mchange.v2.c3p0.impl.C3P0PooledConnectionPool$1PooledConnectionResourcePoolManager.acquireResource(C3P0PooledConnectionPool.java:203)
        at com.mchange.v2.resourcepool.BasicResourcePool.doAcquire(BasicResourcePool.java:1138)
        at com.mchange.v2.resourcepool.BasicResourcePool.doAcquireAndDecrementPendingAcquiresWithinLockOnSuccess(BasicResourcePool.java:1125)
        at com.mchange.v2.resourcepool.BasicResourcePool.access$700(BasicResourcePool.java:44)
        at com.mchange.v2.resourcepool.BasicResourcePool$ScatteredAcquireTask.run(BasicResourcePool.java:1870)
        at com.mchange.v2.async.ThreadPoolAsynchronousRunner$PoolThread.run(ThreadPoolAsynchronousRunner.java:696)

The connection pool was unable to create connections because the database URL and driver class were missing. Although both were configured in persistence.xml, Hibernate was not able to pick them up. I had to download the Hibernate source code and debug it for two days to figure out the problem. Our persistence.xml looked like this:


<persistence xmlns="http://java.sun.com/xml/ns/persistence"
   xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
   xsi:schemaLocation="http://java.sun.com/xml/ns/persistence http://java.sun.com/xml/ns/persistence/persistence_1_0.xsd"
   version="1.0">
   <persistence-unit name="UnitName" transaction-type="JTA">
        <properties>
            <property name="jboss.as.jpa.providerModule" value="org.hibernate"/>
        </properties>
    <provider>org.hibernate.ejb.HibernatePersistence</provider>
      <jta-data-source>java:/PostgresDS</jta-data-source>

      <properties>
        <property name="hibernate.dialect" value="org.hibernate.dialect.PostgreSQLDialect"/>
        <property name="hibernate.show_sql" value="false"/>
        <property name="hibernate.format_sql" value="false" />
        <property name="hibernate.transaction.jta.platform" value="org.hibernate.service.jta.platform.internal.JBossAppServerJtaPlatform"/>
        <property name="hibernate.transaction.manager_lookup_class" value="org.hibernate.transaction.JBossTransactionManagerLookup"/>
        <property name="hibernate.session_factory_name" value="java:/jboss/SessionFactory"/>
        <property name="hibernate.cache.use_query_cache" value="true"/>
        <property name="hibernate.cache.use_second_level_cache" value="true"/>
        <property name="hibernate.cache.infinispan.cachemanager" value="java:jboss/infinispan/container/hibernate"/>
        <property name="hibernate.current_session_context_class" value="jta" />

       <property name="hibernate.connection.provider_class" value="org.hibernate.service.jdbc.connections.internal.C3P0ConnectionProvider"/>
       <property name="hibernate.connection.driver_class" value="org.postgresql.Driver" />
       <property name="hibernate.connection.url" value="jdbc:postgresql://localhost:5432/database" />
       <property name="hibernate.connection.username" value="user" />
       <property name="hibernate.connection.password" value="password" />
       <property name="hibernate.c3p0.min_size" value="20"/>
       <property name="hibernate.c3p0.max_size" value="200"/>
       <property name="hibernate.c3p0.timeout" value="300"/>
       <property name="hibernate.c3p0.max_statements" value="50"/>
       <property name="hibernate.c3p0.idle_test_periods" value="3000"/>
       <property name="hibernate.c3p0.testConnectionOnCheckin" value="true"/>
       <property name="hibernate.c3p0.preferredTestQuery" value="SELECT 1;"/>
       <property name="hibernate.c3p0.autoCommitOnClose" value="true"/>
      </properties>
   </persistence-unit>
</persistence>

The root cause of the problem was the datasource mentioned in the persistence.xml file. Hibernate removes the URL and driver settings from the configuration if a datasource is provided, because in that case the datasource is used for obtaining connections and the database URL and driver class are not needed. But the C3P0 connection pool requires them. I removed the jta-data-source setting and it still did not work, because WildFly inserted a default datasource named ExampleDS into our persistence unit. We need to disable the insertion of the default datasource using a WildFly-specific property in the persistence.xml file.

The following line needs to be inserted:


<property name="wildfly.jpa.allowdefaultdatasourceuse" value="false" />

After adding the above property, the C3P0 connection pool started working.




Saturday 5 September 2020

Configuring Log4j 2

In this post I am going to explain how to configure Log4j 2 to log high-severity messages to both the console and a log file, and lower-severity messages only to the log file. This way the console is not cluttered with unnecessary messages, while detailed information remains available in the log file for debugging.

I will define two appenders, A and R. Appender A has a threshold filter applied to it which accepts only log messages with severity ERROR and higher. Appender R does not have any filter attached to it, so it will log all messages.

The location of log4j2.xml needs to be provided using the system property log4j.configurationFile, for example -Dlog4j.configurationFile=/my/path/log4j2.xml

Please note that the system property log4j.configuration is used by Log4j 1.2, which has a different configuration syntax.



<?xml version="1.0" encoding="UTF-8"?>
<Configuration>
        <Appenders>
                <Console name="A" target="SYSTEM_OUT">
                        <PatternLayout pattern="%d [%t] %-5p {%F:%L} %x - %m%n" />
                        <ThresholdFilter level="ERROR" onMatch="ACCEPT" onMismatch="DENY"/>
                </Console>

                <RollingFile name="R"
                        fileName="/home/tomcat/logs/tomcat-log4j2.log" filePattern="/home/tomcat/logs/tomcat-log4j2-%d{yyyy-MM-dd}.log">
                        <PatternLayout pattern="%d [%t] %-5p {%F:%L} %x - %m%n" />
                        <Policies>
                                <TimeBasedTriggeringPolicy interval="1" />
                        </Policies>
                </RollingFile>
        </Appenders>


        <Loggers>
                <Logger name="org.apache" level="trace"/>
                <Root level="ERROR">
                        <AppenderRef ref="A" />
                        <AppenderRef ref="R" />
                </Root>
        </Loggers>
</Configuration>
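
For completeness, here is a minimal sketch of logging through the Log4j 2 API under this configuration; the class name is just an example:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class LoggingDemo {
    private static final Logger LOG = LogManager.getLogger(LoggingDemo.class);

    public static void main(String[] args) {
        // Accepted by the root logger (ERROR) and written by both appenders A and R.
        LOG.error("goes to the console and the log file");
        // Dropped here because this class inherits the root level ERROR;
        // classes under org.apache would log it to the file only, via appender R.
        LOG.debug("only loggers configured at a lower level emit this");
    }
}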

Configuring Log4j 1.2

Log4j 1.2 is a deprecated version of Log4j, but it is still used extensively. It is the default implementation used by Apache commons-logging. I am going to explain the configuration of Log4j 1.2 in this post.

My aim is to log all messages with level ERROR and higher to both the log file and the console, and messages with level TRACE and higher only to the log file. This way the console receives only the higher-priority messages while more detailed logs go to the log file.

To achieve this I create two appenders, A and R. A is a console appender. I set the threshold of appender A to ERROR, so any message with a lower level will not be logged by this appender. Appender R logs to tomcat.log and does not have any threshold attached to it.

The location of log4j.properties needs to be specified using the system property log4j.configuration, for example -Dlog4j.configuration=/my/path/log4j.properties

Please note that the log4j.configurationFile system property is used by Log4j 2, which has a different configuration syntax.


log4j.rootLogger=ERROR,A,R
#*** A is the console appender
log4j.appender.A=org.apache.log4j.ConsoleAppender
#*** A uses pattern layout
log4j.appender.A.layout=org.apache.log4j.PatternLayout
log4j.appender.A.threshold=ERROR
log4j.appender.A.layout.ConversionPattern=%d [%t] %-5p {%F:%L} %x - %m%n

#**** R is the Rolling FileAppender
log4j.appender.R=org.apache.log4j.DailyRollingFileAppender
log4j.appender.R.File=/home/tomcat/logs/tomcat.log
log4j.appender.R.DatePattern='.'yyyy-MM-dd
log4j.appender.R.layout=org.apache.log4j.PatternLayout
log4j.appender.R.layout.ConversionPattern=%d [%t] %-5p {%F:%L} %x - %m%n

#*** Log Levels
log4j.logger.org.apache=TRACE

Saturday 29 August 2020

Fixing EJB error in Wildfly 20

Recently I was working on porting my company's application to WildFly 20. We were stuck at an EJB exception whenever we tried to invoke any EJB method. We were accessing the EJB from within the WildFly application itself.


2020-08-29 13:26:20,833 [ServerService Thread Pool -- 102] ERROR {DataUpdaterImpl.java:1903} [] - EJBCLIENT000079: Unable to discover destination for request for EJB StatelessEJBLocator for "A/B/TestEJB", view is interface com.A.B.Test, affinity is None
javax.ejb.NoSuchEJBException: EJBCLIENT000079: Unable to discover destination for request for EJB StatelessEJBLocator for "ctools/ctservices/UniqueIdGeneratorEJB", view is interface com.tk20.ejb.api.util.UniqueIdGenerator, affinity is None
        at org.jboss.ejb.client.EJBClientInvocationContext.getResult(EJBClientInvocationContext.java:622) ~[jboss-ejb-client-4.0.33.Final.jar!/:4.0.33.Final]
        at org.jboss.ejb.client.EJBClientInvocationContext.getResult(EJBClientInvocationContext.java:553) ~[jboss-ejb-client-4.0.33.Final.jar!/:4.0.33.Final]
        at org.jboss.ejb.protocol.remote.RemotingEJBClientInterceptor.handleInvocationResult(RemotingEJBClientInterceptor.java:57) ~[jboss-ejb-client-4.0.33.Final.jar!/:4.0.33.Final]
        at org.jboss.ejb.client.EJBClientInvocationContext.getResult(EJBClientInvocationContext.java:624) ~[jboss-ejb-client-4.0.33.Final.jar!/:4.0.33.Final]
        at org.jboss.ejb.client.EJBClientInvocationContext.getResult(EJBClientInvocationContext.java:553) ~[jboss-ejb-client-4.0.33.Final.jar!/:4.0.33.Final]
        at org.jboss.ejb.client.TransactionPostDiscoveryInterceptor.handleInvocationResult(TransactionPostDiscoveryInterceptor.java:148) ~[jboss-ejb-client-4.0.33.Final.jar!/:4.0.33.Final]
        at org.jboss.ejb.client.EJBClientInvocationContext.getResult(EJBClientInvocationContext.java:624) ~[jboss-ejb-client-4.0.33.Final.jar!/:4.0.33.Final]
        at org.jboss.ejb.client.EJBClientInvocationContext.getResult(EJBClientInvocationContext.java:553) ~[jboss-ejb-client-4.0.33.Final.jar!/:4.0.33.Final]
        at org.jboss.ejb.client.DiscoveryEJBClientInterceptor.handleInvocationResult(DiscoveryEJBClientInterceptor.java:137) ~[jboss-ejb-client-4.0.33.Final.jar!/:4.0.33.Final]
        at org.jboss.ejb.client.EJBClientInvocationContext.getResult(EJBClientInvocationContext.java:624) ~[jboss-ejb-client-4.0.33.Final.jar!/:4.0.33.Final]
        at org.jboss.ejb.client.EJBClientInvocationContext.getResult(EJBClientInvocationContext.java:553) ~[jboss-ejb-client-4.0.33.Final.jar!/:4.0.33.Final]
        at org.jboss.ejb.client.NamingEJBClientInterceptor.handleInvocationResult(NamingEJBClientInterceptor.java:87) ~[jboss-ejb-client-4.0.33.Final.jar!/:4.0.33.Final]
        at org.jboss.ejb.client.EJBClientInvocationContext.getResult(EJBClientInvocationContext.java:624) ~[jboss-ejb-client-4.0.33.Final.jar!/:4.0.33.Final]
        at org.jboss.ejb.client.EJBClientInvocationContext.getResult(EJBClientInvocationContext.java:553) ~[jboss-ejb-client-4.0.33.Final.jar!/:4.0.33.Final]
        at org.jboss.ejb.client.TransactionInterceptor.handleInvocationResult(TransactionInterceptor.java:212) ~[jboss-ejb-client-4.0.33.Final.jar!/:4.0.33.Final]
        at org.jboss.ejb.client.EJBClientInvocationContext.getResult(EJBClientInvocationContext.java:624) ~[jboss-ejb-client-4.0.33.Final.jar!/:4.0.33.Final]
        at org.jboss.ejb.client.EJBClientInvocationContext.getResult(EJBClientInvocationContext.java:553) ~[jboss-ejb-client-4.0.33.Final.jar!/:4.0.33.Final]
        at org.jboss.ejb.client.EJBClientInvocationContext.awaitResponse(EJBClientInvocationContext.java:995) ~[jboss-ejb-client-4.0.33.Final.jar!/:4.0.33.Final]
        at org.jboss.ejb.client.EJBInvocationHandler.invoke(EJBInvocationHandler.java:191) ~[jboss-ejb-client-4.0.33.Final.jar!/:4.0.33.Final]
        at org.jboss.ejb.client.EJBInvocationHandler.invoke(EJBInvocationHandler.java:125) ~[jboss-ejb-client-4.0.33.Final.jar!/:4.0.33.Final]
        at com.sun.proxy.$Proxy124.getUniqueIdString(Unknown Source) ~[?:?]


This error took a lot of time to fix, and there was no information available on the internet. The problem was the way the JNDI lookup was done. We were looking up "ejb:A/B/TestEJB" in JNDI. The lookup was successful, but method invocation failed. The problem was fixed after changing the JNDI lookup to "java:global/A/B/Test".

The remote lookup for "java:global/A/B/Test" in JNDI failed, however. So for remote invocation the JNDI lookup has to be done without any qualifier, that is, "/A/B/Test". After doing this, remote EJB calls also started working fine.

The local lookup and the remote lookup therefore need to be done differently, as the sketch below illustrates.
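
This is a minimal sketch of the two lookups, reusing the anonymized names from the error above (A/B/TestEJB with the business interface com.A.B.Test); the exact JNDI names depend on your application and deployment names:

import javax.naming.Context;
import javax.naming.InitialContext;
import javax.naming.NamingException;

public class EjbLookupExample {
    public static void main(String[] args) throws NamingException {
        // Lookup from code running inside the same WildFly instance:
        // use the java:global namespace instead of the ejb: client namespace.
        Context localCtx = new InitialContext();
        Object localBean = localCtx.lookup("java:global/A/B/Test");

        // Lookup from a remote client: no java:global qualifier, just the
        // application/module/bean path. The remote connection itself is
        // configured separately (for example in wildfly-config.xml).
        Context remoteCtx = new InitialContext();
        Object remoteBean = remoteCtx.lookup("/A/B/Test");

        System.out.println(localBean + " / " + remoteBean);
    }
}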

Please refer to following post for accessing EJBs from remote machine.

https://blog.bigdatawithjasvant.com/2021/06/fixing-jboss-local-user.html

Monday 20 July 2020

Tuning ThreadPoolExecutor and BlockingQueueSize

ThreadPoolExecutor uses a blocking queue for storing requests until some executor thread is available to execute them. But if requests arrive at a rate higher than the rate at which they are processed, this queue keeps growing and will ultimately lead to an OutOfMemoryError, and the application will fail.

The solution to this problem is to use a bounded queue with a fixed size and reject requests when the queue is full. The challenge is to find the optimum size of the queue: large enough to buffer bursts of requests, but not so large that it brings down the system or produces responses so late that they are as good as failures.

I conducted a small experiment using the following code to find the optimum queue size for a ThreadPoolExecutor. The code below uses a SynchronousQueue, which has a queue size of 0, and it stabilizes at 18 worker threads. When I use a LinkedBlockingQueue with size 10, the code stabilizes at 13 worker threads, with the queue holding 0 or 1 items in the steady state. While the pool still had only 10 worker threads the queue grew to 10 items, which triggered the creation of more worker threads; once the pool reached 13 threads the queue stayed nearly empty. With a queue size of 20 the pool also stabilized at 13 worker threads: 13 threads were enough for the average workload, so the larger queue did not help and eventually filled up anyway. A queue size of 10 was good enough.

My recommendation is to keep the queue size equal to the number of core threads; a sketch of such a configuration follows the experiment code below.


package test;

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ThreadPoolTest {
 
 public static class Producer implements Runnable {
  ThreadPoolExecutor threadPool;
  public Producer(ThreadPoolExecutor es) {
   threadPool =es;
  }

  @Override
  public void run() {
   for(int i=0; i<180; i++) {
    int delay = ThreadLocalRandom.current().nextInt(2000);
    try {
     Thread.sleep(delay);
    } catch (InterruptedException e) {
     
    }
    threadPool.execute(new Task());
    int queueSize = threadPool.getQueue().size();
    int poolSize = threadPool.getPoolSize();
    System.out.println("Submitted task, queueSize="+ queueSize +" poolSize="+ poolSize);
   }   
  }
  
 }
 
 public static class Task implements Runnable{

  @Override
  public void run() {
   int delay = ThreadLocalRandom.current().nextInt(2000);
   try {
    Thread.sleep(delay);
   } catch (InterruptedException e) {
    return;
   }   
  }
  
 }

 public static void main(String[] args) {
  BlockingQueue<Runnable> bq = new SynchronousQueue<>();
  ThreadPoolExecutor tp = new ThreadPoolExecutor(10, 20, 2, TimeUnit.MINUTES,bq);
  for(int i=0; i<10; i++) {
   new Thread(new Producer(tp)).start();   
  }

 }

}
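
Based on the recommendation above, here is a minimal sketch of a bounded pool whose queue size equals the number of core threads. The pool sizes (10 core, 20 max) simply mirror the experiment; they are placeholders, not tuned values:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BoundedPoolExample {
    public static ThreadPoolExecutor newBoundedPool(int coreThreads, int maxThreads) {
        // Bounded queue sized to the number of core threads.
        LinkedBlockingQueue<Runnable> queue = new LinkedBlockingQueue<>(coreThreads);
        return new ThreadPoolExecutor(
                coreThreads, maxThreads, 2, TimeUnit.MINUTES, queue,
                // When the queue is full and all maxThreads are busy, reject the task
                // instead of letting work pile up without bound.
                new ThreadPoolExecutor.AbortPolicy());
    }

    public static void main(String[] args) {
        ThreadPoolExecutor pool = newBoundedPool(10, 20);
        pool.execute(() -> System.out.println("task executed"));
        pool.shutdown();
    }
}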

java.lang.OutOfMemoryError: Unable to create new native thread

Recently I received the error "java.lang.OutOfMemoryError: Unable to create new native thread", and when we debugged the issue on the Linux machine the root cause was not related to memory but to something totally different. In Java, when the OS refuses to create more threads because the limit on the number of processes has been reached, the failure gets mapped to java.lang.OutOfMemoryError, because there is no specific Java error for a new thread being denied due to the process limit.

If you want to check the limits on a Linux machine, run the following command:

$ ulimit -a
core file size          (blocks, -c) 0
data seg size           (kbytes, -d) unlimited
scheduling priority             (-e) 0
file size               (blocks, -f) unlimited
pending signals                 (-i) 62837
max locked memory       (kbytes, -l) 64
max memory size         (kbytes, -m) unlimited
open files                      (-n) 16384
pipe size            (512 bytes, -p) 8
POSIX message queues     (bytes, -q) 819200
real-time priority              (-r) 0
stack size              (kbytes, -s) 10240
cpu time               (seconds, -t) unlimited
max user processes              (-u) 1024
virtual memory          (kbytes, -v) unlimited
file locks                      (-x) unlimited


"max user process" define maximum number of child processes/threads a root level unix process can open. There are soft limits and hard limits, soft limits can be set on a process and it applicable to that process and child processes and hard limit is applicable to all processes of that user.

In our case the soft limit was configured to 1024 and we were trying to create more threads than that.
The limit is defined in the file "/etc/security/limits.d/90-nproc.conf"; we changed the number of processes to 2048 and our application started working.


# Default limit for number of user's processes to prevent
# accidental fork bombs.
# See rhbz #432903 for reasoning.

*          soft    nproc     1024
root       soft    nproc     unlimited

In Java, the denial of such OS resources to the application surfaces as java.lang.OutOfMemoryError, and the real reason might not be related to memory at all; the small sketch below reproduces this behaviour.
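
As an illustration, this throwaway sketch keeps starting sleeping threads until the JVM can no longer create one. On a machine with a low nproc soft limit it fails with the same "unable to create new native thread" message even though there is plenty of heap left (run it only in a disposable environment, since it deliberately exhausts the thread limit):

public class ThreadLimitDemo {
    public static void main(String[] args) {
        int count = 0;
        try {
            while (true) {
                // Each thread just sleeps so it stays alive and keeps its native thread.
                Thread t = new Thread(() -> {
                    try {
                        Thread.sleep(Long.MAX_VALUE);
                    } catch (InterruptedException e) {
                        // interrupted: let the thread exit
                    }
                });
                t.setDaemon(true);
                t.start();
                count++;
            }
        } catch (OutOfMemoryError e) {
            // Typically "unable to create new native thread" once the OS limit is hit.
            System.out.println("Failed after creating " + count + " threads: " + e.getMessage());
        }
    }
}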

Setting Compression Level in GZIPOutputStream

Most of the time people want to compress the files they are generating, to save space on disk and bandwidth in transmission. Apart from saving space, compression can actually speed up the application, because the smaller file size reduces disk I/O. For this, the compression and decompression need to be done in memory, not after writing the whole uncompressed contents to disk.

There are two formats which can be used if you are generating files from Java: GZIPOutputStream, which is used for generating GZIP files, and ZipOutputStream, which is used for generating ZIP files. There is one basic difference between a GZIP and a ZIP file. A GZIP file can contain only one file inside it, and the name of the contained file is optional, while a ZIP file is an archive of multiple files and the name of each contained file is mandatory when creating the archive. Because a ZIP file may hold multiple files, it cannot simply be passed through a filter that decompresses it on the fly from an input stream, since the filter cannot select one file out of the several that may be present.
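
For the single-file GZIP format, on-the-fly decompression from a stream is straightforward. A minimal sketch (the file name input.txt.gz is just an example):

import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.zip.GZIPInputStream;

public class GZipStreamRead {
    public static void main(String[] args) throws IOException {
        // Decompress while reading: no uncompressed copy is ever written to disk.
        try (BufferedReader reader = new BufferedReader(new InputStreamReader(
                new GZIPInputStream(new FileInputStream("input.txt.gz"))))) {
            String line;
            while ((line = reader.readLine()) != null) {
                System.out.println(line);
            }
        }
    }
}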

For seamless processing of a compressed file while reading, the GZIP format is therefore the most suitable one. Unfortunately, the Java API for GZIPOutputStream lacks a method for controlling the compression level to achieve BEST_SPEED or BEST_COMPRESSION as needed; this facility is available in ZipOutputStream. Sometimes people use ZipOutputStream with the compression level set to BEST_SPEED to gain performance when they actually need GZIPOutputStream for compressing their data. That creates a problem for the reader, who now has to handle an archive that can potentially contain multiple files rather than a simple compressed file, and in-memory filters can't be used for decompression because of the possibility of multiple files in the ZIP archive. For the same reason there are no libraries that provide an in-memory filter for reading ZIP file contents as a plain stream of data.

Fortunately, you can set the compression level in GZIPOutputStream as well, by creating a subclass of GZIPOutputStream that exposes the setLevel(int level) method of the underlying Deflater. We did this in our code and achieved even slightly better results than using ZipOutputStream with the BEST_SPEED compression level. Following is a comparison when compressing a 5.4 GB file:


Zip compression with BEST_SPEED      48078219 bytes     80 seconds
GZip compression with BEST_SPEED     48078113 bytes     78 seconds

Here is the code for MyGZIPOutputStream class:

import java.util.zip.*;
import java.io.*;

public class MyGZIPOutputStream extends GZIPOutputStream
{
    /**
     * Creates a new output stream with the specified buffer size.
     * @param out the output stream
     * @param size the output buffer size
     * @exception IOException If an I/O error has occurred.
     * @exception IllegalArgumentException if size is <= 0
     */
    public MyGZIPOutputStream(OutputStream out, int size) throws IOException {
        super(out, size);
    }

    /**
     * Creates a new output stream with a default buffer size.
     * @param out the output stream
     * @exception IOException If an I/O error has occurred.
     */
    public MyGZIPOutputStream(OutputStream out) throws IOException {
        this(out, 512);
    }

    /**
     * Sets the compression level for subsequent entries which are DEFLATED.
     * The default setting is DEFAULT_COMPRESSION.
     * @param level the compression level (0-9)
     * @exception IllegalArgumentException if the compression level is invalid
     */
    public void setLevel(int level) {
        def.setLevel(level);
    }
}

Sample file using BEST_SPEED compression with MyGZIPOutputStream:

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.zip.*;

public class GZipCompression {

    public static void main(String[] args) throws IOException {
        compressInputFile("a.txt", "a.txt.gz");
    }

    public static void compressInputFile(String inputFileName,
            String outputFileName) throws IOException {
        FileOutputStream fos = new FileOutputStream(new File(outputFileName));
        MyGZIPOutputStream gzos = null;
        byte[] buffer = new byte[1024];
        gzos = new MyGZIPOutputStream(fos);
        gzos.setLevel(Deflater.BEST_SPEED);
        long startTime = System.currentTimeMillis();              

        FileInputStream fis = new FileInputStream(inputFileName);

        int length;
        while ((length = fis.read(buffer)) > 0) {
            gzos.write(buffer, 0, length);
        }
        fis.close();
        gzos.close();

        long endTime = System.currentTimeMillis();
        System.out.println("Time taken to gzip "+ (endTime-startTime) + " miliseconds.");
    }
}

Wednesday 8 July 2020

Mounting EFS volume on EC2 machine

In my previous post I explained how to mount an EFS volume inside a Fargate task container. EFS serves as persistent storage for the ephemeral container. Sometimes you may want to see the data the containers are storing in EFS. Most of the time the containers are special-purpose containers, like MongoDB, which do not provide any interface to browse the file system.

The easy way to look at EFS is to mount it on an EC2 instance. An EFS volume can be mounted on multiple machines, so you can mount it on an EC2 machine and inside the container at the same time. You need to look up the DNS name of the EFS volume.

This DNS name is used while mounting the EFS volume as an NFS drive on the EC2 instance.

Create a directory as the mount point for the EFS volume. For example, I am creating /efs3:


sudo mkdir /efs3

Now you can mount the EFS volume using the following command. Please replace the DNS name with that of your own EFS volume:

sudo mount -t nfs -o nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport fs-9ce1684d.efs.ap-south-1.amazonaws.com:/ /efs3

You can follow this link for more information: https://docs.aws.amazon.com/efs/latest/ug/mounting-fs-mount-cmd-dns-name.html

Saturday 27 June 2020

Mounting EFS volume in fargate task in AWS

In this post I am going to explain the process of mounting an EFS disk in a Fargate service in AWS.
The first step is to define volumes in the task definition. I am going to use a file browser container in this exercise, which allows you to browse the root file system of the container. This container should be used only for experimentation and should be terminated as soon as you are done with your experiment.

I am creating a task definition fargate-filebrowser1 of the Fargate launch type.


Click on "Add Volume" link at the bottom of task definition page and fill in details of EFS volume.

After adding the EFS volume to the task definition, add a container to it. You can add multiple containers to one task definition, but we will add only one. After clicking the "Add Container" button, provide the container name and image name:

The image name is jasvantsingh/myfilebrowser:latest. It is based on filebrowser/filebrowser, with just one change: it exposes the root file system rather than a single directory.

Scroll down to the bottom of the container options and provide the mount point for the EFS volume inside the container.

The EFS volume will be mounted at /vol2 as per this configuration. Any file created inside the /vol2 directory will be persisted across task restarts, and even across deletion and recreation of the service. EFS is a kind of NFS mount which resides on persistent storage outside the container.


Click on "Create" button under service tab of your cluster.

Provide the service details. Please note that you need to select platform version 1.4.0 and not LATEST. The LATEST version does not work; perhaps it does not map to 1.4.0.


On the next screen provide the networking details. Please make sure that "Auto-assign public IP" is ENABLED. I will not be using any load balancer, so the task needs to have a public IP address to be accessible from outside.


On the auto scaling screen keep auto scaling disabled.


On the review screen click the "Create Service" button. The service will be created. Click on the "View Service" button; it will take you to the service details page. Wait for some time for the task to be started and listed in the Tasks tab.


Click on the task ID. It will take you to task details page. Please note the public IP address of the task.


Paste the public IP address into a browser and access the file browser. Use admin as both the username and the password to log in. You can see the "vol2" directory there; this is the persistent directory. You can place any file inside it and it will be persisted.


Please make sure to terminate the service and task because anybody can hack into this file browser and put malicious content there without your knowledge. This file browser is only for experimentation.

Please comment if something is not working. I will reply to your comment.

EFS does not work with fargate service in AWS

On April 8, 2020 Amazon announced the availability of EFS for Fargate services. I spent one week trying to mount an EFS volume in my Fargate service with no success. I was using the platform version LATEST. The service startup always failed with the error:
Service creation failed: One or more of the requested capabilities are not supported.
I was trying to follow the tutorial at:
https://aws.amazon.com/blogs/aws/amazon-ecs-supports-efs/
After struggling for one week I noticed the following line in the tutorial:
It’s also essential here to make sure that I set the platform version to 1.4.0
When I tried platform version 1.4.0, it worked. For some reason 1.4.0 and LATEST are not the same: EFS works with the 1.4.0 platform version but not with LATEST.

Tuesday 28 April 2020

Number of ways to insert two pairs of parentheses into a string of N characters

I came across an interesting coding problem at https://www.geeksforgeeks.org/number-of-ways-to-insert-two-pairs-of-parentheses-into-a-string-of-n-characters/

Problem:
Given a string str of length N, the task is to find the number of ways to insert only 2 pairs of parentheses into the given string such that the resultant string is still valid.
Example:
Input: str = "ab"
Output: 6
The valid strings are ((a))b, ((a)b), ((ab)), (a)(b), (a(b)), a((b)), which are a total of 6 ways.
Solution:
Approach: it can be observed that for the lengths of the string 1, 2, 3, …, N a series will be formed as 1, 6, 20, 50, 105, 196, 336, 540, … whose Nth term is (N + 1)^2 * ((N + 1)^2 - 1) / 12.
The solution is good, but the page does not provide a derivation of the formula on which it is based. I tried to derive the formula on my own, and this is what I came up with:

  1. There are N+1 places where a parenthesis can be placed: one at the beginning, N-1 in the middle and one at the end. So there are (N + 1)^4 / 4 ways to place the four parentheses. We can place multiple parentheses at the same location, and the two opening parentheses are interchangeable with each other, as are the two closing parentheses, which is why we divide by 4.
  2. Some of the combinations counted in the previous step are invalid, so we need to subtract them.
  3. All four parentheses at the same location is not a valid case; in the count above there are (N+1)/4 such combinations, so we need to subtract them.
  4. Three parentheses at one location and the fourth at a different location is also not a valid combination. There are (N+1)*N/4 such combinations, so we subtract them as well.
  5. After the above subtractions we come up with (N + 1)^2 * ((N + 1)^2 - 1) / 4 ways.
  6. Now, if you look closely, out of the following 6 possible orderings of the parentheses only the first two are valid (ignoring the string characters in between):
    1. (())
    2. ()()
    3. )()(
    4. )(()
    5. ))((
    6. ())(
  7. So we need to divide by 3 to get the valid ways of placing the parentheses, which gives the final formula (N + 1)^2 * ((N + 1)^2 - 1) / 12.
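
To sanity-check the formula, this small throwaway program prints its value for N = 1 to 8; the output matches the series 1, 6, 20, 50, 105, 196, 336, 540 quoted above.

public class ParenthesesFormulaCheck {
    // (N + 1)^2 * ((N + 1)^2 - 1) / 12, the Nth term derived above
    static long ways(long n) {
        long m = (n + 1) * (n + 1);
        return m * (m - 1) / 12;
    }

    public static void main(String[] args) {
        for (int n = 1; n <= 8; n++) {
            System.out.println("N=" + n + " -> " + ways(n));
        }
    }
}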

            Thursday 16 April 2020

            An interesting Google interview question

            Recently I read about an interesting Google interview question at https://www.geeksforgeeks.org/count-strings-can-formed-using-b-c-given-constraints/

            The problem statement is:
            Given a length n, count the number of strings of length n that can be made using ‘a’, ‘b’ and ‘c’ with at-most one ‘b’ and two ‘c’s allowed.
            The page on GeeksForGeeks provides two programmatic solutions using recursion. But the most interesting one is the mathematical solution, which calculates the value directly. It took me quite a long time to derive that formula. The formula is:
            count = 1 + 2n + n*((n*n)-1)/2
            Here I want to provide derivation for this formula. You may find it useful.


            1. count of strings with no b's and no c's = 1
            2. count of strings with one b and no c's = n
            3. count of strings with no b's and one c = n
            4. count of strings with one b and one c = n*(n-1)
            5. count of strings with one b and two c's = n*(n-1)*(n-2)/2;
            6. count of strings with no b and two c's = n*(n-1)/2


            If you add all 6 values then you get:
            count = 1 + 2n + n*((n*n)-1)/2
            The above formula works only for n greater than or equal to 3. A brute-force check of the formula follows below.
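
            As a quick check, this throwaway sketch enumerates all 3^n strings for small n, counts those with at most one 'b' and at most two 'c's, and compares the count with the formula:

            public class CountStringsCheck {
                // Count strings of length n over {a, b, c} with at most one 'b' and at most
                // two 'c's, by brute-force enumeration of all 3^n strings.
                static long bruteForce(int n) {
                    long count = 0;
                    long total = (long) Math.pow(3, n);
                    for (long code = 0; code < total; code++) {
                        long x = code;
                        int bs = 0, cs = 0;
                        for (int i = 0; i < n; i++) {
                            int digit = (int) (x % 3);   // 0 = 'a', 1 = 'b', 2 = 'c'
                            if (digit == 1) bs++;
                            if (digit == 2) cs++;
                            x /= 3;
                        }
                        if (bs <= 1 && cs <= 2) count++;
                    }
                    return count;
                }

                static long formula(long n) {
                    return 1 + 2 * n + n * (n * n - 1) / 2;
                }

                public static void main(String[] args) {
                    for (int n = 3; n <= 10; n++) {
                        System.out.println("n=" + n + " brute=" + bruteForce(n) + " formula=" + formula(n));
                    }
                }
            }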

            Tuesday 14 April 2020

            The ACL permission error while exporting Cloud Watch logs to S3

            Yesterday I struggled for more than 6 hours to export CloudWatch logs to an S3 bucket. I was getting the following error:
            The ACL permission for the selected bucket is not correct. The Amazon S3 bucket must reside in the same region as the log data that you want to export. Learn more.


            I tried following all the steps mentioned in the link but it still did not work. Later on I found the mistake; it is an interesting one, so I am writing it up in my blog so that you don't make the same mistake.

            The linked documentation says that you need to set the following policy on the S3 bucket:
            
            {
                "Version": "2012-10-17",
                "Statement": [
                  {
                      "Action": "s3:GetBucketAcl",
                      "Effect": "Allow",
                      "Resource": "arn:aws:s3:::my-exported-logs",
                      "Principal": { "Service": "logs.us-west-2.amazonaws.com" }
                  },
                  {
                      "Action": "s3:PutObject" ,
                      "Effect": "Allow",
                      "Resource": "arn:aws:s3:::my-exported-logs/random-string/*",
                      "Condition": { "StringEquals": { "s3:x-amz-acl": "bucket-owner-full-control" } },
                      "Principal": { "Service": "logs.us-west-2.amazonaws.com" }
                  }
                ]
            }
            
            
            Here my-exported-logs is the bucket name and needs to be replaced with your bucket name, and us-west-2 needs to be replaced with your region code (for Mumbai it is ap-south-1).

            The page says that random-string can be replaced with any random string, which makes you believe that this string is not important, but that is wrong. It is the most important string for exporting logs to S3. The random string which you use in the bucket policy needs to be provided as the S3 bucket prefix while exporting logs to the S3 bucket. If you don't provide an S3 bucket prefix, or provide a different one, you get the ACL error, because the policy grants s3:PutObject permission only on the random-string directory, so putting logs in any other directory fails. The following export configuration works.
            The only difference between the working and non-working export dialogs is random-string being provided as the S3 bucket prefix. I learned this the hard way by wasting 6 hours.

            Wednesday 5 February 2020

            Kubernetes Error: Error response from daemon: invalid mode

            Recently I was trying to mount a persistent volume in a Kubernetes pod. I faced the error "Error: Error response from daemon: invalid mode". After struggling for a long time and searching the internet I was able to solve this problem, and here I am writing up the solution. I was trying to follow the tutorial https://kubernetes.io/docs/tasks/configure-pod-container/configure-persistent-volume-storage/ , the only difference being that I was trying it on Windows rather than Linux.

            I tried with the following persistence_volume.yaml file:
             
            apiVersion: v1
            kind: PersistentVolume
            metadata:
              name: task-pv-volume
              labels:
                type: local
            spec:
              storageClassName: manual
              capacity:
                storage: 10Gi
              accessModes:
                - ReadWriteOnce
              hostPath:
                path: "c:/jasvant/kubernetes/persistence_volume/data"
            
            
            My directory on Windows was present at "C:\jasvant\kubernetes\persistence_volume\data", so I replaced all '\' characters with '/'. But it did not work and the pod container failed to start.
            
            C:\Users\jasvant\kubernetes>kubectl get pod
            NAME                                  READY   STATUS                 RESTARTS   AGE
            hello-minikube-6fb6cb79cc-8drt9       1/1     Running                6          130d
            kubernetes-bootcamp-dd569fc9c-bt74m   1/1     Running                5          96d
            kubernetes-bootcamp-dd569fc9c-fl64v   1/1     Running                5          96d
            task-pv-pod                           0/1     CreateContainerError   0          62s
            
            

            The container failed to start with CreateContainerError. When I described the pod I got the following error:
            
            C:\Users\jasvant\kubernetes>kubectl describe pod  task-pv-pod
            Name:         task-pv-pod
            Namespace:    default
            Priority:     0
            Node:         minikube/10.0.2.15
            Start Time:   Wed, 05 Feb 2020 14:27:59 +0530
            Labels:       
            Annotations:  
            Status:       Pending
            IP:           172.17.0.7
            Containers:
              task-pv-container:
                Container ID:
                Image:          nginx
                Image ID:
                Port:           80/TCP
                Host Port:      0/TCP
                State:          Waiting
                  Reason:       CreateContainerError
                Ready:          False
                Restart Count:  0
                Environment:    
                Mounts:
                  /usr/share/nginx/html from task-pv-storage (rw)
                  /var/run/secrets/kubernetes.io/serviceaccount from default-token-scccw (ro)
            Conditions:
              Type              Status
              Initialized       True
              Ready             False
              ContainersReady   False
              PodScheduled      True
            Volumes:
              task-pv-storage:
                Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
                ClaimName:  task-pv-claim
                ReadOnly:   false
              default-token-scccw:
                Type:        Secret (a volume populated by a Secret)
                SecretName:  default-token-scccw
                Optional:    false
            QoS Class:       BestEffort
            Node-Selectors:  
            Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                             node.kubernetes.io/unreachable:NoExecute for 300s
            Events:
              Type     Reason     Age                 From               Message
              ----     ------     ----                ----               -------
              Normal   Scheduled  2m14s               default-scheduler  Successfully assigned default/task-pv-pod to minikube
              Normal   Pulled     14s (x8 over 2m8s)  kubelet, minikube  Successfully pulled image "nginx"
              Warning  Failed     14s (x8 over 2m8s)  kubelet, minikube  Error: Error response from daemon: invalid mode: /usr/share/nginx/html
              Normal   Pulling    4s (x9 over 2m13s)  kubelet, minikube  Pulling image "nginx"
            
            

            This error message gives no hint about the actual problem. The root cause is that hostPath.path needs to be a path on the VM created by Minikube, and not a directory path on the Windows machine, so C: was not accepted by Minikube. It is, however, possible to mount a Windows directory onto the VM and use that directory as the persistent volume storage directory. By default the 'C:\Users' directory from the Windows machine is mounted at the /c/Users folder in the VM, so "/c/Users/jasvant/kubernetes/persistence_volume/data" will work for me. But if you create a directory outside of 'C:\Users' it will not work. Please refer to https://minikube.sigs.k8s.io/docs/tasks/mount/ for more information.

            If you want to log into the VM created by Minikube to run the Kubernetes cluster, you can use the "minikube ssh" command on your Windows machine. It will open an SSH session to the VM and you can browse the file system there.