@tamtam180
2013-02-21 14:07:09,310 ERROR org.apache.hadoop.security.UserGroupInformation: PriviledgedActionException as:hdfs (auth:SIMPLE) cause:java.net.SocketTimeoutException: Read timed out
2013-02-21 14:07:09,310 ERROR org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode: Exception in doCheckpoint
java.net.SocketTimeoutException: Read timed out
at java.net.SocketInputStream.socketRead0(Native Method)
at java.net.SocketInputStream.read(SocketInputStream.java:129)
at java.io.BufferedInputStream.read1(BufferedInputStream.java:256)
at java.io.BufferedInputStream.read(BufferedInputStream.java:317)
at java.io.FilterInputStream.read(FilterInputStream.java:116)
at sun.net.www.protocol.http.HttpURLConnection$HttpInputStream.read(HttpURLConnection.java:2672)
at java.security.DigestInputStream.read(DigestInputStream.java:144)
at java.io.FilterInputStream.read(FilterInputStream.java:90)
at org.apache.hadoop.hdfs.server.namenode.TransferFsImage.doGetUrl(TransferFsImage.java:322)
at org.apache.hadoop.hdfs.server.namenode.TransferFsImage.getFileClient(TransferFsImage.java:222)
at org.apache.hadoop.hdfs.server.namenode.TransferFsImage.downloadImageToStorage(TransferFsImage.java:86)
at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$3.run(SecondaryNameNode.java:387)
at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$3.run(SecondaryNameNode.java:373)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:396)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1332)
at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.downloadCheckpointFiles(SecondaryNameNode.java:372)
at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:465)
at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doWork(SecondaryNameNode.java:331)
at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$2.run(SecondaryNameNode.java:298)
at org.apache.hadoop.security.SecurityUtil.doAsLoginUserOrFatal(SecurityUtil.java:452)
at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.run(SecondaryNameNode.java:294)
at java.lang.Thread.run(Thread.java:662)
2013-02-21 14:46:51,141 INFO org.apache.hadoop.hdfs.server.namenode.TransferFsImage: Opening connection to http://*****:50090/getimage?getimage=1&txid=125282869&storageInfo=-40:457134500:0:CID-61bc5c82-0f33-493a-a5a7-f8d6184f18b1
2013-02-21 14:48:24,955 ERROR org.apache.hadoop.security.UserGroupInformation: PriviledgedActionException as:hdfs (auth:SIMPLE) cause:java.net.SocketTimeoutException: Read timed out
2013-02-21 14:48:24,964 WARN org.mortbay.log: /getimage: java.io.IOException: GetImage failed. java.net.SocketTimeoutException: Read timed out
at java.net.SocketInputStream.socketRead0(Native Method)
at java.net.SocketInputStream.read(SocketInputStream.java:129)
at java.io.BufferedInputStream.read1(BufferedInputStream.java:256)
at java.io.BufferedInputStream.read(BufferedInputStream.java:317)
at java.io.FilterInputStream.read(FilterInputStream.java:116)
at sun.net.www.protocol.http.HttpURLConnection$HttpInputStream.read(HttpURLConnection.java:2672)
at java.security.DigestInputStream.read(DigestInputStream.java:144)
at java.io.FilterInputStream.read(FilterInputStream.java:90)
at org.apache.hadoop.hdfs.server.namenode.TransferFsImage.doGetUrl(TransferFsImage.java:322)
at org.apache.hadoop.hdfs.server.namenode.TransferFsImage.getFileClient(TransferFsImage.java:222)
at org.apache.hadoop.hdfs.server.namenode.TransferFsImage.downloadImageToStorage(TransferFsImage.java:86)
at org.apache.hadoop.hdfs.server.namenode.GetImageServlet$1.run(GetImageServlet.java:164)
at org.apache.hadoop.hdfs.server.namenode.GetImageServlet$1.run(GetImageServlet.java:115)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:396)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1332)
at org.apache.hadoop.hdfs.server.namenode.GetImageServlet.doGet(GetImageServlet.java:115)
at javax.servlet.http.HttpServlet.service(HttpServlet.java:707)
at javax.servlet.http.HttpServlet.service(HttpServlet.java:820)
at org.mortbay.jetty.servlet.ServletHolder.handle(ServletHolder.java:511)
at org.mortbay.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1221)
at org.apache.hadoop.http.HttpServer$QuotingInputFilter.doFilter(HttpServer.java:1056)
at org.mortbay.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1212)
at org.mortbay.jetty.servlet.ServletHandler.handle(ServletHandler.java:399)
at org.mortbay.jetty.security.SecurityHandler.handle(SecurityHandler.java:216)
at org.mortbay.jetty.servlet.SessionHandler.handle(SessionHandler.java:182)
at org.mortbay.jetty.handler.ContextHandler.handle(ContextHandler.java:766)
at org.mortbay.jetty.webapp.WebAppContext.handle(WebAppContext.java:450)
at org.mortbay.jetty.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:230)
at org.mortbay.jetty.handler.HandlerWrapper.handle(HandlerWrapper.java:152)
at org.mortbay.jetty.Server.handle(Server.java:326)
at org.mortbay.jetty.HttpConnection.handleRequest(HttpConnection.java:542)
at org.mortbay.jetty.HttpConnection$RequestHandler.headerComplete(HttpConnection.java:928)
at org.mortbay.jetty.HttpParser.parseNext(HttpParser.java:549)
at org.mortbay.jetty.HttpParser.parseAvailable(HttpParser.java:212)
at org.mortbay.jetty.HttpConnection.handle(HttpConnection.java:404)
at org.mortbay.io.nio.SelectChannelEndPoint.run(SelectChannelEndPoint.java:410)
at org.mortbay.thread.QueuedThreadPool$PoolThread.run(QueuedThreadPool.java:582)
2013-02-21 14:48:24,960 ERROR org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode: Exception in doCheckpoint
org.apache.hadoop.hdfs.server.namenode.TransferFsImage$HttpGetFailedException: Image transfer servlet at http://***.***.***.***:50070/getimage?putimage=1&txid=125282869&port=50090&storageInfo=-40:457134500:0:CID-61bc5c82-0f33-493a-a5a7-f8d6184f18b1 failed with status code 410
Response message:
GetImage failed. java.net.SocketTimeoutException: Read timed out at java.net.SocketInputStream.socketRead0(Native Method) at java.net.SocketInputStream.read(SocketInputStream.java:129) at java.io.BufferedInputStream.read1(BufferedInputStream.java:256) at java.io.BufferedInputStream.read(BufferedInputStream.java:317) at java.io.FilterInputStream.read(FilterInputStream.java:116) at sun.net.www.protocol.http.HttpURLConnection$HttpInputStream.read(HttpURLConnection.java:2672) at java.security.DigestInputStream.read(DigestInputStream.java:144) at java.io.FilterInputStream.read(FilterInputStream.java:90) at org.apache.hadoop.hdfs.server.namenode.TransferFsImage.doGetUrl(TransferFsImage.java:322) at org.apache.hadoop.hdfs.server.namenode.TransferFsImage.getFileClient(TransferFsImage.java:222) at org.apache.hadoop.hdfs.server.namenode.TransferFsImage.downloadImageToStorage(TransferFsImage.java:86) at org.apache.hadoop.hdfs.server.namenode.GetImageServlet$1.run(GetImageServlet.java:164) at org.apache.hadoop.hdfs.server.namenode.GetImageServlet$1.run(GetImageServlet.java:115) at java.security.AccessController.doPrivileged(Native Method) at javax.security.auth.Subject.doAs(Subject.java:396) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1332) at org.apache.hadoop.hdfs.server.namenode.GetImageServlet.doGet(GetImageServlet.java:115) at javax.servlet.http.HttpServlet.service(HttpServlet.java:707) at javax.servlet.http.HttpServlet.service(HttpServlet.java:820) at org.mortbay.jetty.servlet.ServletHolder.handle(ServletHolder.java:511) at org.mortbay.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1221) at org.apache.hadoop.http.HttpServer$QuotingInputFilter.doFilter(HttpServer.java:1056) at org.mortbay.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1212) at org.mortbay.jetty.servlet.ServletHandler.handle(ServletHandler.java:399) at org.mortbay.jetty.security.SecurityHandler.handle(SecurityHandler.java:216) at org.mortbay.jett
at org.apache.hadoop.hdfs.server.namenode.TransferFsImage.doGetUrl(TransferFsImage.java:245)
at org.apache.hadoop.hdfs.server.namenode.TransferFsImage.getFileClient(TransferFsImage.java:222)
at org.apache.hadoop.hdfs.server.namenode.TransferFsImage.uploadImageFromStorage(TransferFsImage.java:137)
at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:474)
at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doWork(SecondaryNameNode.java:331)
at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$2.run(SecondaryNameNode.java:298)
at org.apache.hadoop.security.SecurityUtil.doAsLoginUserOrFatal(SecurityUtil.java:452)
at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.run(SecondaryNameNode.java:294)
at java.lang.Thread.run(Thread.java:662)
************************************************************/
2013-02-21 15:03:12,896 INFO org.apache.hadoop.metrics2.impl.MetricsConfig: loaded properties from hadoop-metrics2.properties
2013-02-21 15:03:12,911 INFO org.apache.hadoop.metrics2.impl.MetricsSystemImpl: Scheduled snapshot period at 10 second(s).
2013-02-21 15:03:12,911 INFO org.apache.hadoop.metrics2.impl.MetricsSystemImpl: SecondaryNameNode metrics system started
2013-02-21 15:03:13,075 WARN org.apache.hadoop.hdfs.server.common.Util: Path /var/lib/hadoop-hdfs/dfs/namesecondary should be specified as a URI in configuration files. Please update hdfs configuration.
2013-02-21 15:03:13,076 WARN org.apache.hadoop.hdfs.server.common.Util: Path /var/lib/hadoop-hdfs/dfs/namesecondary should be specified as a URI in configuration files. Please update hdfs configuration.
2013-02-21 15:03:13,132 INFO org.apache.hadoop.hdfs.server.common.Storage: Lock on /var/lib/hadoop-hdfs/dfs/namesecondary/in_use.lock acquired by nodename 17003@*****
2013-02-21 15:03:13,204 INFO org.apache.hadoop.util.HostsFileReader: Refreshing hosts (include/exclude) list
2013-02-21 15:03:13,204 INFO org.apache.hadoop.util.HostsFileReader: Adding ***** to the list of hosts from /etc/hadoop/conf/hosts.include
2013-02-21 15:03:13,204 INFO org.apache.hadoop.util.HostsFileReader: Adding ***** to the list of hosts from /etc/hadoop/conf/hosts.include
2013-02-21 15:03:13,204 INFO org.apache.hadoop.util.HostsFileReader: Adding ***** to the list of hosts from /etc/hadoop/conf/hosts.include
2013-02-21 15:03:43,361 INFO org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager: dfs.block.invalidate.limit=1000
2013-02-21 15:03:43,443 INFO org.apache.hadoop.hdfs.server.blockmanagement.BlockManager: dfs.block.access.token.enable=false
2013-02-21 15:03:43,443 INFO org.apache.hadoop.hdfs.server.blockmanagement.BlockManager: defaultReplication = 3
2013-02-21 15:03:43,443 INFO org.apache.hadoop.hdfs.server.blockmanagement.BlockManager: maxReplication = 512
2013-02-21 15:03:43,443 INFO org.apache.hadoop.hdfs.server.blockmanagement.BlockManager: minReplication = 1
2013-02-21 15:03:43,444 INFO org.apache.hadoop.hdfs.server.blockmanagement.BlockManager: maxReplicationStreams = 2
2013-02-21 15:03:43,444 INFO org.apache.hadoop.hdfs.server.blockmanagement.BlockManager: shouldCheckForEnoughRacks = true
2013-02-21 15:03:43,444 INFO org.apache.hadoop.hdfs.server.blockmanagement.BlockManager: replicationRecheckInterval = 3000
2013-02-21 15:03:43,444 INFO org.apache.hadoop.hdfs.server.blockmanagement.BlockManager: encryptDataTransfer = false
2013-02-21 15:03:43,444 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: fsOwner = hdfs (auth:SIMPLE)
2013-02-21 15:03:43,444 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: supergroup = hadoop
2013-02-21 15:03:43,444 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: isPermissionEnabled = false
2013-02-21 15:03:43,444 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: HA Enabled: false
2013-02-21 15:03:43,456 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: Append Enabled: true
2013-02-21 15:03:43,948 INFO org.apache.hadoop.hdfs.server.namenode.NameNode: Caching file names occuring more than 10 times
2013-02-21 15:03:43,951 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: dfs.namenode.safemode.threshold-pct = 0.9990000128746033
2013-02-21 15:03:43,951 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: dfs.namenode.safemode.min.datanodes = 0
2013-02-21 15:03:43,951 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: dfs.namenode.safemode.extension = 30000
2013-02-21 15:03:44,058 INFO org.mortbay.log: Logging to org.slf4j.impl.Log4jLoggerAdapter(org.mortbay.log) via org.mortbay.log.Slf4jLog
2013-02-21 15:03:44,128 INFO org.apache.hadoop.http.HttpServer: Added global filter 'safety' (class=org.apache.hadoop.http.HttpServer$QuotingInputFilter)
2013-02-21 15:03:44,131 INFO org.apache.hadoop.http.HttpServer: Added filter static_user_filter (class=org.apache.hadoop.http.lib.StaticUserWebFilter$StaticUserFilter) to context secondary
2013-02-21 15:03:44,131 INFO org.apache.hadoop.http.HttpServer: Added filter static_user_filter (class=org.apache.hadoop.http.lib.StaticUserWebFilter$StaticUserFilter) to context static
2013-02-21 15:03:44,132 INFO org.apache.hadoop.http.HttpServer: Added filter static_user_filter (class=org.apache.hadoop.http.lib.StaticUserWebFilter$StaticUserFilter) to context logs
2013-02-21 15:03:44,152 INFO org.apache.hadoop.http.HttpServer: Jetty bound to port 50090
2013-02-21 15:03:44,153 INFO org.mortbay.log: jetty-6.1.26.cloudera.2
2013-02-21 15:03:44,486 INFO org.mortbay.log: Started SelectChannelConnector@0.0.0.0:50090
2013-02-21 15:03:44,486 INFO org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode: Web server init done
2013-02-21 15:03:44,487 INFO org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode: Secondary Web-server up at: 0.0.0.0:50090
2013-02-21 15:03:44,487 INFO org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode: Checkpoint Period :600 secs (10 min)
2013-02-21 15:03:44,487 INFO org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode: Log Size Trigger :40000 txns
2013-02-21 15:04:44,650 INFO org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode: Image has changed. Downloading updated image from NN.
2013-02-21 15:04:44,669 INFO org.apache.hadoop.hdfs.server.namenode.TransferFsImage: Opening connection to http://*****:50070/getimage?getimage=1&txid=125282835&storageInfo=-40:457134500:0:CID-61bc5c82-0f33-493a-a5a7-f8d6184f18b1
2013-02-21 15:09:43,173 INFO org.apache.hadoop.hdfs.server.namenode.TransferFsImage: Transfer took 298.50s at 9180.93 KB/s
2013-02-21 15:09:43,173 INFO org.apache.hadoop.hdfs.server.namenode.TransferFsImage: Downloaded file fsimage.ckpt_0000000000125282835 size 2806291337 bytes.
2013-02-21 15:09:43,852 INFO org.apache.hadoop.hdfs.server.namenode.TransferFsImage: Skipping download of remote edit log [125282836,125282837] since it already is stored locally at /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282836-0000000000125282837
2013-02-21 15:09:43,852 INFO org.apache.hadoop.hdfs.server.namenode.TransferFsImage: Skipping download of remote edit log [125282838,125282845] since it already is stored locally at /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282838-0000000000125282845
2013-02-21 15:09:43,852 INFO org.apache.hadoop.hdfs.server.namenode.TransferFsImage: Skipping download of remote edit log [125282846,125282847] since it already is stored locally at /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282846-0000000000125282847
2013-02-21 15:09:43,853 INFO org.apache.hadoop.hdfs.server.namenode.TransferFsImage: Skipping download of remote edit log [125282848,125282849] since it already is stored locally at /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282848-0000000000125282849
2013-02-21 15:09:43,853 INFO org.apache.hadoop.hdfs.server.namenode.TransferFsImage: Skipping download of remote edit log [125282850,125282851] since it already is stored locally at /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282850-0000000000125282851
2013-02-21 15:09:43,853 INFO org.apache.hadoop.hdfs.server.namenode.TransferFsImage: Skipping download of remote edit log [125282852,125282853] since it already is stored locally at /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282852-0000000000125282853
2013-02-21 15:09:43,854 INFO org.apache.hadoop.hdfs.server.namenode.TransferFsImage: Skipping download of remote edit log [125282854,125282855] since it already is stored locally at /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282854-0000000000125282855
2013-02-21 15:09:43,854 INFO org.apache.hadoop.hdfs.server.namenode.TransferFsImage: Skipping download of remote edit log [125282856,125282857] since it already is stored locally at /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282856-0000000000125282857
2013-02-21 15:09:43,854 INFO org.apache.hadoop.hdfs.server.namenode.TransferFsImage: Skipping download of remote edit log [125282858,125282859] since it already is stored locally at /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282858-0000000000125282859
2013-02-21 15:09:43,854 INFO org.apache.hadoop.hdfs.server.namenode.TransferFsImage: Skipping download of remote edit log [125282860,125282861] since it already is stored locally at /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282860-0000000000125282861
2013-02-21 15:09:43,855 INFO org.apache.hadoop.hdfs.server.namenode.TransferFsImage: Skipping download of remote edit log [125282862,125282863] since it already is stored locally at /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282862-0000000000125282863
2013-02-21 15:09:43,855 INFO org.apache.hadoop.hdfs.server.namenode.TransferFsImage: Skipping download of remote edit log [125282864,125282865] since it already is stored locally at /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282864-0000000000125282865
2013-02-21 15:09:43,855 INFO org.apache.hadoop.hdfs.server.namenode.TransferFsImage: Skipping download of remote edit log [125282866,125282867] since it already is stored locally at /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282866-0000000000125282867
2013-02-21 15:09:43,856 INFO org.apache.hadoop.hdfs.server.namenode.TransferFsImage: Skipping download of remote edit log [125282868,125282869] since it already is stored locally at /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282868-0000000000125282869
2013-02-21 15:09:43,856 INFO org.apache.hadoop.hdfs.server.namenode.TransferFsImage: Skipping download of remote edit log [125282870,125282871] since it already is stored locally at /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282870-0000000000125282871
2013-02-21 15:09:43,856 INFO org.apache.hadoop.hdfs.server.namenode.TransferFsImage: Opening connection to http://*****:50070/getimage?getedit=1&startTxId=125282872&endTxId=125282873&storageInfo=-40:457134500:0:CID-61bc5c82-0f33-493a-a5a7-f8d6184f18b1
2013-02-21 15:09:43,874 INFO org.apache.hadoop.hdfs.server.namenode.TransferFsImage: Transfer took 0.02s at 0.00 KB/s
2013-02-21 15:09:43,874 INFO org.apache.hadoop.hdfs.server.namenode.TransferFsImage: Downloaded file edits_0000000000125282872-0000000000125282873 size 30 bytes.
2013-02-21 15:09:43,878 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Loading image file /var/lib/hadoop-hdfs/dfs/namesecondary/current/fsimage_0000000000125282835 using no compression
2013-02-21 15:09:43,878 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Number of files = 32303744
2013-02-21 15:13:00,254 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Number of files under construction = 2
2013-02-21 15:13:00,258 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Image file of size 2806291337 loaded in 196 seconds.
2013-02-21 15:13:00,258 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Loaded image for txid 125282835 from /var/lib/hadoop-hdfs/dfs/namesecondary/current/fsimage_0000000000125282835
2013-02-21 15:13:00,267 INFO org.apache.hadoop.hdfs.server.namenode.Checkpointer: Checkpointer about to load edits from 16 stream(s).
2013-02-21 15:13:00,270 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Reading /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282836-0000000000125282837 expecting start txid #125282836
2013-02-21 15:13:00,282 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Edits file /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282836-0000000000125282837 of size 30 edits # 2 loaded in 0 seconds.
2013-02-21 15:13:00,282 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Reading /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282838-0000000000125282845 expecting start txid #125282838
2013-02-21 15:13:00,284 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Edits file /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282838-0000000000125282845 of size 624 edits # 8 loaded in 0 seconds.
2013-02-21 15:13:00,284 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Reading /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282846-0000000000125282847 expecting start txid #125282846
2013-02-21 15:13:00,285 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Edits file /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282846-0000000000125282847 of size 30 edits # 2 loaded in 0 seconds.
2013-02-21 15:13:00,285 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Reading /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282848-0000000000125282849 expecting start txid #125282848
2013-02-21 15:13:00,285 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Edits file /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282848-0000000000125282849 of size 30 edits # 2 loaded in 0 seconds.
2013-02-21 15:13:00,285 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Reading /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282850-0000000000125282851 expecting start txid #125282850
2013-02-21 15:13:00,285 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Edits file /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282850-0000000000125282851 of size 30 edits # 2 loaded in 0 seconds.
2013-02-21 15:13:00,285 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Reading /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282852-0000000000125282853 expecting start txid #125282852
2013-02-21 15:13:00,285 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Edits file /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282852-0000000000125282853 of size 30 edits # 2 loaded in 0 seconds.
2013-02-21 15:13:00,286 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Reading /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282854-0000000000125282855 expecting start txid #125282854
2013-02-21 15:13:00,286 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Edits file /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282854-0000000000125282855 of size 30 edits # 2 loaded in 0 seconds.
2013-02-21 15:13:00,286 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Reading /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282856-0000000000125282857 expecting start txid #125282856
2013-02-21 15:13:00,286 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Edits file /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282856-0000000000125282857 of size 30 edits # 2 loaded in 0 seconds.
2013-02-21 15:13:00,286 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Reading /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282858-0000000000125282859 expecting start txid #125282858
2013-02-21 15:13:00,286 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Edits file /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282858-0000000000125282859 of size 30 edits # 2 loaded in 0 seconds.
2013-02-21 15:13:00,286 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Reading /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282860-0000000000125282861 expecting start txid #125282860
2013-02-21 15:13:00,287 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Edits file /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282860-0000000000125282861 of size 30 edits # 2 loaded in 0 seconds.
2013-02-21 15:13:00,287 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Reading /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282862-0000000000125282863 expecting start txid #125282862
2013-02-21 15:13:00,287 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Edits file /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282862-0000000000125282863 of size 30 edits # 2 loaded in 0 seconds.
2013-02-21 15:13:00,287 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Reading /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282864-0000000000125282865 expecting start txid #125282864
2013-02-21 15:13:00,287 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Edits file /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282864-0000000000125282865 of size 30 edits # 2 loaded in 0 seconds.
2013-02-21 15:13:00,287 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Reading /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282866-0000000000125282867 expecting start txid #125282866
2013-02-21 15:13:00,287 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Edits file /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282866-0000000000125282867 of size 30 edits # 2 loaded in 0 seconds.
2013-02-21 15:13:00,287 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Reading /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282868-0000000000125282869 expecting start txid #125282868
2013-02-21 15:13:00,288 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Edits file /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282868-0000000000125282869 of size 30 edits # 2 loaded in 0 seconds.
2013-02-21 15:13:00,288 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Reading /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282870-0000000000125282871 expecting start txid #125282870
2013-02-21 15:13:00,288 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Edits file /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282870-0000000000125282871 of size 30 edits # 2 loaded in 0 seconds.
2013-02-21 15:13:00,288 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Reading /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282872-0000000000125282873 expecting start txid #125282872
2013-02-21 15:13:00,288 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Edits file /var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000125282872-0000000000125282873 of size 30 edits # 2 loaded in 0 seconds.
2013-02-21 15:13:02,412 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Saving image file /var/lib/hadoop-hdfs/dfs/namesecondary/current/fsimage.ckpt_0000000000125282873 using no compression
2013-02-21 15:14:12,143 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Image file of size 2806290949 saved in 69 seconds.
2013-02-21 15:14:12,153 INFO org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager: Going to retain 10 images with txid >= 125282825
2013-02-21 15:14:12,153 INFO org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager: Purging old image FSImageFile(file=/var/lib/hadoop-hdfs/dfs/namesecondary/current/fsimage_0000000000124687274, cpktTxId=0000000000124687274)
2013-02-21 15:14:12,409 INFO org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager: Purging old edit log EditLogFile(file=/var/lib/hadoop-hdfs/dfs/namesecondary/current/edits_0000000000123598279-0000000000124060123,first=0000000000123598279,last=0000000000124060123,inProgress=false,hasCorruptHeader=false)
2013-02-21 15:14:12,413 INFO org.apache.hadoop.hdfs.server.namenode.TransferFsImage: Opening connection to http://*****:50070/getimage?putimage=1&txid=125282873&port=50090&storageInfo=-40:457134500:0:CID-61bc5c82-0f33-493a-a5a7-f8d6184f18b1
2013-02-21 15:15:45,428 ERROR org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode: Exception in doCheckpoint
org.apache.hadoop.hdfs.server.namenode.TransferFsImage$HttpGetFailedException: Image transfer servlet at http://*****:50070/getimage?putimage=1&txid=125282873&port=50090&storageInfo=-40:457134500:0:CID-61bc5c82-0f33-493a-a5a7-f8d6184f18b1 failed with status code 410
Response message:
GetImage failed. java.net.SocketTimeoutException: Read timed out at java.net.SocketInputStream.socketRead0(Native Method) at java.net.SocketInputStream.read(SocketInputStream.java:129) at java.io.BufferedInputStream.read1(BufferedInputStream.java:256) at java.io.BufferedInputStream.read(BufferedInputStream.java:317) at java.io.FilterInputStream.read(FilterInputStream.java:116) at sun.net.www.protocol.http.HttpURLConnection$HttpInputStream.read(HttpURLConnection.java:2672) at java.security.DigestInputStream.read(DigestInputStream.java:144) at java.io.FilterInputStream.read(FilterInputStream.java:90) at org.apache.hadoop.hdfs.server.namenode.TransferFsImage.doGetUrl(TransferFsImage.java:322) at org.apache.hadoop.hdfs.server.namenode.TransferFsImage.getFileClient(TransferFsImage.java:222) at org.apache.hadoop.hdfs.server.namenode.TransferFsImage.downloadImageToStorage(TransferFsImage.java:86) at org.apache.hadoop.hdfs.server.namenode.GetImageServlet$1.run(GetImageServlet.java:164) at org.apache.hadoop.hdfs.server.namenode.GetImageServlet$1.run(GetImageServlet.java:115) at java.security.AccessController.doPrivileged(Native Method) at javax.security.auth.Subject.doAs(Subject.java:396) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1332) at org.apache.hadoop.hdfs.server.namenode.GetImageServlet.doGet(GetImageServlet.java:115) at javax.servlet.http.HttpServlet.service(HttpServlet.java:707) at javax.servlet.http.HttpServlet.service(HttpServlet.java:820) at org.mortbay.jetty.servlet.ServletHolder.handle(ServletHolder.java:511) at org.mortbay.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1221) at org.apache.hadoop.http.HttpServer$QuotingInputFilter.doFilter(HttpServer.java:1056) at org.mortbay.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1212) at org.mortbay.jetty.servlet.ServletHandler.handle(ServletHandler.java:399) at org.mortbay.jetty.security.SecurityHandler.handle(SecurityHandler.java:216) at org.mortbay.jett
at org.apache.hadoop.hdfs.server.namenode.TransferFsImage.doGetUrl(TransferFsImage.java:245)
at org.apache.hadoop.hdfs.server.namenode.TransferFsImage.getFileClient(TransferFsImage.java:222)
at org.apache.hadoop.hdfs.server.namenode.TransferFsImage.uploadImageFromStorage(TransferFsImage.java:137)
at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:474)
at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doWork(SecondaryNameNode.java:331)
at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$2.run(SecondaryNameNode.java:298)
at org.apache.hadoop.security.SecurityUtil.doAsLoginUserOrFatal(SecurityUtil.java:452)
at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.run(SecondaryNameNode.java:294)
at java.lang.Thread.run(Thread.java:662)
2013-02-21 15:16:45,463 INFO org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode: Image has changed. Downloading updated image from NN.
2013-02-21 15:16:45,464 INFO org.apache.hadoop.hdfs.server.namenode.TransferFsImage: Opening connection to http://*****:50070/getimage?getimage=1&txid=125282835&storageInfo=-40:457134500:0:CID-61bc5c82-0f33-493a-a5a7-f8d6184f18b1
2013-02-21 15:18:37,900 INFO org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode: SHUTDOWN_MSG:
/************************************************************
SHUTDOWN_MSG: Shutting down SecondaryNameNode at *****/*****
@bommuraj2012

How were these issues resolved?
I am getting the same error on my Hadoop cluster.

Best Regards,
Bommuraj
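
The log above shows the ~2.8 GB fsimage taking roughly 300 seconds to transfer (about 9 MB/s), which is long enough for the getimage HTTP connection to hit its read timeout; the NameNode then answers the putimage request with status 410 and the checkpoint is abandoned. A commonly suggested mitigation on this Hadoop generation is to raise the image-transfer socket timeout (and, if throttling is what slows the copy, the transfer bandwidth) in hdfs-site.xml on both the NameNode and the SecondaryNameNode. The snippet below is only a sketch with assumed values, not the configuration used on this cluster, and dfs.image.transfer.timeout is only honored on versions that ship that property:

<!-- hdfs-site.xml on the NameNode and SecondaryNameNode; illustrative values, adjust to your cluster -->
<property>
  <!-- Socket timeout for fsimage/edits HTTP transfers, in milliseconds.
       Assumed here to be raised well above the observed ~300 s transfer time. -->
  <name>dfs.image.transfer.timeout</name>
  <value>1200000</value>
</property>
<property>
  <!-- Throttle for image transfers in bytes per second; 0 means unthrottled.
       Only relevant if a throttle is what makes the transfer slow in the first place. -->
  <name>dfs.image.transfer.bandwidthPerSec</name>
  <value>0</value>
</property>

Both daemons read these values at startup, so they need a restart after the change for the new timeout to take effect.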
