/*
 * Trident - A Multithreaded Server Alternative
 * Copyright 2014 The TridentSDK Team
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package net.tridentsdk.server.world;

import net.tridentsdk.meta.nbt.CompoundTag;
import net.tridentsdk.meta.nbt.NBTDecoder;
import net.tridentsdk.meta.nbt.NBTEncoder;
import net.tridentsdk.meta.nbt.NBTException;
import net.tridentsdk.util.TridentLogger;
import net.tridentsdk.world.ChunkLocation;

import java.io.*;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.GZIPInputStream;
import java.util.zip.InflaterInputStream;

/**
 * Represents a Region File (in region/ directory) in memory.
 *
 * <p>Layout (standard MCRegion format, as implemented below): the first 4 KB
 * sector is the chunk offset table — one {@code int} per chunk of the 32x32
 * grid, encoded as {@code (firstSector << 8) | sectorCount}; the second 4 KB
 * sector holds per-chunk modification timestamps; all remaining sectors hold
 * chunk payloads, each prefixed by a 4-byte length and a 1-byte compression
 * version ({@link #CHUNK_HEADER_SIZE}).</p>
 */
public class RegionFile {
    private static final ConcurrentMap<Path, RegionFile> FILE_CACHE = new ConcurrentHashMap<>();

    private static final int VERSION_GZIP = 1;
    private static final int VERSION_DEFLATE = 2;

    private static final int SECTOR_BYTES = 4096;
    private static final int SECTOR_INTS = SECTOR_BYTES / 4;

    static final int CHUNK_HEADER_SIZE = 5;
    private static final byte[] EMPTY_SECTOR = new byte[SECTOR_BYTES];

    private final File fileName;
    private RandomAccessFile file;
    private final int[] offsets;
    private final int[] chunkTimestamps;
    private ArrayList<Boolean> sectorFree;
    private int sizeDelta;
    private long lastModified = 0;

    /**
     * Returns the cached {@code RegionFile} for the region containing the
     * given chunk, opening (and caching) it on first access.
     *
     * @param name     the world directory name
     * @param location the chunk whose enclosing region file is wanted
     * @return the shared region file instance for that region
     */
    public static RegionFile fromPath(String name, ChunkLocation location) {
        final Path path = Paths.get(name + "/region/", WorldUtils.regionFile(location));
        return FILE_CACHE.computeIfAbsent(path, (k) -> new RegionFile(k.toFile()));
    }

    /**
     * Reads and decodes the NBT root tag of the chunk at the given location.
     *
     * @param location the chunk location; only the lower 5 bits of each
     *                 coordinate index within this region
     * @return the decoded root tag, or {@code null} if the chunk is missing
     *         or could not be decoded
     */
    public CompoundTag decode(ChunkLocation location) {
        DataInputStream dis = getChunkDataInputStream(location.x() & 31, location.z() & 31);
        if (dis == null) return null;

        // try-with-resources so the decompression stream is always released,
        // even when decoding fails
        try (DataInputStream in = dis) {
            return new NBTDecoder(in).decode();
        } catch (NBTException | IOException e) {
            e.printStackTrace();
        }

        return null;
    }

    /**
     * Loads the chunk at the given location into a new {@link TridentChunk}.
     *
     * @param world    the world the chunk belongs to
     * @param location the chunk location
     * @return the loaded chunk, or {@code null} if no data exists for it
     */
    public TridentChunk loadChunkData(TridentWorld world, ChunkLocation location) {
        CompoundTag chunkRoot = decode(location);
        if (chunkRoot == null) return null;

        TridentChunk chunk = new TridentChunk(world, location);
        chunk.load(chunkRoot);

        return chunk;
    }

    /**
     * Serializes the given chunk to NBT and writes it into this region file.
     *
     * @param chunk the chunk to persist
     */
    public void saveChunkData(TridentChunk chunk) {
        ChunkLocation loc = chunk.location();
        CompoundTag chunkRoot = chunk.asNbt();
        // loc & 31 is always in [0, 31], so the stream is never null here
        DataOutputStream dos = getChunkDataOutputStream(loc.x() & 31, loc.z() & 31);
        NBTEncoder encoder = new NBTEncoder(dos);
        try {
            encoder.encode(chunkRoot);
        } catch (NBTException e) {
            e.printStackTrace();
        } finally {
            try {
                // closing the ChunkBuffer underneath is what actually writes
                // the compressed chunk to disk
                dos.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }

    ////////////////////////////// ACTUAL FILE /////////////////////////////

    /**
     * Opens (or creates) a region file and builds the in-memory free-sector
     * map from its offset table.
     *
     * @param path the region file on disk
     */
    public RegionFile(File path) {
        offsets = new int[SECTOR_INTS];
        chunkTimestamps = new int[SECTOR_INTS];

        fileName = path;

        sizeDelta = 0;

        try {
            if (path.exists()) {
                lastModified = path.lastModified();
            }

            file = new RandomAccessFile(path, "rw");

            if (file.length() < 2 * SECTOR_BYTES) {
                // the file lacks the two header sectors; write them both.
                // (checking against a single sector would accept a file with
                // an offset table but no timestamp table, and the
                // sectorFree.set(1, ...) below would then throw)

                /* we need to write the chunk offset table */
                for (int i = 0; i < SECTOR_INTS; ++i) {
                    file.writeInt(0);
                }
                // write another sector for the timestamp info
                for (int i = 0; i < SECTOR_INTS; ++i) {
                    file.writeInt(0);
                }

                sizeDelta += SECTOR_BYTES * 2;
            }

            if ((file.length() & 0xfff) != 0) {
                /* the file size is not a multiple of 4KB, grow it */
                // pad with exactly the bytes needed to reach the next 4KB
                // boundary (writing (length & 0xfff) bytes, as the original
                // MCRegion code did, usually leaves the file misaligned)
                int remainder = (int) (file.length() & 0xfff);
                for (int i = 0; i < SECTOR_BYTES - remainder; ++i) {
                    file.write((byte) 0);
                }
            }

            /* set up the available sector map */
            int nSectors = (int) (file.length() / SECTOR_BYTES);
            sectorFree = new ArrayList<>(nSectors);

            for (int i = 0; i < nSectors; ++i) {
                sectorFree.add(true);
            }

            sectorFree.set(0, false); // chunk offset table
            sectorFree.set(1, false); // for the last modified info

            file.seek(0);
            for (int i = 0; i < SECTOR_INTS; ++i) {
                int offset = file.readInt();
                offsets[i] = offset;
                // only honor entries that fit within the file; anything else
                // is treated as unallocated
                if (offset != 0 && (offset >> 8) + (offset & 0xFF) <= sectorFree.size()) {
                    for (int sectorNum = 0; sectorNum < (offset & 0xFF); ++sectorNum) {
                        sectorFree.set((offset >> 8) + sectorNum, false);
                    }
                }
            }
            for (int i = 0; i < SECTOR_INTS; ++i) {
                int lastModValue = file.readInt();
                chunkTimestamps[i] = lastModValue;
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /* the modification date of the region file when it was first opened */
    public long lastModified() {
        return lastModified;
    }

    /* gets how much the region file has grown since it was last checked */
    public synchronized int getSizeDelta() {
        int ret = sizeDelta;
        sizeDelta = 0;
        return ret;
    }

    // various small debug printing helpers
    private void debug(String in) {
        TridentLogger.get().warn(in);
    }

    private void debugln(String in) {
        // debug(in);
    }

    private void debug(String mode, int x, int z, String in) {
        // debug("REGION " + mode + " " + fileName.getName() + "[" + x + "," + z + "] = " + in);
    }

    private void debug(String mode, int x, int z, int count, String in) {
        // debug("REGION " + mode + " " + fileName.getName() + "[" + x + "," + z + "] " + count + "B = " + in);
    }

    private void debugln(String mode, int x, int z, String in) {
        // debug(mode, x, z, in);
    }

    /*
     * gets an (uncompressed) stream representing the chunk data returns null if
     * the chunk is not found or an error occurs
     */
    public synchronized DataInputStream getChunkDataInputStream(int x, int z) {
        if (outOfBounds(x, z)) {
            return null;
        }

        try {
            int offset = getOffset(x, z);
            if (offset == 0) {
                // chunk was never written
                return null;
            }

            int sectorNumber = offset >> 8;
            int numSectors = offset & 0xFF;

            if (sectorNumber + numSectors > sectorFree.size()) {
                // offset table points past the end of the file
                return null;
            }

            // widen to long before multiplying: the sector number is up to
            // 24 bits, so an int product can overflow past 2 GB
            file.seek((long) sectorNumber * SECTOR_BYTES);
            int length = file.readInt();

            if (length > SECTOR_BYTES * numSectors) {
                // declared length cannot fit in the allocated sectors
                return null;
            }
            if (length <= 0) {
                // corrupted header; a negative size would throw below
                return null;
            }

            byte version = file.readByte();
            if (version == VERSION_GZIP) {
                byte[] data = new byte[length - 1];
                // readFully: a plain read(byte[]) may return fewer bytes
                // than requested and silently truncate the chunk
                file.readFully(data);
                return new DataInputStream(new GZIPInputStream(new ByteArrayInputStream(data)));
            } else if (version == VERSION_DEFLATE) {
                byte[] data = new byte[length - 1];
                file.readFully(data);
                return new DataInputStream(new InflaterInputStream(new ByteArrayInputStream(data)));
            }

            // unknown compression version
            return null;
        } catch (IOException e) {
            return null;
        }
    }

    /**
     * Returns a stream that buffers chunk data in memory and writes it to
     * disk (deflate-compressed) when closed, or {@code null} for coordinates
     * outside this region.
     */
    public DataOutputStream getChunkDataOutputStream(int x, int z) {
        if (outOfBounds(x, z)) return null;

        return new DataOutputStream(new DeflaterOutputStream(new ChunkBuffer(x, z)));
    }

    /*
     * lets chunk writing be multithreaded by not locking the whole file as a
     * chunk is serializing -- only writes when serialization is over
     */
    class ChunkBuffer extends ByteArrayOutputStream {
        private final int x;
        private final int z;

        public ChunkBuffer(int x, int z) {
            super(8096); // initialize to 8KB
            this.x = x;
            this.z = z;
        }

        @Override
        public void close() {
            RegionFile.this.write(x, z, buf, count);
        }
    }

    /* write a chunk at (x,z) with length bytes of data to disk */
    protected synchronized void write(int x, int z, byte[] data, int length) {
        try {
            int offset = getOffset(x, z);
            int sectorNumber = offset >> 8;
            int sectorsAllocated = offset & 0xFF;
            int sectorsNeeded = (length + CHUNK_HEADER_SIZE) / SECTOR_BYTES + 1;

            // maximum chunk size is 1MB (the sector count field is one byte)
            if (sectorsNeeded >= 256) {
                return;
            }

            if (sectorNumber != 0 && sectorsAllocated == sectorsNeeded) {
                /* we can simply overwrite the old sectors */
                write(sectorNumber, data, length);
            } else {
                /* we need to allocate new sectors */

                /* mark the sectors previously used for this chunk as free */
                for (int i = 0; i < sectorsAllocated; ++i) {
                    sectorFree.set(sectorNumber + i, true);
                }

                /* scan for a free space large enough to store this chunk */
                int runStart = sectorFree.indexOf(true);
                int runLength = 0;
                if (runStart != -1) {
                    for (int i = runStart; i < sectorFree.size(); ++i) {
                        if (runLength != 0) {
                            if (sectorFree.get(i)) runLength++;
                            else runLength = 0;
                        } else if (sectorFree.get(i)) {
                            runStart = i;
                            runLength = 1;
                        }
                        if (runLength >= sectorsNeeded) {
                            break;
                        }
                    }
                }

                if (runLength >= sectorsNeeded) {
                    /* we found a free space large enough */
                    sectorNumber = runStart;
                    setOffset(x, z, (sectorNumber << 8) | sectorsNeeded);
                    for (int i = 0; i < sectorsNeeded; ++i) {
                        sectorFree.set(sectorNumber + i, false);
                    }
                    write(sectorNumber, data, length);
                } else {
                    /*
                     * no free space large enough found -- we need to grow the
                     * file
                     */
                    file.seek(file.length());
                    sectorNumber = sectorFree.size();
                    for (int i = 0; i < sectorsNeeded; ++i) {
                        file.write(EMPTY_SECTOR);
                        sectorFree.add(false);
                    }
                    sizeDelta += SECTOR_BYTES * sectorsNeeded;

                    write(sectorNumber, data, length);
                    setOffset(x, z, (sectorNumber << 8) | sectorsNeeded);
                }
            }
            setTimestamp(x, z, (int) (System.currentTimeMillis() / 1000L));
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /* write a chunk data to the region file at specified sector number */
    private void write(int sectorNumber, byte[] data, int length) throws IOException {
        // long seek: see getChunkDataInputStream for the overflow rationale
        file.seek((long) sectorNumber * SECTOR_BYTES);
        file.writeInt(length + 1); // chunk length (payload + version byte)
        file.writeByte(VERSION_DEFLATE); // chunk version number
        file.write(data, 0, length); // chunk data
    }

    /* is this an invalid chunk coordinate? */
    private boolean outOfBounds(int x, int z) {
        return x < 0 || x >= 32 || z < 0 || z >= 32;
    }

    private int getOffset(int x, int z) {
        return offsets[x + z * 32];
    }

    /**
     * Returns whether a chunk exists at the given in-region coordinates.
     * Out-of-range coordinates simply report {@code false} instead of
     * throwing {@link ArrayIndexOutOfBoundsException}.
     */
    public boolean hasChunk(int x, int z) {
        return !outOfBounds(x, z) && getOffset(x, z) != 0;
    }

    private void setOffset(int x, int z, int offset) throws IOException {
        offsets[x + z * 32] = offset;
        file.seek((x + z * 32) * 4);
        file.writeInt(offset);
    }

    private void setTimestamp(int x, int z, int value) throws IOException {
        chunkTimestamps[x + z * 32] = value;
        file.seek(SECTOR_BYTES + (x + z * 32) * 4);
        file.writeInt(value);
    }

    /** Closes the underlying file handle. */
    public void close() throws IOException {
        file.close();
    }
}