
Commit e59cdec

Change non-public top-level classes to be nested classes.
1 parent: 80d964f

5 files changed, +334 −334 lines changed

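Both hunks shown below apply the same mechanical change: a package-private top-level class is moved inside the public class of its file as a static nested class (static, so it picks up no hidden reference to an enclosing instance) and its body is re-indented one level. A minimal sketch of the pattern, using hypothetical names (`Outer`, `Helper`) that are not from the commit:

```java
// Before: Outer.java held a second, package-private top-level class:
//
//   public class Outer { ... }
//   class Helper { ... }
//
// After: the helper becomes a static nested class of the public one.
public class Outer {

  static class Helper {
    // "static" keeps the behaviour of the old top-level class:
    // no implicit reference to an Outer instance is captured.
  }
}
```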

src/main/java/org/seqdoop/hadoop_bam/BCFRecordReader.java

Lines changed: 60 additions & 60 deletions
@@ -177,77 +177,77 @@ public boolean nextKeyValue() throws IOException {
     vc.set(v);
     return true;
   }
-}
-
-class BGZFLimitingStream extends InputStream {
 
-  private final BlockCompressedInputStream bgzf;
-  private final long virtEnd;
-  private byte[] readBuf = new byte[1];
-
-  public BGZFLimitingStream(BlockCompressedInputStream stream, long virtualEnd) {
-    bgzf = stream;
-    virtEnd = virtualEnd;
-  }
+  static class BGZFLimitingStream extends InputStream {
 
-  @Override
-  public void close() throws IOException {
-    bgzf.close();
-  }
+    private final BlockCompressedInputStream bgzf;
+    private final long virtEnd;
+    private byte[] readBuf = new byte[1];
 
-  @Override
-  public int read() throws IOException {
-    switch (read(readBuf)) {
-      case 1:
-        return readBuf[0];
-      case -1:
-        return -1;
-      default:
-        assert false;
-        return -1;
+    public BGZFLimitingStream(BlockCompressedInputStream stream, long virtualEnd) {
+      bgzf = stream;
+      virtEnd = virtualEnd;
     }
-  }
 
-  @Override
-  public int read(byte[] buf, int off, int len) throws IOException {
-
-    int totalRead = 0;
-    long virt;
-
-    final int lastLen = (int) virtEnd & 0xffff;
-
-    while ((virt = bgzf.getFilePointer()) >>> 16 != virtEnd >>> 16) {
-      // We're not in the last BGZF block yet. Unfortunately
-      // BlockCompressedInputStream doesn't expose the length of the current
-      // block, so we can't simply (possibly repeatedly) read the current
-      // block to the end. Instead, we read at most virtEnd & 0xffff at a
-      // time, which ensures that we can't overshoot virtEnd even if the
-      // next block starts immediately.
-      final int r = bgzf.read(buf, off, Math.min(len, lastLen));
-      if (r == -1) {
-        return totalRead == 0 ? -1 : totalRead;
-      }
+    @Override
+    public void close() throws IOException {
+      bgzf.close();
+    }
 
-      totalRead += r;
-      len -= r;
-      if (len == 0) {
-        return totalRead;
+    @Override
+    public int read() throws IOException {
+      switch (read(readBuf)) {
+        case 1:
+          return readBuf[0];
+        case -1:
+          return -1;
+        default:
+          assert false;
+          return -1;
       }
-      off += r;
     }
 
-    // We're in the last BGZF block: read only up to lastLen.
-    len = Math.min(len, ((int) virt & 0xffff) - lastLen);
-    while (len > 0) {
-      final int r = bgzf.read(buf, off, len);
-      if (r == -1) {
-        return totalRead == 0 ? -1 : totalRead;
+    @Override
+    public int read(byte[] buf, int off, int len) throws IOException {
+
+      int totalRead = 0;
+      long virt;
+
+      final int lastLen = (int) virtEnd & 0xffff;
+
+      while ((virt = bgzf.getFilePointer()) >>> 16 != virtEnd >>> 16) {
+        // We're not in the last BGZF block yet. Unfortunately
+        // BlockCompressedInputStream doesn't expose the length of the current
+        // block, so we can't simply (possibly repeatedly) read the current
+        // block to the end. Instead, we read at most virtEnd & 0xffff at a
+        // time, which ensures that we can't overshoot virtEnd even if the
+        // next block starts immediately.
+        final int r = bgzf.read(buf, off, Math.min(len, lastLen));
+        if (r == -1) {
+          return totalRead == 0 ? -1 : totalRead;
+        }
+
+        totalRead += r;
+        len -= r;
+        if (len == 0) {
+          return totalRead;
+        }
+        off += r;
       }
 
-      totalRead += r;
-      len -= r;
-      off += r;
+      // We're in the last BGZF block: read only up to lastLen.
+      len = Math.min(len, ((int) virt & 0xffff) - lastLen);
+      while (len > 0) {
+        final int r = bgzf.read(buf, off, len);
+        if (r == -1) {
+          return totalRead == 0 ? -1 : totalRead;
+        }
+
+        totalRead += r;
+        len -= r;
+        off += r;
+      }
+      return totalRead == 0 ? -1 : totalRead;
     }
-    return totalRead == 0 ? -1 : totalRead;
   }
 }
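For context, BGZFLimitingStream bounds reads by a BGZF virtual file pointer, whose upper 48 bits address a compressed block and whose lower 16 bits are an offset into that block's uncompressed data. The sketch below is a hypothetical illustration of how such a stream could be used to read one file split; it is not code from the commit, and it assumes it lives in the org.seqdoop.hadoop_bam package (the nested class stays package-private) with htsjdk's BlockCompressedInputStream on the classpath.

```java
package org.seqdoop.hadoop_bam;

import java.io.IOException;
import java.io.InputStream;

import htsjdk.samtools.util.BlockCompressedInputStream;

// Hypothetical helper, not part of hadoop-bam.
final class SplitStreamSketch {
  // Returns a stream that yields only the bytes between two virtual file
  // pointers, so a BCF codec reading from it cannot run past the split end.
  static InputStream openSplit(BlockCompressedInputStream bgzf,
                               long virtualStart, long virtualEnd) throws IOException {
    bgzf.seek(virtualStart);  // position at the split's first record
    return new BCFRecordReader.BGZFLimitingStream(bgzf, virtualEnd);
  }
}
```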

src/main/java/org/seqdoop/hadoop_bam/BCFRecordWriter.java

Lines changed: 43 additions & 43 deletions
@@ -122,57 +122,57 @@ protected void writeRecord(VariantContext vc) {
 
     writer.add(vc);
   }
-}
-
-// We must always call writer.writeHeader() because the writer requires
-// the header in writer.add(), and writeHeader() is the only way to give
-// the header to the writer. Thus, we use this class to simply throw away
-// output until after the header's been written.
-//
-// This is, of course, a HACK and a slightly dangerous one: if writer
-// does any buffering of its own and doesn't flush after writing the
-// header, this isn't as easy as this.
-//
-// In addition we do BGZF compression here, to simplify things.
-final class BCFStoppableOutputStream extends FilterOutputStream {
 
-  private final OutputStream origOut;
-  public boolean stopped;
-
-  public BCFStoppableOutputStream(boolean startStopped, OutputStream out) {
-    super(new BlockCompressedOutputStream(out, null));
-    origOut = out;
-    stopped = startStopped;
-  }
+  // We must always call writer.writeHeader() because the writer requires
+  // the header in writer.add(), and writeHeader() is the only way to give
+  // the header to the writer. Thus, we use this class to simply throw away
+  // output until after the header's been written.
+  //
+  // This is, of course, a HACK and a slightly dangerous one: if writer
+  // does any buffering of its own and doesn't flush after writing the
+  // header, this isn't as easy as this.
+  //
+  // In addition we do BGZF compression here, to simplify things.
+  static final class BCFStoppableOutputStream extends FilterOutputStream {
+
+    private final OutputStream origOut;
+    public boolean stopped;
+
+    public BCFStoppableOutputStream(boolean startStopped, OutputStream out) {
+      super(new BlockCompressedOutputStream(out, null));
+      origOut = out;
+      stopped = startStopped;
+    }
 
-  @Override
-  public void write(int b) throws IOException {
-    if (!stopped) {
-      super.write(b);
+    @Override
+    public void write(int b) throws IOException {
+      if (!stopped) {
+        super.write(b);
+      }
     }
-  }
 
-  @Override
-  public void write(byte[] b) throws IOException {
-    if (!stopped) {
-      super.write(b);
+    @Override
+    public void write(byte[] b) throws IOException {
+      if (!stopped) {
+        super.write(b);
+      }
     }
-  }
 
-  @Override
-  public void write(byte[] b, int off, int len) throws IOException {
-    if (!stopped) {
-      super.write(b, off, len);
+    @Override
+    public void write(byte[] b, int off, int len) throws IOException {
+      if (!stopped) {
+        super.write(b, off, len);
+      }
     }
-  }
 
-  @Override
-  public void close() throws IOException {
-    // Don't close the BlockCompressedOutputStream, as we don't want
-    // the BGZF terminator.
-    this.out.flush();
+    @Override
+    public void close() throws IOException {
+      // Don't close the BlockCompressedOutputStream, as we don't want
+      // the BGZF terminator.
+      this.out.flush();
 
-    // Instead, close the lower-level output stream directly.
-    origOut.close();
+      // Instead, close the lower-level output stream directly.
+      origOut.close();
+    }
   }
 }
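The comments in the hunk explain the intent of BCFStoppableOutputStream: the wrapped writer must be handed a header via writeHeader(), but when this output part will be concatenated after one that already carries the header, those header bytes should be discarded. A hypothetical, stand-alone illustration of the stream's behaviour follows; it is not code from the commit, and it assumes the org.seqdoop.hadoop_bam package for access to the package-private nested class.

```java
package org.seqdoop.hadoop_bam;

import java.io.ByteArrayOutputStream;
import java.io.IOException;

// Hypothetical demo, not part of hadoop-bam.
final class StoppableStreamSketch {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream rawOut = new ByteArrayOutputStream();

    // startStopped = true: whatever the wrapped writer emits first
    // (typically the header) is thrown away.
    BCFRecordWriter.BCFStoppableOutputStream out =
        new BCFRecordWriter.BCFStoppableOutputStream(true, rawOut);
    out.write(new byte[] {'h', 'd', 'r'});  // discarded while stopped

    out.stopped = false;                    // header done; keep record bytes
    out.write(new byte[] {'r', 'e', 'c'});  // BGZF-compressed into rawOut

    // close() flushes the BGZF stream without writing its terminator block,
    // then closes rawOut directly, as the comments in the diff describe.
    out.close();
  }
}
```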
