##// END OF EJS Templates
strip: use the 'writenewbundle' function to get bundle on disk...
marmoute -
r32468:4c4d9190 default
parent child Browse files
Show More
@@ -1,358 +1,360 b''
1 # repair.py - functions for repository repair for mercurial
1 # repair.py - functions for repository repair for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Matt Mackall
4 # Copyright 2007 Matt Mackall
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import short
15 from .node import short
16 from . import (
16 from . import (
17 bundle2,
17 bundle2,
18 changegroup,
18 changegroup,
19 discovery,
19 error,
20 error,
20 exchange,
21 exchange,
21 obsolete,
22 obsolete,
22 util,
23 util,
23 )
24 )
24
25
def _bundle(repo, bases, heads, node, suffix, compress=True):
    """create a bundle with the specified revisions as a backup"""
    cgversion = changegroup.safeversion(repo)

    vfs = repo.vfs
    backupdir = "strip-backup"
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Include a hash of all the nodes in the filename for uniqueness
    ctxs = repo.set('%ln::%ln', bases, heads)
    hexnodes = sorted(c.hex() for c in ctxs)
    digest = hashlib.sha1(''.join(hexnodes)).hexdigest()
    name = "%s/%s-%s-%s.hg" % (backupdir, short(node), digest[:8], suffix)

    # Pick the on-disk container: HG20 whenever the changegroup version
    # requires it, otherwise one of the legacy HG10 formats.
    compmode = None
    if cgversion != '01':
        bundletype = "HG20"
        if compress:
            compmode = 'BZ'
    elif compress:
        bundletype = "HG10BZ"
    else:
        bundletype = "HG10UN"

    outgoing = discovery.outgoing(repo, missingroots=bases,
                                  missingheads=heads)
    contentopts = {'cg.version': cgversion}
    return bundle2.writenewbundle(repo.ui, repo, 'strip', name, bundletype,
                                  outgoing, contentopts, vfs,
                                  compression=compmode)
53
55
def _collectfiles(repo, striprev):
    """find out the filelogs affected by the strip"""
    touched = set()

    # every changeset at or above striprev will be removed, so any file
    # it modified has a filelog that the strip must rewrite
    for rev in xrange(striprev, len(repo)):
        touched.update(repo[rev].files())

    return sorted(touched)
62
64
63 def _collectbrokencsets(repo, files, striprev):
65 def _collectbrokencsets(repo, files, striprev):
64 """return the changesets which will be broken by the truncation"""
66 """return the changesets which will be broken by the truncation"""
65 s = set()
67 s = set()
66 def collectone(revlog):
68 def collectone(revlog):
67 _, brokenset = revlog.getstrippoint(striprev)
69 _, brokenset = revlog.getstrippoint(striprev)
68 s.update([revlog.linkrev(r) for r in brokenset])
70 s.update([revlog.linkrev(r) for r in brokenset])
69
71
70 collectone(repo.manifestlog._revlog)
72 collectone(repo.manifestlog._revlog)
71 for fname in files:
73 for fname in files:
72 collectone(repo.file(fname))
74 collectone(repo.file(fname))
73
75
74 return s
76 return s
75
77
def strip(ui, repo, nodelist, backup=True, topic='backup'):
    # This function operates within a transaction of its own, but does
    # not take any lock on the repo.
    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in ['none', 'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations. Creating that bundle
    # requires the heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    saveheads = set(saverevs)
    for rev in cl.revs(start=striprev + 1):
        if any(p in tostrip for p in cl.parentrevs(rev)):
            tostrip.add(rev)

        if rev not in tostrip:
            saverevs.add(rev)
            saveheads.difference_update(cl.parentrevs(rev))
            saveheads.add(rev)
    saveheads = [cl.node(rev) for rev in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(rev) for rev in saverevs]
    stripbases = [cl.node(rev) for rev in tostrip]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        newbmtarget = '.'

    # bookmarks pointing at stripped changesets must be moved afterwards
    bm = repo._bookmarks
    updatebm = []
    for mark in bm:
        rev = repo[bm[mark]].rev()
        if rev in tostrip:
            updatebm.append(mark)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    node = nodelist[-1]
    if backup:
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") %
                       vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    tmpbundlefile = None
    if saveheads:
        # do not compress temporary bundle if we remove it from disk later
        tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
                                compress=False)

    mfst = repo.manifestlog._revlog

    curtr = repo.currenttransaction()
    if curtr is not None:
        del curtr # avoid carrying reference to transaction for nothing
        raise error.ProgrammingError('cannot strip from inside a transaction')

    try:
        with repo.transaction("strip") as tr:
            offset = len(tr.entries)

            tr.startgroup()
            cl.strip(striprev, tr)
            mfst.strip(striprev, tr)
            striptrees(repo, tr, striprev, files)

            for fn in files:
                repo.file(fn).strip(striprev, tr)
            tr.endgroup()

            # truncate every touched store file back to its pre-strip size
            for idx in xrange(offset, len(tr.entries)):
                entryfile, troffset, ignore = tr.entries[idx]
                with repo.svfs(entryfile, 'a', checkambig=True) as fp:
                    fp.truncate(troffset)
                if troffset == 0:
                    repo.store.markremoved(entryfile)

        if tmpbundlefile:
            # re-apply the revisions we saved above the strip point
            ui.note(_("adding branch\n"))
            fh = vfs.open(tmpbundlefile, "rb")
            gen = exchange.readbundle(ui, fh, tmpbundlefile, vfs)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            bundleurl = 'bundle:' + vfs.join(tmpbundlefile)
            if isinstance(gen, bundle2.unbundle20):
                with repo.transaction('strip') as tr:
                    tr.hookargs = {'source': 'strip',
                                   'url': bundleurl}
                    bundle2.applybundle(repo, gen, tr, source='strip',
                                        url=bundleurl)
            else:
                gen.apply(repo, 'strip', bundleurl, True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            fh.close()
        repo._phasecache.invalidate()

        for mark in updatebm:
            bm[mark] = repo[newbmtarget].node()

        with repo.lock():
            with repo.transaction('repair') as tr:
                bm.recordchange(tr)

        # remove undo files
        for undovfs, undofile in repo.undofiles():
            try:
                undovfs.unlink(undofile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n') %
                            (undovfs.join(undofile), str(e)))

    except: # re-raises
        if backupfile:
            ui.warn(_("strip failed, backup bundle stored in '%s'\n")
                    % vfs.join(backupfile))
        if tmpbundlefile:
            ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
                    % vfs.join(tmpbundlefile))
            ui.warn(_("(fix the problem, then recover the changesets with "
                      "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
        raise
    else:
        if tmpbundlefile:
            # Remove temporary bundle only if there were no exceptions
            vfs.unlink(tmpbundlefile)

    repo.destroyed()
    # return the backup file path (or None if 'backup' was False) so
    # extensions can use it
    return backupfile
237
239
def striptrees(repo, tr, striprev, files):
    """Strip directory manifest revlogs down to striprev.

    Only does work on treemanifest repositories; elsewhere it is a
    no-op (safe but unnecessary otherwise).
    """
    if 'treemanifest' not in repo.requirements:
        return
    prefix, suffix = 'meta/', '00manifest.i'
    for unencoded, encoded, size in repo.store.datafiles():
        if unencoded.startswith(prefix) and unencoded.endswith(suffix):
            treedir = unencoded[len(prefix):-len(suffix)]
            repo.manifestlog._revlog.dirlog(treedir).strip(striprev, tr)
246
248
def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.
    """
    repo = repo.unfiltered()

    if 'fncache' not in repo.requirements:
        ui.warn(_('(not rebuilding fncache because repository does not '
                  'support fncache)\n'))
        return

    with repo.lock():
        fnc = repo.store.fncache
        # Trigger load of fncache.
        if 'irrelevant' in fnc:
            pass

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        repolen = len(repo)
        for rev in repo:
            ui.progress(_('rebuilding'), rev, total=repolen,
                        unit=_('changesets'))

            for fname in repo[rev].files():
                # Skip files already examined, to minimize I/O.
                if fname in seenfiles:
                    continue
                seenfiles.add(fname)

                for path in ('data/%s.i' % fname, 'data/%s.d' % fname):
                    if repo.store._exists(path):
                        newentries.add(path)

        ui.progress(_('rebuilding'), None)

        if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
            for dirname in util.dirs(seenfiles):
                for path in ('meta/%s/00manifest.i' % dirname,
                             'meta/%s/00manifest.d' % dirname):
                    if repo.store._exists(path):
                        newentries.add(path)

        toadd = newentries - oldentries
        toremove = oldentries - newentries
        for p in sorted(toremove):
            ui.write(_('removing %s\n') % p)
        for p in sorted(toadd):
            ui.write(_('adding %s\n') % p)

        if toadd or toremove:
            ui.write(_('%d items added, %d removed from fncache\n') %
                     (len(toadd), len(toremove)))
            fnc.entries = newentries
            fnc._dirty = True

            with repo.transaction('fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_('fncache already up to date\n'))
318
320
def stripbmrevset(repo, mark):
    """
    The revset to strip when strip is called with -B mark

    Needs to live here so extensions can use it and wrap it even when strip is
    not enabled or not present on a box.
    """
    # everything reachable from the bookmark that is not also reachable
    # from any other head or bookmark
    revset = ("ancestors(bookmark(%s)) - "
              "ancestors(head() and not bookmark(%s)) - "
              "ancestors(bookmark() and not bookmark(%s))")
    return repo.revs(revset, mark, mark, mark)
330
332
def deleteobsmarkers(obsstore, indices):
    """Delete some obsmarkers from obsstore and return how many were deleted

    'indices' is a list of ints which are the indices
    of the markers to be deleted.

    Every invocation of this function completely rewrites the obsstore file,
    skipping the markers we want to be removed. The new temporary file is
    created, remaining markers are written there and on .close() this file
    gets atomically renamed to obsstore, thus guaranteeing consistency."""
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    # Membership tests dominate the loop below; with a plain list this
    # was O(len(indices)) per marker (quadratic overall). A set makes
    # each test O(1) without changing which markers are kept.
    indices = set(indices)

    left = []
    deleted = 0
    for idx, marker in enumerate(obsstore._all):
        if idx in indices:
            deleted += 1
        else:
            left.append(marker)

    # atomictemp writes to a temp file that is renamed over 'obsstore'
    # on close, so readers never observe a partially-written store
    newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
    for data in obsolete.encodemarkers(left, True, obsstore._version):
        newobsstorefile.write(data)
    newobsstorefile.close()
    return deleted
General Comments 0
You need to be logged in to leave comments. Login now