hg qrecord -- like record, but for mq

I'm a former Darcs user, and I've discovered that it is very convenient to do development with MQ first, and only move the patches into the project's permanent history once they are 'ready'. Usually I work on some topic, temporarily forgetting about version control, and just code, experiment, debug, etc. After some time I reach the point where my work should actually go into patches/commits, and here is the problem: as it stands, there is no way to put one part of the changes into one patch and another part into a second patch. Splitting works only when the changes touch separate files; for semantically different changes touching the same file(s) there is no pretty way to put them into separate patches. For some time I have tolerated the pain of running vim patches/... and moving hunks between files by hand, but I think this hurts my productivity badly. So, here is the first step towards untying the problem: let's have 'hg qrecord' for mq, just as we have 'hg record' for ordinary commits!

File last commit:

r5368:61462e7d default
r5830:c32d41af default
changegroup.py
122 lines | 3.5 KiB | text/x-python
"""
changegroup.py - Mercurial changegroup manipulation functions
Copyright 2006 Matt Mackall <mpm@selenic.com>
This software may be used and distributed according to the terms
of the GNU General Public License, incorporated herein by reference.
"""
from i18n import _
import struct, os, bz2, zlib, util, tempfile
def getchunk(source):
    """get a chunk from a changegroup"""
    d = source.read(4)
    if not d:
        return ""
    l = struct.unpack(">l", d)[0]
    if l <= 4:
        return ""
    d = source.read(l - 4)
    if len(d) < l - 4:
        raise util.Abort(_("premature EOF reading chunk"
                           " (got %d bytes, expected %d)")
                         % (len(d), l - 4))
    return d
def chunkiter(source):
    """iterate through the chunks in source"""
    while 1:
        c = getchunk(source)
        if not c:
            break
        yield c
def chunkheader(length):
    """build a changegroup chunk header"""
    return struct.pack(">l", length + 4)

def closechunk():
    """return a zero-length chunk header, marking the end of a chunk group"""
    return struct.pack(">l", 0)
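
# A minimal sketch (not part of the original module, assumes Python 2 and
# cStringIO) of the chunk framing defined above: every chunk is prefixed by a
# 4-byte big-endian length that counts the prefix itself, and a bare zero
# length ends the group, so a framed payload round-trips through chunkiter().
def _demo_chunk_roundtrip():
    from cStringIO import StringIO
    payload = "some chunk data"
    stream = StringIO(chunkheader(len(payload)) + payload + closechunk())
    return list(chunkiter(stream))      # -> ["some chunk data"]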
class nocompress(object):
    # pass-through "compressor" used for the uncompressed bundle types
    def compress(self, x):
        return x
    def flush(self):
        return ""

# map bundle type name -> (header written to the file, compressor factory)
bundletypes = {
    "": ("", nocompress),
    "HG10UN": ("HG10UN", nocompress),
    "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
    "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
}
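
# A small sketch (hypothetical, not part of the original module) of how the
# table above is consumed: look up the on-disk header and a compressor factory
# for the requested bundle type.  Note that "HG10BZ" stores only "HG10" here,
# because the "BZ" magic at the start of the bz2 stream supplies the remaining
# two bytes of the 6-byte header that readbundle() below expects.
def _demo_bundletype(bundletype="HG10GZ"):
    header, compressor = bundletypes[bundletype]
    z = compressor()
    return header + z.compress("raw changegroup bytes") + z.flush()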
def writebundle(cg, filename, bundletype):
    """Write a bundle file and return its filename.

    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    bz2 compression can be turned off.
    The bundle file will be deleted in case of errors.
    """

    fh = None
    cleanup = None
    try:
        if filename:
            fh = open(filename, "wb")
        else:
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, "wb")
        cleanup = filename

        header, compressor = bundletypes[bundletype]
        fh.write(header)
        z = compressor()

        # parse the changegroup data, otherwise we will block
        # in case of sshrepo because we don't know the end of the stream
        # an empty chunkiter is the end of the changegroup
        empty = False
        while not empty:
            empty = True
            for chunk in chunkiter(cg):
                empty = False
                fh.write(z.compress(chunkheader(len(chunk))))
                pos = 0
                while pos < len(chunk):
                    next = pos + 2**20
                    fh.write(z.compress(chunk[pos:next]))
                    pos = next
            fh.write(z.compress(closechunk()))
        fh.write(z.flush())
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            os.unlink(cleanup)
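
# A usage sketch for writebundle() (hypothetical, not part of the original
# module): build a fake changegroup stream in memory -- one group of chunks
# followed by an empty group, which is how the loop above detects the end --
# and write it out uncompressed to a temporary file.
def _demo_writebundle():
    from cStringIO import StringIO
    data = "fake changegroup chunk"
    group = chunkheader(len(data)) + data + closechunk()
    cg = StringIO(group + closechunk())     # trailing empty group ends it
    return writebundle(cg, None, "HG10UN")  # returns the temp file's name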
def readbundle(fh, fname):
    header = fh.read(6)
    if not header.startswith("HG"):
        raise util.Abort(_("%s: not a Mercurial bundle file") % fname)
    elif not header.startswith("HG10"):
        raise util.Abort(_("%s: unknown bundle version") % fname)

    if header == "HG10BZ":
        def generator(f):
            zd = bz2.BZ2Decompressor()
            # the leading "BZ" of the bz2 stream was consumed as part of the
            # 6-byte bundle header above, so feed it back to the decompressor
            zd.decompress("BZ")
            for chunk in util.filechunkiter(f, 4096):
                yield zd.decompress(chunk)
        return util.chunkbuffer(generator(fh))
    elif header == "HG10UN":
        return fh

    raise util.Abort(_("%s: unknown bundle compression type")
                     % fname)
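
# Reading a bundle back (hypothetical sketch, not part of the original
# module): readbundle() checks the 6-byte header and returns a stream
# positioned at the start of the changegroup data, decompressing on the fly
# if needed; the caller then walks the chunk groups with chunkiter().
def _demo_readbundle(path):
    fh = readbundle(open(path, "rb"), path)
    return list(chunkiter(fh))      # chunks of the first group only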