##// END OF EJS Templates
make incoming work via ssh (issue139); move chunk code into separate module....
Thomas Arendsen Hein -
r1981:736b6c96 default
parent child Browse files
Show More
@@ -0,0 +1,43 b''
1 """
2 changegroup.py - Mercurial changegroup manipulation functions
3
4 Copyright 2006 Matt Mackall <mpm@selenic.com>
5
6 This software may be used and distributed according to the terms
7 of the GNU General Public License, incorporated herein by reference.
8 """
9 import struct
10 from demandload import *
11 demandload(globals(), "util")
12
def getchunk(source):
    """Read and return the payload of the next chunk in a changegroup.

    A chunk is a 4-byte big-endian length header (which counts itself)
    followed by the payload.  Returns "" at end of stream or on the
    terminating (empty) chunk; aborts on a truncated payload.
    """
    header = source.read(4)
    if not header:
        # end of stream
        return ""
    total = struct.unpack(">l", header)[0]
    payload_len = total - 4
    if payload_len <= 0:
        # a zero-length chunk terminates the changegroup
        return ""
    payload = source.read(payload_len)
    if len(payload) < payload_len:
        raise util.Abort(_("premature EOF reading chunk"
                           " (got %d bytes, expected %d)")
                         % (len(payload), payload_len))
    return payload
27
def chunkiter(source):
    """Generate each chunk payload in *source*.

    Iteration stops at the empty string returned by getchunk() for the
    terminating chunk (or end of stream).
    """
    chunk = getchunk(source)
    while chunk:
        yield chunk
        chunk = getchunk(source)
35
def genchunk(data):
    """Build a changegroup chunk for *data*.

    The chunk is a 4-byte big-endian length header (which counts its own
    four bytes) followed by the payload.

    Fix: assemble with direct concatenation instead of '"%s%s" %'.
    Concatenation is byte-for-byte identical for str payloads, and it
    stays correct for bytes payloads, where %-formatting would embed a
    repr of the header/data rather than the raw octets.
    """
    header = struct.pack(">l", len(data) + 4)
    return header + data
40
def closechunk():
    """Return the zero-length chunk header that terminates a changegroup."""
    return struct.pack(">l", 0)
43
@@ -1,219 +1,201 b''
1 1 """
2 2 bundlerepo.py - repository class for viewing uncompressed bundles
3 3
4 4 This provides a read-only repository interface to bundles as if
5 5 they were part of the actual repository.
6 6
7 7 Copyright 2006 Benoit Boissinot <benoit.boissinot@ens-lyon.org>
8 8
9 9 This software may be used and distributed according to the terms
10 10 of the GNU General Public License, incorporated herein by reference.
11 11 """
12 12
13 13 from node import *
14 14 from i18n import gettext as _
15 15 from demandload import demandload
16 demandload(globals(), "util os struct")
16 demandload(globals(), "changegroup util os struct")
17 17
18 18 import localrepo, changelog, manifest, filelog, revlog
19 19
20 def getchunk(source):
21 """get a chunk from a group"""
22 d = source.read(4)
23 if not d:
24 return ""
25 l = struct.unpack(">l", d)[0]
26 if l <= 4:
27 return ""
28 d = source.read(l - 4)
29 if len(d) < l - 4:
30 raise util.Abort(_("premature EOF reading chunk"
31 " (got %d bytes, expected %d)")
32 % (len(d), l - 4))
33 return d
34
class bundlerevlog(revlog.revlog):
    """A revlog overlaid with the revisions found in an (open) bundle file."""

    def __init__(self, opener, indexfile, datafile, bundlefile,
                 linkmapper=None):
        # How it works:
        # to retrieve a revision, we need to know the offset of
        # the revision in the bundlefile (an opened file).
        #
        # We store this offset in the index (start); to differentiate a
        # rev in the bundle from a rev in the revlog, we check
        # len(index[r]).  If the tuple is bigger than 7, it is a bundle
        # entry (it is bigger since we also store the delta-base node).
        revlog.revlog.__init__(self, opener, indexfile, datafile)
        self.bundlefile = bundlefile

        def chunkpositer():
            # yield each chunk together with its payload offset in the file;
            # tell() is taken after the read, so subtract the payload length
            for chunk in changegroup.chunkiter(bundlefile):
                pos = bundlefile.tell()
                yield chunk, pos - len(chunk)

        n = self.count()
        prev = None
        for chunk, start in chunkpositer():
            size = len(chunk)
            if size < 80:
                raise util.Abort("invalid changegroup")
            # the first 80 bytes hold node, p1, p2 and the changeset id
            start += 80
            size -= 80
            node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
            if node in self.nodemap:
                # already known: just remember it as the new delta base
                prev = node
                continue
            for p in (p1, p2):
                if not p in self.nodemap:
                    raise RevlogError(_("unknown parent %s") % short(p1))
            if linkmapper is None:
                link = n
            else:
                link = linkmapper(cs)

            if not prev:
                prev = p1
            # start, size, base is not used, link, p1, p2, delta ref
            e = (start, size, None, link, p1, p2, node, prev)
            self.index.append(e)
            self.nodemap[node] = n
            prev = node
            n += 1

    def bundle(self, rev):
        """Tell whether *rev* comes from the bundle rather than the revlog.

        Bundle entries carry an extra field (the delta-base node), so
        their index tuples are longer than the usual 7 elements.
        """
        if rev < 0:
            return False
        return len(self.index[rev]) > 7

    def bundlebase(self, rev):
        """Return the node this bundle rev's delta applies against."""
        return self.index[rev][7]

    def chunk(self, rev):
        """Return the raw (delta) chunk for *rev*.

        Warning: for a bundle rev the diff is against bundlebase(rev),
        not against rev - 1.
        XXX: could use some caching.
        """
        if self.bundle(rev):
            # read the stored chunk straight out of the bundle file
            self.bundlefile.seek(self.start(rev))
            return self.bundlefile.read(self.length(rev))
        return revlog.revlog.chunk(self, rev)

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        if self.bundle(rev1) and self.bundle(rev2):
            # hot path for bundle: the stored chunk already is the delta
            revb = self.rev(self.bundlebase(rev2))
            if revb == rev1:
                return self.chunk(rev2)
        elif not self.bundle(rev1) and not self.bundle(rev2):
            # NOTE(review): revlog.revlog.chunk normally takes a single
            # rev -- confirm this two-argument call; it looks like it was
            # meant to be a revdiff-style delta between rev1 and rev2
            return revlog.revlog.chunk(self, rev1, rev2)

        return self.diff(self.revision(self.node(rev1)),
                         self.revision(self.node(rev2)))

    def revision(self, node):
        """Return the uncompressed text of *node*, rebuilding it from
        bundle deltas where needed."""
        if node == nullid:
            return ""

        text = None
        chain = []
        base = node
        rev = self.rev(base)
        # walk the delta chain back until we hit cached text or revlog data
        while self.bundle(rev):
            if self.cache and self.cache[0] == base:
                text = self.cache[2]
                break
            chain.append(rev)
            base = self.bundlebase(rev)
            rev = self.rev(base)
        if text is None:
            text = revlog.revlog.revision(self, base)

        # apply the collected deltas, innermost first
        while chain:
            delta = self.chunk(chain.pop())
            text = self.patches(text, [delta])

        p1, p2 = self.parents(node)
        if node != revlog.hash(text, p1, p2):
            raise RevlogError(_("integrity check failed on %s:%d")
                              % (self.datafile, self.rev(node)))

        self.cache = (node, rev, text)
        return text

    # the overlay is strictly read-only: every mutator is disabled
    def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
        raise NotImplementedError
    def addgroup(self, revs, linkmapper, transaction, unique=0):
        raise NotImplementedError
    def strip(self, rev, minlink):
        raise NotImplementedError
    def checksize(self):
        raise NotImplementedError
152 134
class bundlechangelog(bundlerevlog, changelog.changelog):
    """Changelog view that also sees the revisions inside a bundle file."""
    def __init__(self, opener, bundlefile):
        changelog.changelog.__init__(self, opener)
        bundlerevlog.__init__(self, opener, "00changelog.i", "00changelog.d",
                              bundlefile)
158 140
class bundlemanifest(bundlerevlog, manifest.manifest):
    """Manifest view that also sees the revisions inside a bundle file."""
    def __init__(self, opener, bundlefile, linkmapper):
        manifest.manifest.__init__(self, opener)
        bundlerevlog.__init__(self, opener, self.indexfile, self.datafile,
                              bundlefile, linkmapper)
164 146
class bundlefilelog(bundlerevlog, filelog.filelog):
    """Filelog view that also sees the revisions inside a bundle file."""
    def __init__(self, opener, path, bundlefile, linkmapper):
        filelog.filelog.__init__(self, opener, path)
        bundlerevlog.__init__(self, opener, self.indexfile, self.datafile,
                              bundlefile, linkmapper)
170 152
class bundlerepository(localrepo.localrepository):
    """Read-only repository that overlays an uncompressed bundle on a repo."""

    def __init__(self, ui, path, bundlename):
        localrepo.localrepository.__init__(self, ui, path)
        f = open(bundlename, "rb")
        # NOTE(review): the fstat result is never used -- verify whether
        # it can be dropped or was meant for a size/validity check
        s = os.fstat(f.fileno())
        self.bundlefile = f
        header = self.bundlefile.read(6)
        if not header.startswith("HG"):
            raise util.Abort(_("%s: not a Mercurial bundle file") % bundlename)
        elif not header.startswith("HG10"):
            raise util.Abort(_("%s: unknown bundle version") % bundlename)
        elif header == "HG10BZ":
            raise util.Abort(_("%s: compressed bundle not supported")
                             % bundlename)
        elif header == "HG10UN":
            # uncompressed bundle supported
            pass
        else:
            raise util.Abort(_("%s: unknown bundle compression type")
                             % bundlename)
        self.changelog = bundlechangelog(self.opener, self.bundlefile)
        self.manifest = bundlemanifest(self.opener, self.bundlefile,
                                       self.changelog.rev)
        # dict with the mapping 'filename' -> position in the bundle
        self.bundlefilespos = {}
        while 1:
            fname = changegroup.getchunk(self.bundlefile)
            if not fname:
                break
            # remember where this file's delta group starts...
            self.bundlefilespos[fname] = self.bundlefile.tell()
            # ...then skip over its chunks to reach the next file name
            for c in changegroup.chunkiter(self.bundlefile):
                pass

    def dev(self):
        return -1

    def file(self, f):
        """Return a filelog for *f*, bundle-backed when it is in the bundle."""
        if f[0] == '/':
            f = f[1:]
        if f in self.bundlefilespos:
            self.bundlefile.seek(self.bundlefilespos[f])
            return bundlefilelog(self.opener, f, self.bundlefile,
                                 self.changelog.rev)
        else:
            return filelog.filelog(self.opener, f)

    def close(self):
        """Close assigned bundle file immediately."""
        self.bundlefile.close()
@@ -1,3298 +1,3305 b''
1 1 # commands.py - command processing for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from demandload import demandload
9 9 from node import *
10 10 from i18n import gettext as _
11 11 demandload(globals(), "os re sys signal shutil imp urllib pdb")
12 12 demandload(globals(), "fancyopts ui hg util lock revlog templater bundlerepo")
13 13 demandload(globals(), "fnmatch hgweb mdiff random signal tempfile time")
14 14 demandload(globals(), "traceback errno socket version struct atexit sets bz2")
15 demandload(globals(), "changegroup")
15 16
16 17 class UnknownCommand(Exception):
17 18 """Exception raised if command is not in the command table."""
18 19 class AmbiguousCommand(Exception):
19 20 """Exception raised if command shortcut matches more than one command."""
20 21
def filterfiles(filters, files):
    """Return the members of *files* selected by *filters*: exact matches
    first, then every file under a filter treated as a directory prefix."""
    matched = [f for f in files if f in filters]

    for prefix in filters:
        if prefix and not prefix.endswith("/"):
            prefix += "/"
        matched.extend(f for f in files if f.startswith(prefix))
    return matched
29 30
def relpath(repo, args):
    """Rebase each path in *args* onto the repo's current subdirectory.

    When already at the repository root (empty cwd), args are returned
    unchanged.
    """
    cwd = repo.getcwd()
    if not cwd:
        return args
    return [util.normpath(os.path.join(cwd, arg)) for arg in args]
35 36
def matchpats(repo, pats=None, opts=None, head=''):
    """Build matcher arguments via util.cmdmatcher for the given patterns.

    Fix: the previous signature used mutable default arguments
    (pats=[], opts={}) -- those objects are shared across calls, a
    classic Python pitfall.  None sentinels are backward compatible:
    omitted arguments behave exactly as before.
    """
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    cwd = repo.getcwd()
    if not pats and cwd:
        # no patterns given: anchor the include/exclude globs at the
        # current directory (this mutates the caller's opts, as before)
        opts['include'] = [os.path.join(cwd, i) for i in opts['include']]
        opts['exclude'] = [os.path.join(cwd, x) for x in opts['exclude']]
        cwd = ''
    return util.cmdmatcher(repo.root, cwd, pats or ['.'], opts.get('include'),
                           opts.get('exclude'), head)
44 45
def makewalk(repo, pats, opts, node=None, head=''):
    """Return (files, matchfn, iterator); each iterated item is
    (src, fn, path-relative-to-cwd, whether fn was named exactly)."""
    files, matchfn, anypats = matchpats(repo, pats, opts, head)
    exact = dict.fromkeys(files, True)

    def iterate():
        for src, fn in repo.walk(node=node, files=files, match=matchfn):
            yield src, fn, util.pathto(repo.getcwd(), fn), fn in exact

    return files, matchfn, iterate()
52 53
def walk(repo, pats, opts, node=None, head=''):
    """Convenience wrapper yielding makewalk()'s walk results directly."""
    files, matchfn, results = makewalk(repo, pats, opts, node, head)
    for result in results:
        yield result
57 58
58 59 def walkchangerevs(ui, repo, pats, opts):
59 60 '''Iterate over files and the revs they changed in.
60 61
61 62 Callers most commonly need to iterate backwards over the history
62 63 it is interested in. Doing so has awful (quadratic-looking)
63 64 performance, so we use iterators in a "windowed" way.
64 65
65 66 We walk a window of revisions in the desired order. Within the
66 67 window, we first walk forwards to gather data, then in the desired
67 68 order (usually backwards) to display it.
68 69
69 70 This function returns an (iterator, getchange, matchfn) tuple. The
70 71 getchange function returns the changelog entry for a numeric
71 72 revision. The iterator yields 3-tuples. They will be of one of
72 73 the following forms:
73 74
74 75 "window", incrementing, lastrev: stepping through a window,
75 76 positive if walking forwards through revs, last rev in the
76 77 sequence iterated over - use to reset state for the current window
77 78
78 79 "add", rev, fns: out-of-order traversal of the given file names
79 80 fns, which changed during revision rev - use to gather data for
80 81 possible display
81 82
82 83 "iter", rev, None: in-order traversal of the revs earlier iterated
83 84 over with "add" - use to display data'''
84 85
85 86 def increasing_windows(start, end, windowsize=8, sizelimit=512):
86 87 if start < end:
87 88 while start < end:
88 89 yield start, min(windowsize, end-start)
89 90 start += windowsize
90 91 if windowsize < sizelimit:
91 92 windowsize *= 2
92 93 else:
93 94 while start > end:
94 95 yield start, min(windowsize, start-end-1)
95 96 start -= windowsize
96 97 if windowsize < sizelimit:
97 98 windowsize *= 2
98 99
99 100
100 101 files, matchfn, anypats = matchpats(repo, pats, opts)
101 102
102 103 if repo.changelog.count() == 0:
103 104 return [], False, matchfn
104 105
105 106 revs = map(int, revrange(ui, repo, opts['rev'] or ['tip:0']))
106 107 wanted = {}
107 108 slowpath = anypats
108 109 fncache = {}
109 110
110 111 chcache = {}
111 112 def getchange(rev):
112 113 ch = chcache.get(rev)
113 114 if ch is None:
114 115 chcache[rev] = ch = repo.changelog.read(repo.lookup(str(rev)))
115 116 return ch
116 117
117 118 if not slowpath and not files:
118 119 # No files, no patterns. Display all revs.
119 120 wanted = dict(zip(revs, revs))
120 121 if not slowpath:
121 122 # Only files, no patterns. Check the history of each file.
122 123 def filerevgen(filelog):
123 124 for i, window in increasing_windows(filelog.count()-1, -1):
124 125 revs = []
125 126 for j in xrange(i - window, i + 1):
126 127 revs.append(filelog.linkrev(filelog.node(j)))
127 128 revs.reverse()
128 129 for rev in revs:
129 130 yield rev
130 131
131 132 minrev, maxrev = min(revs), max(revs)
132 133 for file_ in files:
133 134 filelog = repo.file(file_)
134 135 # A zero count may be a directory or deleted file, so
135 136 # try to find matching entries on the slow path.
136 137 if filelog.count() == 0:
137 138 slowpath = True
138 139 break
139 140 for rev in filerevgen(filelog):
140 141 if rev <= maxrev:
141 142 if rev < minrev:
142 143 break
143 144 fncache.setdefault(rev, [])
144 145 fncache[rev].append(file_)
145 146 wanted[rev] = 1
146 147 if slowpath:
147 148 # The slow path checks files modified in every changeset.
148 149 def changerevgen():
149 150 for i, window in increasing_windows(repo.changelog.count()-1, -1):
150 151 for j in xrange(i - window, i + 1):
151 152 yield j, getchange(j)[3]
152 153
153 154 for rev, changefiles in changerevgen():
154 155 matches = filter(matchfn, changefiles)
155 156 if matches:
156 157 fncache[rev] = matches
157 158 wanted[rev] = 1
158 159
159 160 def iterate():
160 161 for i, window in increasing_windows(0, len(revs)):
161 162 yield 'window', revs[0] < revs[-1], revs[-1]
162 163 nrevs = [rev for rev in revs[i:i+window]
163 164 if rev in wanted]
164 165 srevs = list(nrevs)
165 166 srevs.sort()
166 167 for rev in srevs:
167 168 fns = fncache.get(rev) or filter(matchfn, getchange(rev)[3])
168 169 yield 'add', rev, fns
169 170 for rev in nrevs:
170 171 yield 'iter', rev, None
171 172 return iterate(), getchange, matchfn
172 173
173 174 revrangesep = ':'
174 175
175 176 def revrange(ui, repo, revs, revlog=None):
176 177 """Yield revision as strings from a list of revision specifications."""
177 178 if revlog is None:
178 179 revlog = repo.changelog
179 180 revcount = revlog.count()
180 181 def fix(val, defval):
181 182 if not val:
182 183 return defval
183 184 try:
184 185 num = int(val)
185 186 if str(num) != val:
186 187 raise ValueError
187 188 if num < 0:
188 189 num += revcount
189 190 if num < 0:
190 191 num = 0
191 192 elif num >= revcount:
192 193 raise ValueError
193 194 except ValueError:
194 195 try:
195 196 num = repo.changelog.rev(repo.lookup(val))
196 197 except KeyError:
197 198 try:
198 199 num = revlog.rev(revlog.lookup(val))
199 200 except KeyError:
200 201 raise util.Abort(_('invalid revision identifier %s'), val)
201 202 return num
202 203 seen = {}
203 204 for spec in revs:
204 205 if spec.find(revrangesep) >= 0:
205 206 start, end = spec.split(revrangesep, 1)
206 207 start = fix(start, 0)
207 208 end = fix(end, revcount - 1)
208 209 step = start > end and -1 or 1
209 210 for rev in xrange(start, end+step, step):
210 211 if rev in seen:
211 212 continue
212 213 seen[rev] = 1
213 214 yield str(rev)
214 215 else:
215 216 rev = fix(spec, None)
216 217 if rev in seen:
217 218 continue
218 219 seen[rev] = 1
219 220 yield str(rev)
220 221
221 222 def make_filename(repo, r, pat, node=None,
222 223 total=None, seqno=None, revwidth=None, pathname=None):
223 224 node_expander = {
224 225 'H': lambda: hex(node),
225 226 'R': lambda: str(r.rev(node)),
226 227 'h': lambda: short(node),
227 228 }
228 229 expander = {
229 230 '%': lambda: '%',
230 231 'b': lambda: os.path.basename(repo.root),
231 232 }
232 233
233 234 try:
234 235 if node:
235 236 expander.update(node_expander)
236 237 if node and revwidth is not None:
237 238 expander['r'] = lambda: str(r.rev(node)).zfill(revwidth)
238 239 if total is not None:
239 240 expander['N'] = lambda: str(total)
240 241 if seqno is not None:
241 242 expander['n'] = lambda: str(seqno)
242 243 if total is not None and seqno is not None:
243 244 expander['n'] = lambda:str(seqno).zfill(len(str(total)))
244 245 if pathname is not None:
245 246 expander['s'] = lambda: os.path.basename(pathname)
246 247 expander['d'] = lambda: os.path.dirname(pathname) or '.'
247 248 expander['p'] = lambda: pathname
248 249
249 250 newname = []
250 251 patlen = len(pat)
251 252 i = 0
252 253 while i < patlen:
253 254 c = pat[i]
254 255 if c == '%':
255 256 i += 1
256 257 c = pat[i]
257 258 c = expander[c]()
258 259 newname.append(c)
259 260 i += 1
260 261 return ''.join(newname)
261 262 except KeyError, inst:
262 263 raise util.Abort(_("invalid format spec '%%%s' in output file name"),
263 264 inst.args[0])
264 265
265 266 def make_file(repo, r, pat, node=None,
266 267 total=None, seqno=None, revwidth=None, mode='wb', pathname=None):
267 268 if not pat or pat == '-':
268 269 return 'w' in mode and sys.stdout or sys.stdin
269 270 if hasattr(pat, 'write') and 'w' in mode:
270 271 return pat
271 272 if hasattr(pat, 'read') and 'r' in mode:
272 273 return pat
273 274 return open(make_filename(repo, r, pat, node, total, seqno, revwidth,
274 275 pathname),
275 276 mode)
276 277
def write_bundle(cg, filename=None, compress=True):
    """Write a bundle file and return its filename.

    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    bz2 compression can be turned off.
    The bundle file will be deleted in case of errors.
    """
    class nocompress(object):
        # drop-in stand-in for a bz2 compressor that leaves data untouched
        def compress(self, x):
            return x
        def flush(self):
            return ""

    fh = None
    cleanup = None
    try:
        if filename:
            if os.path.exists(filename):
                raise util.Abort(_("file '%s' already exists"), filename)
            fh = open(filename, "wb")
        else:
            fd, filename = tempfile.mkstemp(suffix=".hg", prefix="hg-bundle-")
            fh = os.fdopen(fd, "wb")
        cleanup = filename

        if compress:
            fh.write("HG10")
            compressor = bz2.BZ2Compressor(9)
        else:
            fh.write("HG10UN")
            compressor = nocompress()
        # parse the changegroup data, otherwise we will block
        # in case of sshrepo because we don't know the end of the stream

        # an empty chunkiter is the end of the changegroup
        done = False
        while not done:
            done = True
            for piece in changegroup.chunkiter(cg):
                done = False
                fh.write(compressor.compress(changegroup.genchunk(piece)))
        fh.write(compressor.compress(changegroup.closechunk()))
        fh.write(compressor.flush())
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            os.unlink(cleanup)
322 329
323 330 def dodiff(fp, ui, repo, node1, node2, files=None, match=util.always,
324 331 changes=None, text=False, opts={}):
325 332 if not node1:
326 333 node1 = repo.dirstate.parents()[0]
327 334 # reading the data for node1 early allows it to play nicely
328 335 # with repo.changes and the revlog cache.
329 336 change = repo.changelog.read(node1)
330 337 mmap = repo.manifest.read(change[0])
331 338 date1 = util.datestr(change[2])
332 339
333 340 if not changes:
334 341 changes = repo.changes(node1, node2, files, match=match)
335 342 modified, added, removed, deleted, unknown = changes
336 343 if files:
337 344 modified, added, removed = map(lambda x: filterfiles(files, x),
338 345 (modified, added, removed))
339 346
340 347 if not modified and not added and not removed:
341 348 return
342 349
343 350 if node2:
344 351 change = repo.changelog.read(node2)
345 352 mmap2 = repo.manifest.read(change[0])
346 353 date2 = util.datestr(change[2])
347 354 def read(f):
348 355 return repo.file(f).read(mmap2[f])
349 356 else:
350 357 date2 = util.datestr()
351 358 def read(f):
352 359 return repo.wread(f)
353 360
354 361 if ui.quiet:
355 362 r = None
356 363 else:
357 364 hexfunc = ui.verbose and hex or short
358 365 r = [hexfunc(node) for node in [node1, node2] if node]
359 366
360 367 diffopts = ui.diffopts()
361 368 showfunc = opts.get('show_function') or diffopts['showfunc']
362 369 ignorews = opts.get('ignore_all_space') or diffopts['ignorews']
363 370 for f in modified:
364 371 to = None
365 372 if f in mmap:
366 373 to = repo.file(f).read(mmap[f])
367 374 tn = read(f)
368 375 fp.write(mdiff.unidiff(to, date1, tn, date2, f, r, text=text,
369 376 showfunc=showfunc, ignorews=ignorews))
370 377 for f in added:
371 378 to = None
372 379 tn = read(f)
373 380 fp.write(mdiff.unidiff(to, date1, tn, date2, f, r, text=text,
374 381 showfunc=showfunc, ignorews=ignorews))
375 382 for f in removed:
376 383 to = repo.file(f).read(mmap[f])
377 384 tn = None
378 385 fp.write(mdiff.unidiff(to, date1, tn, date2, f, r, text=text,
379 386 showfunc=showfunc, ignorews=ignorews))
380 387
def trimuser(ui, name, rev, revcache):
    """Return the shortened committer name for *rev*, memoized in *revcache*."""
    cached = revcache.get(rev)
    if cached is not None:
        return cached
    short_name = ui.shortuser(name)
    revcache[rev] = short_name
    return short_name
387 394
388 395 class changeset_templater(object):
389 396 '''use templater module to format changeset information.'''
390 397
391 398 def __init__(self, ui, repo, mapfile):
392 399 self.t = templater.templater(mapfile, templater.common_filters,
393 400 cache={'parent': '{rev}:{node|short} ',
394 401 'manifest': '{rev}:{node|short}'})
395 402 self.ui = ui
396 403 self.repo = repo
397 404
398 405 def use_template(self, t):
399 406 '''set template string to use'''
400 407 self.t.cache['changeset'] = t
401 408
402 409 def write(self, thing):
403 410 '''write expanded template.
404 411 uses in-order recursive traverse of iterators.'''
405 412 for t in thing:
406 413 if hasattr(t, '__iter__'):
407 414 self.write(t)
408 415 else:
409 416 self.ui.write(t)
410 417
411 418 def show(self, rev=0, changenode=None, brinfo=None):
412 419 '''show a single changeset or file revision'''
413 420 log = self.repo.changelog
414 421 if changenode is None:
415 422 changenode = log.node(rev)
416 423 elif not rev:
417 424 rev = log.rev(changenode)
418 425
419 426 changes = log.read(changenode)
420 427
421 428 def showlist(name, values, plural=None, **args):
422 429 '''expand set of values.
423 430 name is name of key in template map.
424 431 values is list of strings or dicts.
425 432 plural is plural of name, if not simply name + 's'.
426 433
427 434 expansion works like this, given name 'foo'.
428 435
429 436 if values is empty, expand 'no_foos'.
430 437
431 438 if 'foo' not in template map, return values as a string,
432 439 joined by space.
433 440
434 441 expand 'start_foos'.
435 442
436 443 for each value, expand 'foo'. if 'last_foo' in template
437 444 map, expand it instead of 'foo' for last key.
438 445
439 446 expand 'end_foos'.
440 447 '''
441 448 if plural: names = plural
442 449 else: names = name + 's'
443 450 if not values:
444 451 noname = 'no_' + names
445 452 if noname in self.t:
446 453 yield self.t(noname, **args)
447 454 return
448 455 if name not in self.t:
449 456 if isinstance(values[0], str):
450 457 yield ' '.join(values)
451 458 else:
452 459 for v in values:
453 460 yield dict(v, **args)
454 461 return
455 462 startname = 'start_' + names
456 463 if startname in self.t:
457 464 yield self.t(startname, **args)
458 465 vargs = args.copy()
459 466 def one(v, tag=name):
460 467 try:
461 468 vargs.update(v)
462 469 except (AttributeError, ValueError):
463 470 try:
464 471 for a, b in v:
465 472 vargs[a] = b
466 473 except ValueError:
467 474 vargs[name] = v
468 475 return self.t(tag, **vargs)
469 476 lastname = 'last_' + name
470 477 if lastname in self.t:
471 478 last = values.pop()
472 479 else:
473 480 last = None
474 481 for v in values:
475 482 yield one(v)
476 483 if last is not None:
477 484 yield one(last, tag=lastname)
478 485 endname = 'end_' + names
479 486 if endname in self.t:
480 487 yield self.t(endname, **args)
481 488
482 489 if brinfo:
483 490 def showbranches(**args):
484 491 if changenode in brinfo:
485 492 for x in showlist('branch', brinfo[changenode],
486 493 plural='branches', **args):
487 494 yield x
488 495 else:
489 496 showbranches = ''
490 497
491 498 if self.ui.debugflag:
492 499 def showmanifest(**args):
493 500 args = args.copy()
494 501 args.update(dict(rev=self.repo.manifest.rev(changes[0]),
495 502 node=hex(changes[0])))
496 503 yield self.t('manifest', **args)
497 504 else:
498 505 showmanifest = ''
499 506
500 507 def showparents(**args):
501 508 parents = [[('rev', log.rev(p)), ('node', hex(p))]
502 509 for p in log.parents(changenode)
503 510 if self.ui.debugflag or p != nullid]
504 511 if (not self.ui.debugflag and len(parents) == 1 and
505 512 parents[0][0][1] == rev - 1):
506 513 return
507 514 for x in showlist('parent', parents, **args):
508 515 yield x
509 516
510 517 def showtags(**args):
511 518 for x in showlist('tag', self.repo.nodetags(changenode), **args):
512 519 yield x
513 520
514 521 if self.ui.debugflag:
515 522 files = self.repo.changes(log.parents(changenode)[0], changenode)
516 523 def showfiles(**args):
517 524 for x in showlist('file', files[0], **args): yield x
518 525 def showadds(**args):
519 526 for x in showlist('file_add', files[1], **args): yield x
520 527 def showdels(**args):
521 528 for x in showlist('file_del', files[2], **args): yield x
522 529 else:
523 530 def showfiles(**args):
524 531 for x in showlist('file', changes[3], **args): yield x
525 532 showadds = ''
526 533 showdels = ''
527 534
528 535 props = {
529 536 'author': changes[1],
530 537 'branches': showbranches,
531 538 'date': changes[2],
532 539 'desc': changes[4],
533 540 'file_adds': showadds,
534 541 'file_dels': showdels,
535 542 'files': showfiles,
536 543 'manifest': showmanifest,
537 544 'node': hex(changenode),
538 545 'parents': showparents,
539 546 'rev': rev,
540 547 'tags': showtags,
541 548 }
542 549
543 550 try:
544 551 if self.ui.debugflag and 'changeset_debug' in self.t:
545 552 key = 'changeset_debug'
546 553 elif self.ui.quiet and 'changeset_quiet' in self.t:
547 554 key = 'changeset_quiet'
548 555 elif self.ui.verbose and 'changeset_verbose' in self.t:
549 556 key = 'changeset_verbose'
550 557 else:
551 558 key = 'changeset'
552 559 self.write(self.t(key, **props))
553 560 except KeyError, inst:
554 561 raise util.Abort(_("%s: no key named '%s'") % (self.t.mapfile,
555 562 inst.args[0]))
556 563 except SyntaxError, inst:
557 564 raise util.Abort(_('%s: %s') % (self.t.mapfile, inst.args[0]))
558 565
559 566 class changeset_printer(object):
560 567 '''show changeset information when templating not requested.'''
561 568
562 569 def __init__(self, ui, repo):
563 570 self.ui = ui
564 571 self.repo = repo
565 572
566 573 def show(self, rev=0, changenode=None, brinfo=None):
567 574 '''show a single changeset or file revision'''
568 575 log = self.repo.changelog
569 576 if changenode is None:
570 577 changenode = log.node(rev)
571 578 elif not rev:
572 579 rev = log.rev(changenode)
573 580
574 581 if self.ui.quiet:
575 582 self.ui.write("%d:%s\n" % (rev, short(changenode)))
576 583 return
577 584
578 585 changes = log.read(changenode)
579 586 date = util.datestr(changes[2])
580 587
581 588 parents = [(log.rev(p), self.ui.verbose and hex(p) or short(p))
582 589 for p in log.parents(changenode)
583 590 if self.ui.debugflag or p != nullid]
584 591 if (not self.ui.debugflag and len(parents) == 1 and
585 592 parents[0][0] == rev-1):
586 593 parents = []
587 594
588 595 if self.ui.verbose:
589 596 self.ui.write(_("changeset: %d:%s\n") % (rev, hex(changenode)))
590 597 else:
591 598 self.ui.write(_("changeset: %d:%s\n") % (rev, short(changenode)))
592 599
593 600 for tag in self.repo.nodetags(changenode):
594 601 self.ui.status(_("tag: %s\n") % tag)
595 602 for parent in parents:
596 603 self.ui.write(_("parent: %d:%s\n") % parent)
597 604
598 605 if brinfo and changenode in brinfo:
599 606 br = brinfo[changenode]
600 607 self.ui.write(_("branch: %s\n") % " ".join(br))
601 608
602 609 self.ui.debug(_("manifest: %d:%s\n") %
603 610 (self.repo.manifest.rev(changes[0]), hex(changes[0])))
604 611 self.ui.status(_("user: %s\n") % changes[1])
605 612 self.ui.status(_("date: %s\n") % date)
606 613
607 614 if self.ui.debugflag:
608 615 files = self.repo.changes(log.parents(changenode)[0], changenode)
609 616 for key, value in zip([_("files:"), _("files+:"), _("files-:")],
610 617 files):
611 618 if value:
612 619 self.ui.note("%-12s %s\n" % (key, " ".join(value)))
613 620 else:
614 621 self.ui.note(_("files: %s\n") % " ".join(changes[3]))
615 622
616 623 description = changes[4].strip()
617 624 if description:
618 625 if self.ui.verbose:
619 626 self.ui.status(_("description:\n"))
620 627 self.ui.status(description)
621 628 self.ui.status("\n\n")
622 629 else:
623 630 self.ui.status(_("summary: %s\n") %
624 631 description.splitlines()[0])
625 632 self.ui.status("\n")
626 633
627 634 def show_changeset(ui, repo, opts):
628 635 '''show one changeset. uses template or regular display. caller
629 636 can pass in 'style' and 'template' options in opts.'''
630 637
631 638 tmpl = opts.get('template')
632 639 if tmpl:
633 640 tmpl = templater.parsestring(tmpl, quoted=False)
634 641 else:
635 642 tmpl = ui.config('ui', 'logtemplate')
636 643 if tmpl: tmpl = templater.parsestring(tmpl)
637 644 mapfile = opts.get('style') or ui.config('ui', 'style')
638 645 if tmpl or mapfile:
639 646 if mapfile:
640 647 if not os.path.isfile(mapfile):
641 648 mapname = templater.templatepath('map-cmdline.' + mapfile)
642 649 if not mapname: mapname = templater.templatepath(mapfile)
643 650 if mapname: mapfile = mapname
644 651 try:
645 652 t = changeset_templater(ui, repo, mapfile)
646 653 except SyntaxError, inst:
647 654 raise util.Abort(inst.args[0])
648 655 if tmpl: t.use_template(tmpl)
649 656 return t
650 657 return changeset_printer(ui, repo)
651 658
def show_version(ui):
    """output version and copyright information"""
    banner = _("Mercurial Distributed SCM (version %s)\n")
    ui.write(banner % version.get_version())
    ui.status(_(
        "\nCopyright (C) 2005 Matt Mackall <mpm@selenic.com>\n"
        "This is free software; see the source for copying conditions. "
        "There is NO\nwarranty; "
        "not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
    ))
662 669
def help_(ui, cmd=None, with_version=False):
    """show help for a given command or all commands"""
    # option tables collected along the way; rendered in one pass at the end
    option_lists = []
    if cmd and cmd != 'shortlist':
        # help for one specific command
        if with_version:
            show_version(ui)
            ui.write('\n')
        aliases, i = find(cmd)
        # synopsis
        ui.write("%s\n\n" % i[2])

        # description
        doc = i[0].__doc__
        if not doc:
            doc = _("(No help text available)")
        if ui.quiet:
            # quiet mode: only the first line of the docstring
            doc = doc.splitlines(0)[0]
        ui.write("%s\n" % doc.rstrip())

        if not ui.quiet:
            # aliases
            if len(aliases) > 1:
                ui.write(_("\naliases: %s\n") % ', '.join(aliases[1:]))

            # options
            if i[1]:
                option_lists.append(("options", i[1]))

    else:
        # overview of all (or only the basic) commands
        # program name
        if ui.verbose or with_version:
            show_version(ui)
        else:
            ui.status(_("Mercurial Distributed SCM\n"))
        ui.status('\n')

        # list of commands
        if cmd == "shortlist":
            ui.status(_('basic commands (use "hg help" '
                        'for the full list or option "-v" for details):\n\n'))
        elif ui.verbose:
            ui.status(_('list of commands:\n\n'))
        else:
            ui.status(_('list of commands (use "hg help -v" '
                        'to show aliases and global options):\n\n'))

        # h: canonical name -> one-line summary
        # cmds: canonical name -> full "name|alias|..." spec
        h = {}
        cmds = {}
        for c, e in table.items():
            f = c.split("|")[0]
            # the shortlist only shows commands marked with a leading "^"
            if cmd == "shortlist" and not f.startswith("^"):
                continue
            f = f.lstrip("^")
            # hide debug commands unless --debug is in effect
            if not ui.debugflag and f.startswith("debug"):
                continue
            doc = e[0].__doc__
            if not doc:
                doc = _("(No help text available)")
            h[f] = doc.splitlines(0)[0].rstrip()
            cmds[f] = c.lstrip("^")

        fns = h.keys()
        fns.sort()
        m = max(map(len, fns))
        for f in fns:
            if ui.verbose:
                commands = cmds[f].replace("|",", ")
                ui.write(" %s:\n %s\n"%(commands, h[f]))
            else:
                ui.write(' %-*s %s\n' % (m, f, h[f]))

    # global options
    if ui.verbose:
        option_lists.append(("global options", globalopts))

    # list all option lists
    opt_output = []
    for title, options in option_lists:
        # (header, None) marks a section title; (flags, desc) an option row
        opt_output.append(("\n%s:\n" % title, None))
        for shortopt, longopt, default, desc in options:
            opt_output.append(("%2s%s" % (shortopt and "-%s" % shortopt,
                                          longopt and " --%s" % longopt),
                              "%s%s" % (desc,
                                        default
                                        and _(" (default: %s)") % default
                                        or "")))

    if opt_output:
        # align descriptions on the widest flag column
        opts_len = max([len(line[0]) for line in opt_output if line[1]])
        for first, second in opt_output:
            if second:
                ui.write(" %-*s %s\n" % (opts_len, first, second))
            else:
                ui.write("%s\n" % first)
757 764
758 765 # Commands start here, listed alphabetically
759 766
def add(ui, repo, *pats, **opts):
    """add the specified files on the next commit

    Schedule files to be version controlled and added to the repository.

    The files will be added to the repository at the next commit.

    If no names are given, add all files in the repository.
    """

    names = []
    for src, abs, rel, exact in walk(repo, pats, opts):
        # pattern matches are only added while still unknown;
        # exact (named) files are always scheduled
        if not exact and repo.dirstate.state(abs) != '?':
            continue
        # exact adds are reported only in verbose mode
        if ui.verbose or not exact:
            ui.status(_('adding %s\n') % rel)
        names.append(abs)
    repo.add(names)
780 787
def addremove(ui, repo, *pats, **opts):
    """add all new files, delete all missing files

    Add all new files and remove all missing files from the repository.

    New files are ignored if they match any of the patterns in .hgignore. As
    with add, these changes take effect at the next commit.
    """
    # delegate to the lock-aware helper without a pre-acquired wlock
    return addremove_lock(ui, repo, pats, opts)
790 797
def addremove_lock(ui, repo, pats, opts, wlock=None):
    # schedule unknown files for add and locally-missing tracked files
    # for removal, reusing an already-held working dir lock if given
    added, removed = [], []
    for src, abs, rel, exact in walk(repo, pats, opts):
        chatty = ui.verbose or not exact
        name = (pats and rel) or abs
        if src == 'f' and repo.dirstate.state(abs) == '?':
            added.append(abs)
            if chatty:
                ui.status(_('adding %s\n') % name)
        if repo.dirstate.state(abs) != 'r' and not os.path.exists(rel):
            removed.append(abs)
            if chatty:
                ui.status(_('removing %s\n') % name)
    repo.add(added, wlock=wlock)
    repo.remove(removed, wlock=wlock)
804 811
def annotate(ui, repo, *pats, **opts):
    """show changeset information per file line

    List changes in files, showing the revision id responsible for each line

    This command is useful to discover who did a change or when a change took
    place.

    Without the -a option, annotate will avoid processing files it
    detects as binary. With -a, annotate will generate an annotation
    anyway, probably with undesirable results.
    """
    # column formatters, one per possible -u/-n/-c/-d flag; each maps a
    # changelog revision number to the string shown in that column
    def getnode(rev):
        return short(repo.changelog.node(rev))

    ucache = {}
    def getname(rev):
        # username column, trimmed and memoized via ucache
        cl = repo.changelog.read(repo.changelog.node(rev))
        return trimuser(ui, cl[1], rev, ucache)

    dcache = {}
    def getdate(rev):
        # date column, memoized per revision in dcache
        datestr = dcache.get(rev)
        if datestr is None:
            cl = repo.changelog.read(repo.changelog.node(rev))
            datestr = dcache[rev] = util.datestr(cl[2])
        return datestr

    if not pats:
        raise util.Abort(_('at least one file name or pattern required'))

    opmap = [['user', getname], ['number', str], ['changeset', getnode],
             ['date', getdate]]
    # default to showing revision numbers when no column was requested
    if not opts['user'] and not opts['changeset'] and not opts['date']:
        opts['number'] = 1

    if opts['rev']:
        node = repo.changelog.lookup(opts['rev'])
    else:
        node = repo.dirstate.parents()[0]
    change = repo.changelog.read(node)
    mmap = repo.manifest.read(change[0])

    for src, abs, rel, exact in walk(repo, pats, opts, node=node):
        f = repo.file(abs)
        if not opts['text'] and util.binary(f.read(mmap[abs])):
            ui.write(_("%s: binary file\n") % ((pats and rel) or abs))
            continue

        lines = f.annotate(mmap[abs])
        pieces = []

        # NOTE(review): this loop rebinds f (the filelog above) to the
        # formatter function; harmless because the filelog is reassigned
        # at the top of the outer loop, but easy to misread
        for o, f in opmap:
            if opts[o]:
                l = [f(n) for n, dummy in lines]
                if l:
                    # right-align each column to its widest entry
                    m = max(map(len, l))
                    pieces.append(["%*s" % (m, x) for x in l])

        if pieces:
            # zip(*pieces) transposes columns back into per-line tuples
            for p, l in zip(zip(*pieces), lines):
                ui.write("%s: %s" % (" ".join(p), l[1]))
867 874
def bundle(ui, repo, fname, dest="default-push", **opts):
    """create a changegroup file

    Generate a compressed changegroup file collecting all changesets
    not found in the other repository.

    This file can then be transferred using conventional means and
    applied to another repository with the unbundle command. This is
    useful when native push and pull are not available or when
    exporting an entire repository is undesirable. The standard file
    extension is ".hg".

    Unlike import/export, this exactly preserves all changeset
    contents including permissions, rename data, and revision history.
    """
    dest = ui.expandpath(dest)
    other = hg.repository(ui, dest)
    # everything the remote is missing goes into the bundle
    outgoing = repo.findoutgoing(other, force=opts['force'])
    cg = repo.changegroup(outgoing, 'bundle')
    write_bundle(cg, fname)
888 895
def cat(ui, repo, file1, *pats, **opts):
    """output the latest or given revisions of files

    Print the specified files as they were at the given revision.
    If no revision is given then the tip is used.

    Output may be to a file, in which case the name of the file is
    given using a format string. The formatting rules are the same as
    for the export command, with the following additions:

    %s basename of file being printed
    %d dirname of file being printed, or '.' if in repo root
    %p root-relative path name of file being printed
    """
    if opts['rev']:
        node = repo.lookup(opts['rev'])
    else:
        node = repo.changelog.tip()
    # manifest of the target revision: file name -> file node
    mf = repo.manifest.read(repo.changelog.read(node)[0])
    for src, abs, rel, exact in walk(repo, (file1,) + pats, opts, node):
        flog = repo.file(abs)
        fnode = mf[abs]
        fp = make_file(repo, flog, opts['output'], node=fnode, pathname=abs)
        fp.write(flog.read(fnode))
916 923
def clone(ui, source, dest=None, **opts):
    """make a copy of an existing repository

    Create a copy of an existing repository in a new directory.

    If no destination directory name is specified, it defaults to the
    basename of the source.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls.

    For efficiency, hardlinks are used for cloning whenever the source
    and destination are on the same filesystem. Some filesystems,
    such as AFS, implement hardlinking incorrectly, but do not report
    errors. In these cases, use the --pull option to avoid
    hardlinking.

    See pull for valid source format details.
    """
    if dest is None:
        dest = os.path.basename(os.path.normpath(source))

    if os.path.exists(dest):
        raise util.Abort(_("destination '%s' already exists"), dest)

    dest = os.path.realpath(dest)

    class Dircleanup(object):
        # removes the half-created destination again if the clone
        # fails before close() marks it complete
        def __init__(self, dir_):
            self.rmtree = shutil.rmtree
            self.dir_ = dir_
            os.mkdir(dir_)
        def close(self):
            self.dir_ = None
        def __del__(self):
            if self.dir_:
                self.rmtree(self.dir_, True)

    if opts['ssh']:
        ui.setconfig("ui", "ssh", opts['ssh'])
    if opts['remotecmd']:
        ui.setconfig("ui", "remotecmd", opts['remotecmd'])

    source = ui.expandpath(source)

    d = Dircleanup(dest)
    abspath = source
    other = hg.repository(ui, source)

    # a local source (dev() != -1) can be cloned by copying store files
    # directly, unless --pull or --rev forces a pull
    copy = False
    if other.dev() != -1:
        abspath = os.path.abspath(source)
        if not opts['pull'] and not opts['rev']:
            copy = True

    if copy:
        try:
            # we use a lock here because if we race with commit, we
            # can end up with extra data in the cloned revlogs that's
            # not pointed to by changesets, thus causing verify to
            # fail
            l1 = other.lock()
        except lock.LockException:
            # source is locked; fall back to a pull-based clone
            copy = False

    if copy:
        # we lock here to avoid premature writing to the target
        os.mkdir(os.path.join(dest, ".hg"))
        l2 = lock.lock(os.path.join(dest, ".hg", "lock"))

        files = "data 00manifest.d 00manifest.i 00changelog.d 00changelog.i"
        for f in files.split():
            src = os.path.join(source, ".hg", f)
            dst = os.path.join(dest, ".hg", f)
            try:
                util.copyfiles(src, dst)
            except OSError, inst:
                # some store files may legitimately be absent (e.g. no
                # .d files for small revlogs); only tolerate ENOENT
                if inst.errno != errno.ENOENT:
                    raise

        repo = hg.repository(ui, dest)

    else:
        revs = None
        if opts['rev']:
            if not other.local():
                error = _("clone -r not supported yet for remote repositories.")
                raise util.Abort(error)
            else:
                revs = [other.lookup(rev) for rev in opts['rev']]
        repo = hg.repository(ui, dest, create=1)
        repo.pull(other, heads = revs)

    # record the source as the default path for future pulls
    f = repo.opener("hgrc", "w", text=True)
    f.write("[paths]\n")
    f.write("default = %s\n" % abspath)
    f.close()

    if not opts['noupdate']:
        update(repo.ui, repo)

    # clone succeeded: disarm the cleanup handler
    d.close()
1019 1026
1020 1027 def commit(ui, repo, *pats, **opts):
1021 1028 """commit the specified files or all outstanding changes
1022 1029
1023 1030 Commit changes to the given files into the repository.
1024 1031
1025 1032 If a list of files is omitted, all changes reported by "hg status"
1026 1033 will be commited.
1027 1034
1028 1035 The HGEDITOR or EDITOR environment variables are used to start an
1029 1036 editor to add a commit comment.
1030 1037 """
1031 1038 message = opts['message']
1032 1039 logfile = opts['logfile']
1033 1040
1034 1041 if message and logfile:
1035 1042 raise util.Abort(_('options --message and --logfile are mutually '
1036 1043 'exclusive'))
1037 1044 if not message and logfile:
1038 1045 try:
1039 1046 if logfile == '-':
1040 1047 message = sys.stdin.read()
1041 1048 else:
1042 1049 message = open(logfile).read()
1043 1050 except IOError, inst:
1044 1051 raise util.Abort(_("can't read commit message '%s': %s") %
1045 1052 (logfile, inst.strerror))
1046 1053
1047 1054 if opts['addremove']:
1048 1055 addremove(ui, repo, *pats, **opts)
1049 1056 fns, match, anypats = matchpats(repo, pats, opts)
1050 1057 if pats:
1051 1058 modified, added, removed, deleted, unknown = (
1052 1059 repo.changes(files=fns, match=match))
1053 1060 files = modified + added + removed
1054 1061 else:
1055 1062 files = []
1056 1063 try:
1057 1064 repo.commit(files, message, opts['user'], opts['date'], match)
1058 1065 except ValueError, inst:
1059 1066 raise util.Abort(str(inst))
1060 1067
1061 1068 def docopy(ui, repo, pats, opts, wlock):
1062 1069 # called with the repo lock held
1063 1070 cwd = repo.getcwd()
1064 1071 errors = 0
1065 1072 copied = []
1066 1073 targets = {}
1067 1074
1068 1075 def okaytocopy(abs, rel, exact):
1069 1076 reasons = {'?': _('is not managed'),
1070 1077 'a': _('has been marked for add'),
1071 1078 'r': _('has been marked for remove')}
1072 1079 state = repo.dirstate.state(abs)
1073 1080 reason = reasons.get(state)
1074 1081 if reason:
1075 1082 if state == 'a':
1076 1083 origsrc = repo.dirstate.copied(abs)
1077 1084 if origsrc is not None:
1078 1085 return origsrc
1079 1086 if exact:
1080 1087 ui.warn(_('%s: not copying - file %s\n') % (rel, reason))
1081 1088 else:
1082 1089 return abs
1083 1090
1084 1091 def copy(origsrc, abssrc, relsrc, target, exact):
1085 1092 abstarget = util.canonpath(repo.root, cwd, target)
1086 1093 reltarget = util.pathto(cwd, abstarget)
1087 1094 prevsrc = targets.get(abstarget)
1088 1095 if prevsrc is not None:
1089 1096 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
1090 1097 (reltarget, abssrc, prevsrc))
1091 1098 return
1092 1099 if (not opts['after'] and os.path.exists(reltarget) or
1093 1100 opts['after'] and repo.dirstate.state(abstarget) not in '?r'):
1094 1101 if not opts['force']:
1095 1102 ui.warn(_('%s: not overwriting - file exists\n') %
1096 1103 reltarget)
1097 1104 return
1098 1105 if not opts['after']:
1099 1106 os.unlink(reltarget)
1100 1107 if opts['after']:
1101 1108 if not os.path.exists(reltarget):
1102 1109 return
1103 1110 else:
1104 1111 targetdir = os.path.dirname(reltarget) or '.'
1105 1112 if not os.path.isdir(targetdir):
1106 1113 os.makedirs(targetdir)
1107 1114 try:
1108 1115 restore = repo.dirstate.state(abstarget) == 'r'
1109 1116 if restore:
1110 1117 repo.undelete([abstarget], wlock)
1111 1118 try:
1112 1119 shutil.copyfile(relsrc, reltarget)
1113 1120 shutil.copymode(relsrc, reltarget)
1114 1121 restore = False
1115 1122 finally:
1116 1123 if restore:
1117 1124 repo.remove([abstarget], wlock)
1118 1125 except shutil.Error, inst:
1119 1126 raise util.Abort(str(inst))
1120 1127 except IOError, inst:
1121 1128 if inst.errno == errno.ENOENT:
1122 1129 ui.warn(_('%s: deleted in working copy\n') % relsrc)
1123 1130 else:
1124 1131 ui.warn(_('%s: cannot copy - %s\n') %
1125 1132 (relsrc, inst.strerror))
1126 1133 errors += 1
1127 1134 return
1128 1135 if ui.verbose or not exact:
1129 1136 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
1130 1137 targets[abstarget] = abssrc
1131 1138 if abstarget != origsrc:
1132 1139 repo.copy(origsrc, abstarget, wlock)
1133 1140 copied.append((abssrc, relsrc, exact))
1134 1141
1135 1142 def targetpathfn(pat, dest, srcs):
1136 1143 if os.path.isdir(pat):
1137 1144 abspfx = util.canonpath(repo.root, cwd, pat)
1138 1145 if destdirexists:
1139 1146 striplen = len(os.path.split(abspfx)[0])
1140 1147 else:
1141 1148 striplen = len(abspfx)
1142 1149 if striplen:
1143 1150 striplen += len(os.sep)
1144 1151 res = lambda p: os.path.join(dest, p[striplen:])
1145 1152 elif destdirexists:
1146 1153 res = lambda p: os.path.join(dest, os.path.basename(p))
1147 1154 else:
1148 1155 res = lambda p: dest
1149 1156 return res
1150 1157
1151 1158 def targetpathafterfn(pat, dest, srcs):
1152 1159 if util.patkind(pat, None)[0]:
1153 1160 # a mercurial pattern
1154 1161 res = lambda p: os.path.join(dest, os.path.basename(p))
1155 1162 else:
1156 1163 abspfx = util.canonpath(repo.root, cwd, pat)
1157 1164 if len(abspfx) < len(srcs[0][0]):
1158 1165 # A directory. Either the target path contains the last
1159 1166 # component of the source path or it does not.
1160 1167 def evalpath(striplen):
1161 1168 score = 0
1162 1169 for s in srcs:
1163 1170 t = os.path.join(dest, s[0][striplen:])
1164 1171 if os.path.exists(t):
1165 1172 score += 1
1166 1173 return score
1167 1174
1168 1175 striplen = len(abspfx)
1169 1176 if striplen:
1170 1177 striplen += len(os.sep)
1171 1178 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
1172 1179 score = evalpath(striplen)
1173 1180 striplen1 = len(os.path.split(abspfx)[0])
1174 1181 if striplen1:
1175 1182 striplen1 += len(os.sep)
1176 1183 if evalpath(striplen1) > score:
1177 1184 striplen = striplen1
1178 1185 res = lambda p: os.path.join(dest, p[striplen:])
1179 1186 else:
1180 1187 # a file
1181 1188 if destdirexists:
1182 1189 res = lambda p: os.path.join(dest, os.path.basename(p))
1183 1190 else:
1184 1191 res = lambda p: dest
1185 1192 return res
1186 1193
1187 1194
1188 1195 pats = list(pats)
1189 1196 if not pats:
1190 1197 raise util.Abort(_('no source or destination specified'))
1191 1198 if len(pats) == 1:
1192 1199 raise util.Abort(_('no destination specified'))
1193 1200 dest = pats.pop()
1194 1201 destdirexists = os.path.isdir(dest)
1195 1202 if (len(pats) > 1 or util.patkind(pats[0], None)[0]) and not destdirexists:
1196 1203 raise util.Abort(_('with multiple sources, destination must be an '
1197 1204 'existing directory'))
1198 1205 if opts['after']:
1199 1206 tfn = targetpathafterfn
1200 1207 else:
1201 1208 tfn = targetpathfn
1202 1209 copylist = []
1203 1210 for pat in pats:
1204 1211 srcs = []
1205 1212 for tag, abssrc, relsrc, exact in walk(repo, [pat], opts):
1206 1213 origsrc = okaytocopy(abssrc, relsrc, exact)
1207 1214 if origsrc:
1208 1215 srcs.append((origsrc, abssrc, relsrc, exact))
1209 1216 if not srcs:
1210 1217 continue
1211 1218 copylist.append((tfn(pat, dest, srcs), srcs))
1212 1219 if not copylist:
1213 1220 raise util.Abort(_('no files to copy'))
1214 1221
1215 1222 for targetpath, srcs in copylist:
1216 1223 for origsrc, abssrc, relsrc, exact in srcs:
1217 1224 copy(origsrc, abssrc, relsrc, targetpath(abssrc), exact)
1218 1225
1219 1226 if errors:
1220 1227 ui.warn(_('(consider using --after)\n'))
1221 1228 return errors, copied
1222 1229
def copy(ui, repo, *pats, **opts):
    """mark files as copied for the next commit

    Mark dest as having copies of source files. If dest is a
    directory, copies are put in that directory. If dest is a file,
    there can only be one source.

    By default, this command copies the contents of files as they
    stand in the working directory. If invoked with --after, the
    operation is recorded, but no copying is performed.

    This command takes effect in the next commit.

    NOTE: This command should be treated as experimental. While it
    should properly record copied files, this information is not yet
    fully used by merge, nor fully reported by log.
    """
    try:
        # presumably wlock(0) fails immediately instead of blocking
        # when the lock is held -- TODO confirm against lock module
        wlock = repo.wlock(0)
        errs, copied = docopy(ui, repo, pats, opts, wlock)
    except lock.LockHeld, inst:
        ui.warn(_("repository lock held by %s\n") % inst.args[0])
        errs = 1
    # nonzero error count becomes the command's exit status
    return errs
1247 1254
def debugancestor(ui, index, rev1, rev2):
    """find the ancestor revision of two revisions in a given index"""
    # open the revlog directly from the given index file, no repo needed
    log = revlog.revlog(util.opener(os.getcwd(), audit=False), index, "")
    anc = log.ancestor(log.lookup(rev1), log.lookup(rev2))
    ui.write("%d:%s\n" % (log.rev(anc), hex(anc)))
1253 1260
def debugcomplete(ui, cmd):
    """returns the completion list associated with the given command"""
    # sorted, space-separated list of matching command names
    names = findpossible(cmd).keys()
    names.sort()
    ui.write("%s\n" % " ".join(names))
1259 1266
def debugrebuildstate(ui, repo, rev=None):
    """rebuild the dirstate as it would look like for the given revision"""
    if rev:
        node = repo.lookup(rev)
    else:
        node = repo.changelog.tip()
    manifest_node = repo.changelog.read(node)[0]
    flags = repo.manifest.readflags(manifest_node)
    # hold the working dir lock while rewriting the dirstate
    wlock = repo.wlock()
    repo.dirstate.rebuild(node, flags.iteritems())
1271 1278
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate"""
    parent1, parent2 = repo.dirstate.parents()
    repo.dirstate.read()
    dc = repo.dirstate.map
    keys = dc.keys()
    keys.sort()
    m1 = repo.manifest.read(repo.changelog.read(parent1)[0])
    m2 = repo.manifest.read(repo.changelog.read(parent2)[0])
    problems = 0
    # dirstate -> manifest: every tracked entry must be consistent
    # with the first parent's manifest
    for f in dc:
        state = repo.dirstate.state(f)
        if state in "nr" and f not in m1:
            ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
            problems += 1
        if state in "a" and f in m1:
            ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
            problems += 1
        if state in "m" and f not in m1 and f not in m2:
            ui.warn(_("%s in state %s, but not in either manifest\n") %
                    (f, state))
            problems += 1
    # manifest -> dirstate: every manifest file must be tracked
    for f in m1:
        state = repo.dirstate.state(f)
        if state not in "nrm":
            ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
            problems += 1
    if problems:
        error = _(".hg/dirstate inconsistent with current parent's manifest")
        raise util.Abort(error)
1304 1311
def debugconfig(ui, repo):
    """show combined config settings from all hgrc files"""
    # walkconfig yields (section, name, value) triples
    for item in ui.walkconfig():
        ui.write('%s.%s=%s\n' % item)
1309 1316
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory

    This is useful for writing repository conversion tools, but should
    be used with care.
    """
    p1 = repo.lookup(rev1)
    # second parent defaults to the null revision
    p2 = repo.lookup(rev2 or hex(nullid))
    repo.dirstate.setparents(p1, p2)
1321 1328
1322 1329 def debugstate(ui, repo):
1323 1330 """show the contents of the current dirstate"""
1324 1331 repo.dirstate.read()
1325 1332 dc = repo.dirstate.map
1326 1333 keys = dc.keys()
1327 1334 keys.sort()
1328 1335 for file_ in keys:
1329 1336 ui.write("%c %3o %10d %s %s\n"
1330 1337 % (dc[file_][0], dc[file_][1] & 0777, dc[file_][2],
1331 1338 time.strftime("%x %X",
1332 1339 time.localtime(dc[file_][3])), file_))
1333 1340 for f in repo.dirstate.copies:
1334 1341 ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copies[f], f))
1335 1342
def debugdata(ui, file_, rev):
    """dump the contents of an data file revision"""
    # open the revlog via the index (.i) companion of the given .d file
    log = revlog.revlog(util.opener(os.getcwd(), audit=False),
                        file_[:-2] + ".i", file_)
    try:
        ui.write(log.revision(log.lookup(rev)))
    except KeyError:
        raise util.Abort(_('invalid revision identifier %s'), rev)
1344 1351
def debugindex(ui, file_):
    """dump the contents of an index file"""
    log = revlog.revlog(util.opener(os.getcwd(), audit=False), file_, "")
    ui.write("   rev    offset  length   base linkrev" +
             " nodeid       p1           p2\n")
    for rev in range(log.count()):
        entry = log.index[rev]
        # entry layout: offset, length, base, linkrev, p1, p2, nodeid
        ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
                rev, entry[0], entry[1], entry[2], entry[3],
                short(entry[6]), short(entry[4]), short(entry[5])))
1355 1362
def debugindexdot(ui, file_):
    """dump an index DAG as a .dot file"""
    log = revlog.revlog(util.opener(os.getcwd(), audit=False), file_, "")
    ui.write("digraph G {\n")
    for rev in range(log.count()):
        entry = log.index[rev]
        # edge from each parent; null second parents are omitted
        ui.write("\t%d -> %d\n" % (log.rev(entry[4]), rev))
        if entry[5] != nullid:
            ui.write("\t%d -> %d\n" % (log.rev(entry[5]), rev))
    ui.write("}\n")
1366 1373
def debugrename(ui, repo, file, rev=None):
    """dump rename information"""
    fname = relpath(repo, [file])[0]
    flog = repo.file(fname)
    if not rev:
        node = flog.tip()
    else:
        try:
            # assume all revision numbers are for changesets
            cnode = repo.lookup(rev)
            mf = repo.manifest.read(repo.changelog.read(cnode)[0])
            node = mf[fname]
        except (hg.RepoError, KeyError):
            # fall back to treating rev as a filelog revision
            node = flog.lookup(rev)
    renamed = flog.renamed(node)
    if renamed:
        ui.write(_("renamed from %s:%s\n") % (renamed[0], hex(renamed[1])))
    else:
        ui.write(_("not renamed\n"))
1386 1393
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    matched = list(walk(repo, pats, opts))
    if not matched:
        return
    # pad the abs/rel columns to their widest entries
    abswidth = max([len(abs) for (src, abs, rel, exact) in matched])
    relwidth = max([len(rel) for (src, abs, rel, exact) in matched])
    fmt = '%%s %%-%ds %%-%ds %%s' % (abswidth, relwidth)
    for src, abs, rel, exact in matched:
        line = fmt % (src, abs, rel, exact and 'exact' or '')
        ui.write("%s\n" % line.rstrip())
1398 1405
def diff(ui, repo, *pats, **opts):
    """diff repository (or selected files)

    Show differences between revisions for the specified files.

    Differences between files are shown using the unified diff format.

    When two revision arguments are given, then changes are shown
    between those revisions. If only one revision is specified then
    that revision is compared to the working directory, and, when no
    revisions are specified, the working directory files are compared
    to its parent.

    Without the -a option, diff will avoid generating diffs of files
    it detects as binary. With -a, diff will generate a diff anyway,
    probably with undesirable results.
    """
    revs = [repo.lookup(x) for x in opts['rev']]
    if len(revs) > 2:
        raise util.Abort(_("too many revisions to diff"))
    # None means "working directory" on either side
    node1 = node2 = None
    if revs:
        node1 = revs[0]
        if len(revs) == 2:
            node2 = revs[1]

    fns, matchfn, anypats = matchpats(repo, pats, opts)

    dodiff(sys.stdout, ui, repo, node1, node2, fns, match=matchfn,
           text=opts['text'], opts=opts)
1430 1437
def doexport(ui, repo, changeset, seqno, total, revwidth, opts):
    """write a single changeset as a "# HG changeset patch" (header plus
    diff) to the file or stdout chosen by opts['output']; helper for
    the export command."""
    node = repo.lookup(changeset)
    parents = [p for p in repo.changelog.parents(node) if p != nullid]
    # with --switch-parent, diff against the second parent instead
    if opts['switch_parent']:
        parents.reverse()
    prev = (parents and parents[0]) or nullid
    change = repo.changelog.read(node)

    fp = make_file(repo, repo.changelog, opts['output'],
                   node=node, total=total, seqno=seqno,
                   revwidth=revwidth)
    if fp != sys.stdout:
        ui.note("%s\n" % fp.name)

    # patch header: user, node, parent(s), then the commit description
    fp.write("# HG changeset patch\n")
    fp.write("# User %s\n" % change[1])
    fp.write("# Node ID %s\n" % hex(node))
    fp.write("# Parent %s\n" % hex(prev))
    if len(parents) > 1:
        fp.write("# Parent %s\n" % hex(parents[1]))
    fp.write(change[4].rstrip())
    fp.write("\n\n")

    dodiff(fp, ui, repo, prev, node, text=opts['text'])
    if fp != sys.stdout:
        fp.close()
1457 1464
def export(ui, repo, *changesets, **opts):
    """dump the header and diffs for one or more changesets

    Print the changeset header and diffs for one or more revisions.

    The information shown in the changeset header is: author,
    changeset hash, parent and commit comment.

    Output may be to a file, in which case the name of the file is
    given using a format string. The formatting rules are as follows:

    %%   literal "%" character
    %H   changeset hash (40 bytes of hexadecimal)
    %N   number of patches being generated
    %R   changeset revision number
    %b   basename of the exporting repository
    %h   short-form changeset hash (12 bytes of hexadecimal)
    %n   zero-padded sequence number, starting at 1
    %r   zero-padded changeset revision number

    Without the -a option, export will avoid generating diffs of files
    it detects as binary. With -a, export will generate a diff anyway,
    probably with undesirable results.

    With the --switch-parent option, the diff will be against the second
    parent. It can be useful to review a merge.
    """
    if not changesets:
        raise util.Abort(_("export requires at least one changeset"))
    revs = list(revrange(ui, repo, changesets))
    total = len(revs)
    # pad sequence numbers to the widest revision string
    revwidth = max(map(len, revs))
    if total > 1:
        ui.note(_("Exporting patches:\n"))
    else:
        ui.note(_("Exporting patch:\n"))
    for seqno, cset in enumerate(revs):
        doexport(ui, repo, cset, seqno + 1, total, revwidth, opts)
1496 1503
def forget(ui, repo, *pats, **opts):
    """don't add the specified files on the next commit

    Undo an 'hg add' scheduled for the next commit.
    """
    forgotten = []
    for src, abs, rel, exact in walk(repo, pats, opts):
        # only files in the 'added' state can be forgotten
        if repo.dirstate.state(abs) != 'a':
            continue
        forgotten.append(abs)
        if ui.verbose or not exact:
            ui.status(_('forgetting %s\n') % ((pats and rel) or abs))
    repo.forget(forgotten)
1509 1516
def grep(ui, repo, pattern, *pats, **opts):
    """search for a pattern in specified files and revisions

    Search revisions of files for a regular expression.

    This command behaves differently than Unix grep. It only accepts
    Python/Perl regexps. It searches repository history, not the
    working directory. It always prints the revision number in which
    a match appears.

    By default, grep only prints output for the first revision of a
    file in which it finds a match. To get it to print every revision
    that contains a change in match status ("-" for a match that
    becomes a non-match, or "+" for a non-match that becomes a match),
    use the --all flag.
    """
    reflags = 0
    if opts['ignore_case']:
        reflags |= re.I
    regexp = re.compile(pattern, reflags)
    # field separator / line terminator; --print0 switches both to NUL
    sep, eol = ':', '\n'
    if opts['print0']:
        sep = eol = '\0'

    # cache of filelog objects, one per filename
    fcache = {}
    def getfile(fn):
        if fn not in fcache:
            fcache[fn] = repo.file(fn)
        return fcache[fn]

    def matchlines(body):
        # yield (linenum, colstart, colend, line-text) for each regexp
        # hit in body, scanning forward from the previous match
        begin = 0
        linenum = 0
        while True:
            match = regexp.search(body, begin)
            if not match:
                break
            mstart, mend = match.span()
            linenum += body.count('\n', begin, mstart) + 1
            lstart = body.rfind('\n', begin, mstart) + 1 or begin
            lend = body.find('\n', mend)
            yield linenum, mstart - lstart, mend - lstart, body[lstart:lend]
            begin = lend + 1

    class linestate(object):
        # one matched line; equality/hash use only the text, so two
        # revisions with the same matching line compare equal when the
        # states are diffed below
        def __init__(self, line, linenum, colstart, colend):
            self.line = line
            self.linenum = linenum
            self.colstart = colstart
            self.colend = colend
        def __eq__(self, other):
            return self.line == other.line
        def __hash__(self):
            return hash(self.line)

    # matches[rev][fn] -> {linestate: linestate} for the current window
    matches = {}
    def grepbody(fn, rev, body):
        matches[rev].setdefault(fn, {})
        m = matches[rev][fn]
        for lnum, cstart, cend, line in matchlines(body):
            s = linestate(line, lnum, cstart, cend)
            m[s] = s

    # prev[fn] = last revision displayed for fn; read by display() when
    # walking a decrementing window with --all
    prev = {}
    ucache = {}  # username cache for trimuser
    def display(fn, rev, states, prevstates):
        # print the lines whose match status changed between prevstates
        # and states; returns (#added, #removed) match counts
        diff = list(sets.Set(states).symmetric_difference(sets.Set(prevstates)))
        diff.sort(lambda x, y: cmp(x.linenum, y.linenum))
        counts = {'-': 0, '+': 0}
        filerevmatches = {}
        for l in diff:
            if incrementing or not opts['all']:
                change = ((l in prevstates) and '-') or '+'
                r = rev
            else:
                change = ((l in states) and '-') or '+'
                r = prev[fn]
            # NOTE(review): the computed r is not used below; the output
            # always shows `rev` even when the change belongs to prev[fn].
            # Looks like `r` was intended here -- confirm before changing.
            cols = [fn, str(rev)]
            if opts['line_number']:
                cols.append(str(l.linenum))
            if opts['all']:
                cols.append(change)
            if opts['user']:
                cols.append(trimuser(ui, getchange(rev)[1], rev,
                                     ucache))
            if opts['files_with_matches']:
                # -l: one line per (file, rev), suppress the match text
                c = (fn, rev)
                if c in filerevmatches:
                    continue
                filerevmatches[c] = 1
            else:
                cols.append(l.line)
            ui.write(sep.join(cols), eol)
            counts[change] += 1
        return counts['+'], counts['-']

    fstate = {}  # fn -> match states as of the last displayed revision
    skip = {}    # fn -> True once a file is done (first hit, no --all)
    changeiter, getchange, matchfn = walkchangerevs(ui, repo, pats, opts)
    count = 0
    incrementing = False
    for st, rev, fns in changeiter:
        if st == 'window':
            # new revision window; rev is the direction flag here
            incrementing = rev
            matches.clear()
        elif st == 'add':
            # collect matches for every interesting file in this revision
            change = repo.changelog.read(repo.lookup(str(rev)))
            mf = repo.manifest.read(change[0])
            matches[rev] = {}
            for fn in fns:
                if fn in skip:
                    continue
                fstate.setdefault(fn, {})
                try:
                    grepbody(fn, rev, getfile(fn).read(mf[fn]))
                except KeyError:
                    # file not present in this revision's manifest
                    pass
        elif st == 'iter':
            # window complete: diff against previous state and print
            states = matches[rev].items()
            states.sort()
            for fn, m in states:
                if fn in skip:
                    continue
                if incrementing or not opts['all'] or fstate[fn]:
                    pos, neg = display(fn, rev, m, fstate[fn])
                    count += pos + neg
                    if pos and not opts['all']:
                        skip[fn] = True
                fstate[fn] = m
                prev[fn] = rev

    if not incrementing:
        # decrementing walk: flush the oldest state of each file
        fstate = fstate.items()
        fstate.sort()
        for fn, state in fstate:
            if fn in skip:
                continue
            display(fn, rev, {}, state)
    # shell convention: nonzero exit status when nothing matched
    return (count == 0 and 1) or 0
1650 1657
def heads(ui, repo, **opts):
    """show current repository heads

    Show all repository head changesets.

    Repository "heads" are changesets that don't have children
    changesets. They are where development generally takes place and
    are the usual targets for update and merge operations.
    """
    # --rev restricts the answer to heads reachable from that revision
    if not opts['rev']:
        headlist = repo.heads()
    else:
        headlist = repo.heads(repo.lookup(opts['rev']))
    branchinfo = None
    if opts['branches']:
        branchinfo = repo.branchlookup(headlist)
    displayer = show_changeset(ui, repo, opts)
    for node in headlist:
        displayer.show(changenode=node, brinfo=branchinfo)
1670 1677
def identify(ui, repo):
    """print information about the working copy

    Print a short summary of the current state of the repo.

    This summary identifies the repository state using one or two parent
    hash identifiers, followed by a "+" if there are uncommitted changes
    in the working directory, followed by a list of tags for this revision.
    """
    parents = [p for p in repo.dirstate.parents() if p != nullid]
    if not parents:
        ui.write(_("unknown\n"))
        return

    # full hashes with -v, short ones otherwise
    if ui.verbose:
        hexfunc = hex
    else:
        hexfunc = short
    modified, added, removed, deleted, unknown = repo.changes()
    # trailing "+" flags uncommitted changes in the working directory
    dirty = ""
    if modified or added or removed or deleted:
        dirty = "+"
    output = ['+'.join([hexfunc(p) for p in parents]) + dirty]

    if not ui.quiet:
        # multiple tags for a single parent separated by '/'
        parenttags = []
        for p in parents:
            tags = repo.nodetags(p)
            if tags:
                parenttags.append('/'.join(tags))
        # tags for multiple parents separated by ' + '
        if parenttags:
            output.append(' + '.join(parenttags))

    ui.write("%s\n" % ' '.join(output))
1700 1707
def import_(ui, repo, patch1, *patches, **opts):
    """import an ordered set of patches

    Import a list of patches and commit them individually.

    If there are outstanding changes in the working directory, import
    will abort unless given the -f flag.

    If a patch looks like a mail message (its first line starts with
    "From " or looks like an RFC822 header), it will not be applied
    unless the -f option is used. The importer neither parses nor
    discards mail headers, so use -f only to override the "mailness"
    safety check, not to import a real mail message.
    """
    patches = (patch1,) + patches

    # refuse to apply on top of local modifications unless forced
    if not opts['force']:
        modified, added, removed, deleted, unknown = repo.changes()
        if modified or added or removed or deleted:
            raise util.Abort(_("outstanding uncommitted changes"))

    d = opts["base"]       # directory patch file names are relative to
    strip = opts["strip"]  # leading path components to strip (patch -p)

    # a leading "From " or an RFC822-style "Name:" header marks mail
    mailre = re.compile(r'(?:From |[\w-]+:)')

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(r'(?:Index:[ \t]|diff[ \t]|RCS file: |' +
                        'retrieving revision [0-9]+(\.[0-9]+)*$|' +
                        '(---|\*\*\*)[ \t])')

    for patch in patches:
        ui.status(_("applying %s\n") % patch)
        pf = os.path.join(d, patch)

        message = []     # commit message lines gathered so far
        user = None      # committer parsed from an hg-export header
        hgpatch = False  # True while inside an "# HG changeset patch" block
        for line in file(pf):
            line = line.rstrip()
            # mailness check only applies before any message text is seen
            if (not message and not hgpatch and
                mailre.match(line) and not opts['force']):
                if len(line) > 35:
                    line = line[:32] + '...'
                raise util.Abort(_('first line looks like a '
                                   'mail header: ') + line)
            if diffre.match(line):
                # diff body starts here; message collection is done
                break
            elif hgpatch:
                # parse values when importing the result of an hg export
                if line.startswith("# User "):
                    user = line[7:]
                    ui.debug(_('User: %s\n') % user)
                elif not line.startswith("# ") and line:
                    # first non-header line ends the hg header block
                    message.append(line)
                    hgpatch = False
            elif line == '# HG changeset patch':
                hgpatch = True
                message = []  # We may have collected garbage
            else:
                message.append(line)

        # make sure message isn't empty
        if not message:
            message = _("imported patch %s\n") % patch
        else:
            message = "%s\n" % '\n'.join(message)
        ui.debug(_('message:\n%s\n') % message)

        files = util.patch(strip, pf, ui)

        if len(files) > 0:
            # pick up files the patch created or deleted
            addremove(ui, repo, *files)
        repo.commit(files, message, user)
1776 1783
def incoming(ui, repo, source="default", **opts):
    """show new changesets found in source

    Show new changesets found in the specified path/URL or the default
    pull location. These are the changesets that would be pulled if a pull
    was requested.

    For remote repository, using --bundle avoids downloading the changesets
    twice if the incoming is followed by a pull.

    See pull for valid source format details.
    """
    source = ui.expandpath(source)
    # command-line --ssh/--remotecmd override the configured values
    if opts['ssh']:
        ui.setconfig("ui", "ssh", opts['ssh'])
    if opts['remotecmd']:
        ui.setconfig("ui", "remotecmd", opts['remotecmd'])

    other = hg.repository(ui, source)
    incoming = repo.findincoming(other, force=opts["force"])
    if not incoming:
        # nothing new on the remote side
        return

    cleanup = None  # temporary bundle file to delete on exit, if any
    try:
        fname = opts["bundle"]
        if fname or not other.local():
            # create a bundle (uncompressed if other repo is not local)
            cg = other.changegroup(incoming, "incoming")
            fname = cleanup = write_bundle(cg, fname, compress=other.local())
            # keep written bundle?
            if opts["bundle"]:
                cleanup = None
            if not other.local():
                # use the created uncompressed bundlerepo
                other = bundlerepo.bundlerepository(ui, repo.root, fname)

        # display the incoming changesets, oldest first unless asked
        o = other.changelog.nodesbetween(incoming)[0]
        if opts['newest_first']:
            o.reverse()
        displayer = show_changeset(ui, other, opts)
        for n in o:
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts['no_merges'] and len(parents) == 2:
                continue
            displayer.show(changenode=n)
            if opts['patch']:
                prev = (parents and parents[0]) or nullid
                dodiff(ui, ui, other, prev, n)
                ui.write("\n")
    finally:
        # close any bundle repo and remove the temporary bundle file
        if hasattr(other, 'close'):
            other.close()
        if cleanup:
            os.unlink(cleanup)
1832 1839
def init(ui, dest="."):
    """create a new repository in the given directory

    Initialize a new repository in the given directory. If the given
    directory does not exist, it is created.

    If no directory is given, the current directory is used.
    """
    # make the target directory on demand; hg.repository with create=1
    # then lays down the repository metadata inside it
    if not os.path.exists(dest):
        os.mkdir(dest)
    hg.repository(ui, dest, create=1)
1844 1851
def locate(ui, repo, *pats, **opts):
    """locate files matching specific patterns

    Print all files under Mercurial control whose names match the
    given patterns.

    This command searches the current directory and its
    subdirectories. To search an entire repository, move to the root
    of the repository.

    If no patterns are given to match, this command prints all file
    names.

    If you want to feed the output of this command into the "xargs"
    command, use the "-0" option to both this command and "xargs".
    This will avoid the problem of "xargs" treating single filenames
    that contain white space as multiple filenames.
    """
    # -0 terminates each name with NUL instead of newline
    if opts['print0']:
        end = '\0'
    else:
        end = '\n'
    node = None
    if opts['rev']:
        node = repo.lookup(opts['rev'])

    for src, abs, rel, exact in walk(repo, pats, opts, node=node,
                                     head='(?:.*/|)'):
        # in the working directory, skip files Mercurial doesn't track
        if not node and repo.dirstate.state(abs) == '?':
            continue
        if opts['fullpath']:
            name = os.path.join(repo.root, abs)
        else:
            name = (pats and rel) or abs
        ui.write(name, end)
1878 1885
def log(ui, repo, *pats, **opts):
    """show revision history of entire repository or files

    Print the revision history of the specified files or the entire project.

    By default this command outputs: changeset id and hash, tags,
    non-trivial parents, user, date and time, and a summary for each
    commit. When the -v/--verbose switch is used, the list of changed
    files and full commit message is shown.
    """
    class dui(object):
        # Implement and delegate some ui protocol.  Save hunks of
        # output for later display in the desired order.
        def __init__(self, ui):
            self.ui = ui
            self.hunk = {}  # rev -> list of buffered write() arg tuples
        def bump(self, rev):
            # start buffering output for a new revision
            self.rev = rev
            self.hunk[rev] = []
        def note(self, *args):
            if self.verbose:
                self.write(*args)
        def status(self, *args):
            if not self.quiet:
                self.write(*args)
        def write(self, *args):
            self.hunk[self.rev].append(args)
        def debug(self, *args):
            if self.debugflag:
                self.write(*args)
        def __getattr__(self, key):
            # everything else (verbose, quiet, ...) comes from the real ui
            return getattr(self.ui, key)

    changeiter, getchange, matchfn = walkchangerevs(ui, repo, pats, opts)

    # --limit caps the number of displayed changesets
    if opts['limit']:
        try:
            limit = int(opts['limit'])
        except ValueError:
            raise util.Abort(_('limit must be a positive integer'))
        if limit <= 0: raise util.Abort(_('limit must be positive'))
    else:
        limit = sys.maxint
    count = 0

    displayer = show_changeset(ui, repo, opts)
    for st, rev, fns in changeiter:
        if st == 'window':
            # fresh buffer for each revision window
            du = dui(ui)
            displayer.ui = du
        elif st == 'add':
            # render this revision into the buffer (filters may leave
            # the hunk empty, in which case it doesn't count for --limit)
            du.bump(rev)
            changenode = repo.changelog.node(rev)
            parents = [p for p in repo.changelog.parents(changenode)
                       if p != nullid]
            if opts['no_merges'] and len(parents) == 2:
                continue
            if opts['only_merges'] and len(parents) != 2:
                continue

            if opts['keyword']:
                # -k: require every keyword in user, description or the
                # first 20 changed file names (case-insensitive)
                changes = getchange(rev)
                miss = 0
                for k in [kw.lower() for kw in opts['keyword']]:
                    if not (k in changes[1].lower() or
                            k in changes[4].lower() or
                            k in " ".join(changes[3][:20]).lower()):
                        miss = 1
                        break
                if miss:
                    continue

            br = None
            if opts['branches']:
                br = repo.branchlookup([repo.changelog.node(rev)])

            displayer.show(rev, brinfo=br)
            if opts['patch']:
                prev = (parents and parents[0]) or nullid
                dodiff(du, du, repo, prev, changenode, match=matchfn)
                du.write("\n\n")
        elif st == 'iter':
            # flush buffered output in the desired order
            if count == limit: break
            if du.hunk[rev]:
                count += 1
            for args in du.hunk[rev]:
                ui.write(*args)
1966 1973
def manifest(ui, repo, rev=None):
    """output the latest or given revision of the project manifest

    Print a list of version controlled files for the given revision.

    The manifest is the list of files being version controlled. If no revision
    is given then the tip is used.
    """
    if not rev:
        node = repo.manifest.tip()
    else:
        try:
            # assume all revision numbers are for changesets
            node = repo.changelog.read(repo.lookup(rev))[0]
        except hg.RepoError:
            # fall back to treating rev as a manifest revision
            node = repo.manifest.lookup(rev)
    m = repo.manifest.read(node)
    mflags = repo.manifest.readflags(node)
    files = m.keys()
    files.sort()

    for f in files:
        mode = mflags[f] and "755" or "644"
        ui.write("%40s %3s %s\n" % (hex(m[f]), mode, f))
1992 1999
def outgoing(ui, repo, dest="default-push", **opts):
    """show changesets not found in destination

    Show changesets not found in the specified destination repository or
    the default push location. These are the changesets that would be pushed
    if a push was requested.

    See pull for valid destination format details.
    """
    dest = ui.expandpath(dest)
    # command-line --ssh/--remotecmd override the configured values
    for key in ('ssh', 'remotecmd'):
        if opts[key]:
            ui.setconfig("ui", key, opts[key])

    other = hg.repository(ui, dest)
    nodes = repo.findoutgoing(other, force=opts['force'])
    nodes = repo.changelog.nodesbetween(nodes)[0]
    if opts['newest_first']:
        nodes.reverse()
    displayer = show_changeset(ui, repo, opts)
    for node in nodes:
        parents = [p for p in repo.changelog.parents(node) if p != nullid]
        if opts['no_merges'] and len(parents) == 2:
            continue
        displayer.show(changenode=node)
        if opts['patch']:
            base = (parents and parents[0]) or nullid
            dodiff(ui, ui, repo, base, node)
            ui.write("\n")
2023 2030
def parents(ui, repo, rev=None, branches=None, **opts):
    """show the parents of the working dir or revision

    Print the working directory's parent revisions.
    """
    if not rev:
        nodes = repo.dirstate.parents()
    else:
        nodes = repo.changelog.parents(repo.lookup(rev))

    brinfo = None
    if branches is not None:
        brinfo = repo.branchlookup(nodes)
    displayer = show_changeset(ui, repo, opts)
    for node in nodes:
        # don't show the null parent
        if node == nullid:
            continue
        displayer.show(changenode=node, brinfo=brinfo)
2041 2048
def paths(ui, repo, search=None):
    """show definition of symbolic path names

    Show definition of symbolic path name NAME. If no name is given, show
    definition of available names.

    Path names are defined in the [paths] section of /etc/mercurial/hgrc
    and $HOME/.hgrc. If run inside a repository, .hg/hgrc is used, too.
    """
    entries = ui.configitems("paths")
    if not search:
        # no argument: dump every symbolic path definition
        for name, path in entries:
            ui.write("%s = %s\n" % (name, path))
        return
    for name, path in entries:
        if name == search:
            ui.write("%s\n" % path)
            return
    ui.warn(_("not found!\n"))
    return 1
2061 2068
def pull(ui, repo, source="default", **opts):
    """pull changes from the specified source

    Pull changes from a remote repository to a local one.

    This finds all changes from the repository at the specified path
    or URL and adds them to the local repository. By default, this
    does not update the copy of the project in the working directory.

    Valid URLs are of the form:

      local/filesystem/path
      http://[user@]host[:port][/path]
      https://[user@]host[:port][/path]
      ssh://[user@]host[:port][/path]

    Some notes about using SSH with Mercurial:
    - SSH requires an accessible shell account on the destination machine
      and a copy of hg in the remote path or specified with as remotecmd.
    - /path is relative to the remote user's home directory by default.
      Use two slashes at the start of a path to specify an absolute path.
    - Mercurial doesn't use its own compression via SSH; the right thing
      to do is to configure it in your ~/.ssh/ssh_config, e.g.:
        Host *.mylocalnetwork.example.com
          Compression off
        Host *
          Compression on
      Alternatively specify "ssh -C" as your ssh command in your hgrc or
      with the --ssh command line option.
    """
    source = ui.expandpath(source)
    ui.status(_('pulling from %s\n') % (source))

    # command-line --ssh/--remotecmd override the configured values
    for key in ('ssh', 'remotecmd'):
        if opts[key]:
            ui.setconfig("ui", key, opts[key])

    other = hg.repository(ui, source)
    revs = None
    if opts['rev']:
        # -r needs lookup support on the remote, only local repos have it
        if not other.local():
            raise util.Abort(_("pull -r doesn't work for remote repositories yet"))
        revs = [other.lookup(x) for x in opts['rev']]
    fetched = repo.pull(other, heads=revs, force=opts['force'])
    if not fetched:
        if opts['update']:
            return update(ui, repo)
        ui.status(_("(run 'hg update' to get a working copy)\n"))

    return fetched
2114 2121
def push(ui, repo, dest="default-push", **opts):
    """push changes to the specified destination

    Push changes from the local repository to the given destination.

    This is the symmetrical operation for pull. It helps to move
    changes from the current repository to a different one. If the
    destination is local this is identical to a pull in that directory
    from the current one.

    By default, push will refuse to run if it detects the result would
    increase the number of remote heads. This generally indicates that
    the client has forgotten to sync and merge before pushing.

    Valid URLs are of the form:

      local/filesystem/path
      ssh://[user@]host[:port][/path]

    Look at the help text for the pull command for important details
    about ssh:// URLs.
    """
    dest = ui.expandpath(dest)
    # wrap the message in _() for translation, as pull() does
    ui.status(_('pushing to %s\n') % (dest))

    # command-line --ssh/--remotecmd override the configured values
    if opts['ssh']:
        ui.setconfig("ui", "ssh", opts['ssh'])
    if opts['remotecmd']:
        ui.setconfig("ui", "remotecmd", opts['remotecmd'])

    other = hg.repository(ui, dest)
    revs = None
    if opts['rev']:
        # restrict the push to the given revisions and their ancestors
        revs = [repo.lookup(rev) for rev in opts['rev']]
    r = repo.push(other, opts['force'], revs=revs)
    return r
2151 2158
def rawcommit(ui, repo, *flist, **rc):
    """raw commit interface (DEPRECATED)

    (DEPRECATED)
    Lowlevel commit, for use in helper scripts.

    This command is not intended to be used by normal users, as it is
    primarily useful for importing from other SCMs.

    This command is now deprecated and will be removed in a future
    release, please use debugsetparents and commit instead.
    """

    ui.warn(_("(the rawcommit command is deprecated)\n"))

    # commit message: -m wins, then -l logfile (read errors are treated
    # as "no message" and caught by the check below)
    message = rc['message']
    if not message and rc['logfile']:
        try:
            message = open(rc['logfile']).read()
        except IOError:
            pass
    if not message and not rc['logfile']:
        raise util.Abort(_("missing commit message"))

    # files come from the command line plus an optional --files list
    files = relpath(repo, list(flist))
    if rc['files']:
        files += open(rc['files']).read().splitlines()

    # resolve the explicit parent revisions to nodes in place
    rc['parent'] = map(repo.lookup, rc['parent'])

    try:
        repo.rawcommit(files, message, rc['user'], rc['date'], *rc['parent'])
    except ValueError, inst:
        raise util.Abort(str(inst))
2186 2193
def recover(ui, repo):
    """roll back an interrupted transaction

    Recover from an interrupted commit or pull.

    This command tries to fix the repository status after an interrupted
    operation. It should only be necessary when Mercurial suggests it.
    """
    # verify the repository only after a successful recovery
    if not repo.recover():
        return False
    return repo.verify()
2198 2205
def remove(ui, repo, pat, *pats, **opts):
    """remove the specified files on the next commit

    Schedule the indicated files for removal from the repository.

    This command schedules the files to be removed at the next commit.
    This only removes files from the current branch, not from the
    entire project history. If the files still exist in the working
    directory, they will be deleted from it.
    """
    names = []
    def okaytoremove(abs, rel, exact):
        # refuse files that are locally modified (unless --force),
        # scheduled for add, or not tracked at all
        modified, added, removed, deleted, unknown = repo.changes(files=[abs])
        if modified and not opts['force']:
            reason = _('is modified')
        elif added:
            reason = _('has been marked for add')
        elif unknown:
            reason = _('is not managed')
        else:
            return True
        # only complain about files the user named explicitly
        if exact:
            ui.warn(_('not removing %s: file %s\n') % (rel, reason))
    for src, abs, rel, exact in walk(repo, (pat,) + pats, opts):
        if okaytoremove(abs, rel, exact):
            if ui.verbose or not exact:
                ui.status(_('removing %s\n') % rel)
            names.append(abs)
    repo.remove(names, unlink=True)
2230 2237
def rename(ui, repo, *pats, **opts):
    """rename files; equivalent of copy + remove

    Mark dest as copies of sources; mark sources for deletion. If
    dest is a directory, copies are put in that directory. If dest is
    a file, there can only be one source.

    By default, this command copies the contents of files as they
    stand in the working directory. If invoked with --after, the
    operation is recorded, but no copying is performed.

    This command takes effect in the next commit.

    NOTE: This command should be treated as experimental. While it
    should properly record rename files, this information is not yet
    fully used by merge, nor fully reported by log.
    """
    try:
        # non-blocking write lock: fail right away if another process
        # holds it, rather than waiting
        wlock = repo.wlock(0)
        # record the copies first, then schedule the sources for removal
        errs, copied = docopy(ui, repo, pats, opts, wlock)
        names = []
        for abs, rel, exact in copied:
            if ui.verbose or not exact:
                ui.status(_('removing %s\n') % rel)
            names.append(abs)
        repo.remove(names, True, wlock)
    except lock.LockHeld, inst:
        ui.warn(_("repository lock held by %s\n") % inst.args[0])
        errs = 1
    # exit status from docopy, or 1 when the lock could not be taken
    return errs
2261 2268
def revert(ui, repo, *pats, **opts):
    """revert modified files or dirs back to their unmodified states

    In its default mode, it reverts any uncommitted modifications made
    to the named files or directories. This restores the contents of
    the affected files to an unmodified state.

    Using the -r option, it reverts the given files or directories to
    their state as of an earlier revision. This can be helpful to "roll
    back" some or all of a change that should not have been committed.

    Revert modifies the working directory. It does not commit any
    changes, or change the parent of the current working directory.

    If a file has been deleted, it is recreated. If the executable
    mode of a file was changed, it is reset.

    If names are given, all files matching the names are reverted.

    If no arguments are given, all files in the repository are reverted.
    """
    # revert to --rev if given, otherwise to the first dirstate parent
    if opts['rev']:
        node = repo.lookup(opts['rev'])
    else:
        node = repo.dirstate.parents()[0]

    files, choose, anypats = matchpats(repo, pats, opts)
    modified, added, removed, deleted, unknown = repo.changes(match=choose)
    # drop scheduled adds and resurrect scheduled removes before updating
    repo.forget(added)
    repo.undelete(removed)

    return repo.update(node, False, True, choose, False)
2292 2299
def root(ui, repo):
    """print the root (top) of the current working dir

    Print the root directory of the current repository.
    """
    ui.write("%s\n" % repo.root)
2299 2306
def serve(ui, repo, **opts):
    """export the repository via HTTP

    Start a local HTTP repository browser and pull server.

    By default, the server logs accesses to stdout and errors to
    stderr. Use the "-A" and "-E" options to log to files.
    """

    if opts["stdio"]:
        # --stdio: speak the wire protocol over stdin/stdout instead of
        # starting an HTTP server (this is how ssh:// clients drive us)
        fin, fout = sys.stdin, sys.stdout
        # keep stray prints off the protocol stream
        sys.stdout = sys.stderr

        # Prevent insertion/deletion of CRs
        util.set_binary(fin)
        util.set_binary(fout)

        def getarg():
            # read one "name length\n<payload>" argument from the client
            argline = fin.readline()[:-1]
            arg, l = argline.split()
            val = fin.read(int(l))
            return arg, val
        def respond(v):
            # length-prefixed reply
            fout.write("%d\n" % len(v))
            fout.write(v)
            fout.flush()

        lock = None

        # command loop: one command name per line until EOF.
        # NOTE(review): "heads"/"lock"/"unlock" use plain `if` while the
        # rest chain with `elif` -- works because each branch responds
        # exactly once, but the asymmetry looks accidental; confirm.
        while 1:
            cmd = fin.readline()[:-1]
            if cmd == '':
                return
            if cmd == "heads":
                h = repo.heads()
                respond(" ".join(map(hex, h)) + "\n")
            if cmd == "lock":
                lock = repo.lock()
                respond("")
            if cmd == "unlock":
                if lock:
                    lock.release()
                lock = None
                respond("")
            elif cmd == "branches":
                arg, nodes = getarg()
                nodes = map(bin, nodes.split(" "))
                r = []
                for b in repo.branches(nodes):
                    r.append(" ".join(map(hex, b)) + "\n")
                respond("".join(r))
            elif cmd == "between":
                arg, pairs = getarg()
                pairs = [map(bin, p.split("-")) for p in pairs.split(" ")]
                r = []
                for b in repo.between(pairs):
                    r.append(" ".join(map(hex, b)) + "\n")
                respond("".join(r))
            elif cmd == "changegroup":
                nodes = []
                arg, roots = getarg()
                nodes = map(bin, roots.split(" "))

                # stream the changegroup out in 4k chunks, no framing
                cg = repo.changegroup(nodes, 'serve')
                while 1:
                    d = cg.read(4096)
                    if not d:
                        break
                    fout.write(d)

                fout.flush()

            elif cmd == "addchangegroup":
                # client pushes a changegroup; a held lock is required
                if not lock:
                    respond("not locked")
                    continue
                respond("")

                r = repo.addchangegroup(fin)
                respond("")

    # HTTP mode: copy relevant command-line options into the web config
    optlist = "name templates style address port ipv6 accesslog errorlog"
    for o in optlist.split():
        if opts[o]:
            ui.setconfig("web", o, opts[o])

    if opts['daemon'] and not opts['daemon_pipefds']:
        # re-exec ourselves as the daemon child; the pipe lets the child
        # tell the parent when it is ready to serve
        rfd, wfd = os.pipe()
        args = sys.argv[:]
        args.append('--daemon-pipefds=%d,%d' % (rfd, wfd))
        pid = os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0),
                         args[0], args)
        os.close(wfd)
        # block until the child writes its ready byte, then exit
        os.read(rfd, 1)
        os._exit(0)

    try:
        httpd = hgweb.create_server(repo)
    except socket.error, inst:
        raise util.Abort(_('cannot start server: ') + inst.args[1])

    if ui.verbose:
        # report the effective listen address, resolving wildcards
        addr, port = httpd.socket.getsockname()
        if addr == '0.0.0.0':
            addr = socket.gethostname()
        else:
            try:
                addr = socket.gethostbyaddr(addr)[0]
            except socket.error:
                pass
        if port != 80:
            ui.status(_('listening at http://%s:%d/\n') % (addr, port))
        else:
            ui.status(_('listening at http://%s/\n') % addr)

    if opts['pid_file']:
        fp = open(opts['pid_file'], 'w')
        fp.write(str(os.getpid()))
        fp.close()

    if opts['daemon_pipefds']:
        # child half of --daemon: signal readiness to the parent, then
        # detach by pointing fds 0/1/2 at the null device
        rfd, wfd = [int(x) for x in opts['daemon_pipefds'].split(',')]
        os.close(rfd)
        os.write(wfd, 'y')
        os.close(wfd)
        sys.stdout.flush()
        sys.stderr.flush()
        fd = os.open(util.nulldev, os.O_RDWR)
        if fd != 0: os.dup2(fd, 0)
        if fd != 1: os.dup2(fd, 1)
        if fd != 2: os.dup2(fd, 2)
        if fd not in (0, 1, 2): os.close(fd)

    httpd.serve_forever()
2434 2441
def status(ui, repo, *pats, **opts):
    """show changed files in the working directory

    Show changed files in the repository. If names are
    given, only files that match are shown.

    The codes used to show the status of files are:
    M = modified
    A = added
    R = removed
    ! = deleted, but still tracked
    ? = not tracked
    """

    # Resolve the patterns into a match function, then rewrite every
    # reported path relative to the directory the user ran us from.
    files, matchfn, anypats = matchpats(repo, pats, opts)
    cwd = (pats and repo.getcwd()) or ''
    relativized = []
    for namelist in repo.changes(files=files, match=matchfn):
        relativized.append([util.pathto(cwd, name) for name in namelist])
    modified, added, removed, deleted, unknown = relativized

    changetypes = [('modified', 'M', modified),
                   ('added', 'A', added),
                   ('removed', 'R', removed),
                   ('deleted', '!', deleted),
                   ('unknown', '?', unknown)]

    # --print0 terminates each entry with NUL (for xargs -0).
    end = opts['print0'] and '\0' or '\n'

    # Limit output to the explicitly requested categories, if any;
    # otherwise show every category.
    selected = [ct for ct in changetypes if opts[ct[0]]] or changetypes
    for opt, char, changes in selected:
        if opts['no_status']:
            format = "%%s%s" % end
        else:
            format = "%s %%s%s" % (char, end)
        for f in changes:
            ui.write(format % f)
2472 2479
def tag(ui, repo, name, rev_=None, **opts):
    """add a tag for the current tip or a given revision

    Name a particular revision using <name>.

    Tags are used to name particular revisions of the repository and are
    very useful to compare different revision, to go back to significant
    earlier versions or to mark branch points as releases, etc.

    If no revision is given, the tip is used.

    To facilitate version control, distribution, and merging of tags,
    they are stored as a file named ".hgtags" which is managed
    similarly to other project files and can be hand-edited if
    necessary. The file '.hg/localtags' is used for local tags (not
    shared among repositories).
    """
    # "tip" always names the newest changeset, so it may never be a tag.
    if name == "tip":
        raise util.Abort(_("the name 'tip' is reserved"))
    # rev_ is the deprecated positional REV form; -r REV is preferred.
    # Giving both at once is ambiguous, so refuse it.
    if rev_ is not None:
        ui.warn(_("use of 'hg tag NAME [REV]' is deprecated, "
                  "please use 'hg tag [-r REV] NAME' instead\n"))
        if opts['rev']:
            raise util.Abort(_("use only one form to specify the revision"))
    if opts['rev']:
        rev_ = opts['rev']
    # Resolve the revision to tag; default to the tip when none was given.
    if rev_:
        r = hex(repo.lookup(rev_))
    else:
        r = hex(repo.changelog.tip())

    # These characters would corrupt the .hgtags line format or clash
    # with revision-range syntax, so they are banned from tag names.
    disallowed = (revrangesep, '\r', '\n')
    for c in disallowed:
        if name.find(c) >= 0:
            raise util.Abort(_("%s cannot be used in a tag name") % repr(c))

    # Give pretag hooks a chance to veto (throw=True aborts on failure).
    repo.hook('pretag', throw=True, node=r, tag=name,
              local=int(not not opts['local']))

    # Local tags live in .hg/localtags and are never committed or shared.
    if opts['local']:
        repo.opener("localtags", "a").write("%s %s\n" % (r, name))
        repo.hook('tag', node=r, tag=name, local=1)
        return

    # A regular tag is a commit to .hgtags; refuse to clobber uncommitted
    # edits to that file.
    for x in repo.changes():
        if ".hgtags" in x:
            raise util.Abort(_("working copy of .hgtags is changed "
                               "(please commit .hgtags manually)"))

    repo.wfile(".hgtags", "ab").write("%s %s\n" % (r, name))
    # Start tracking .hgtags if it is not tracked yet.
    if repo.dirstate.state(".hgtags") == '?':
        repo.add([".hgtags"])

    message = (opts['message'] or
               _("Added tag %s for changeset %s") % (name, r))
    try:
        repo.commit([".hgtags"], message, opts['user'], opts['date'])
        repo.hook('tag', node=r, tag=name, local=0)
    except ValueError, inst:
        raise util.Abort(str(inst))
2533 2540
def tags(ui, repo):
    """list repository tags

    List the repository tags.

    This lists both regular and local tags.
    """

    # tagslist() yields oldest-first; reverse so newest tags print first.
    taglist = repo.tagslist()
    taglist.reverse()
    for tagname, node in taglist:
        try:
            info = "%5d:%s" % (repo.changelog.rev(node), hex(node))
        except KeyError:
            # The tagged node is not known to the changelog.
            info = "    ?:?"
        ui.write("%-30s %s\n" % (tagname, info))
2550 2557
def tip(ui, repo, **opts):
    """show the tip revision

    Show the tip revision.
    """
    tipnode = repo.changelog.tip()
    brinfo = None
    if opts['branches']:
        brinfo = repo.branchlookup([tipnode])
    show_changeset(ui, repo, opts).show(changenode=tipnode, brinfo=brinfo)
    if opts['patch']:
        # Diff the tip against its first parent.
        dodiff(ui, ui, repo, repo.changelog.parents(tipnode)[0], tipnode)
2563 2570
def unbundle(ui, repo, fname, **opts):
    """apply a changegroup file

    Apply a compressed changegroup file generated by the bundle
    command.
    """
    f = urllib.urlopen(fname)

    # Bundle files begin with a 6-byte magic: "HG10" plus a two-letter
    # compression tag ("BZ" for bzip2, "UN" for uncompressed).
    header = f.read(6)
    if not header.startswith("HG"):
        raise util.Abort(_("%s: not a Mercurial bundle file") % fname)
    elif not header.startswith("HG10"):
        raise util.Abort(_("%s: unknown bundle version") % fname)
    elif header == "HG10BZ":
        def generator(f):
            zd = bz2.BZ2Decompressor()
            # The on-disk header replaced the bzip2 stream's leading "BZ"
            # marker; feed "BZ" back so the rest decompresses cleanly.
            zd.decompress("BZ")
            for chunk in f:
                yield zd.decompress(chunk)
    elif header == "HG10UN":
        def generator(f):
            # Uncompressed bundle: pass the chunks straight through.
            for chunk in f:
                yield chunk
    else:
        raise util.Abort(_("%s: unknown bundle compression type")
                         % fname)
    # Stream the rest of the file through the chosen decompressor.
    gen = generator(util.filechunkiter(f, 4096))
    if repo.addchangegroup(util.chunkbuffer(gen)):
        # A true result from addchangegroup is treated as failure here —
        # propagate it as a non-zero exit status.
        return 1

    if opts['update']:
        return update(ui, repo)
    else:
        ui.status(_("(run 'hg update' to get a working copy)\n"))
2598 2605
def undo(ui, repo):
    """undo the last commit or pull

    Roll back the last pull or commit transaction on the
    repository, restoring the project to its earlier state.

    This command should be used with care. There is only one level of
    undo and there is no redo.

    This command is not intended for use on public repositories. Once
    a change is visible for pull by other users, undoing it locally is
    ineffective.
    """
    # All of the rollback work lives in the repository layer.
    repo.undo()
2613 2620
def update(ui, repo, node=None, merge=False, clean=False, force=None,
           branch=None, **opts):
    """update or merge working directory

    Update the working directory to the specified revision.

    If there are no outstanding changes in the working directory and
    there is a linear relationship between the current version and the
    requested version, the result is the requested version.

    Otherwise the result is a merge between the contents of the
    current working directory and the requested version. Files that
    changed between either parent are marked as changed for the next
    commit and a commit must be performed before any further updates
    are allowed.

    By default, update will refuse to run if doing so would require
    merging or discarding local changes.
    """
    if branch:
        # Map each head to the branch names it carries, then keep the
        # heads that carry the requested branch.
        branchmap = repo.branchlookup(branch=branch)
        heads = [h for h in branchmap if branch in branchmap[h]]
        if len(heads) > 1:
            ui.warn(_("Found multiple heads for %s\n") % branch)
            for h in heads:
                show_changeset(ui, repo, opts).show(changenode=h,
                                                    brinfo=branchmap)
            return 1
        if not heads:
            ui.warn(_("branch %s not found\n") % (branch))
            return 1
        node = heads[0]
        ui.warn(_("Using head %s for branch %s\n") % (short(node), branch))
    else:
        # No branch given: resolve the named revision, defaulting to tip.
        node = node and repo.lookup(node) or repo.changelog.tip()
    return repo.update(node, allow=merge, force=clean, forcemerge=force)
2653 2660
def verify(ui, repo):
    """verify the integrity of the repository

    Verify the integrity of the current repository.

    This will perform an extensive check of the repository's
    integrity, validating the hashes and checksums of each entry in
    the changelog, manifest, and tracked files, as well as the
    integrity of their crosslinks and indices.
    """
    # The checking logic is implemented by the repository itself.
    return repo.verify()
2665 2672
# Command options and aliases are listed here, alphabetically

# The command table maps "name|alias|..." strings (a leading "^" marks a
# command that appears in the short help list) to a
# (function, option list, synopsis) triple.  Each option entry is
# (short flag, long name, default value, help text).
table = {
    "^add":
        (add,
         [('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         _('hg add [OPTION]... [FILE]...')),
    "addremove":
        (addremove,
         [('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         _('hg addremove [OPTION]... [FILE]...')),
    "^annotate":
        (annotate,
         [('r', 'rev', '', _('annotate the specified revision')),
          ('a', 'text', None, _('treat all files as text')),
          ('u', 'user', None, _('list the author')),
          ('d', 'date', None, _('list the date')),
          ('n', 'number', None, _('list the revision number (default)')),
          ('c', 'changeset', None, _('list the changeset')),
          ('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         _('hg annotate [-r REV] [-a] [-u] [-d] [-n] [-c] FILE...')),
    "bundle":
        (bundle,
         [('f', 'force', None,
           _('run even when remote repository is unrelated'))],
         _('hg bundle FILE DEST')),
    "cat":
        (cat,
         [('o', 'output', '', _('print output to file with formatted name')),
          ('r', 'rev', '', _('print the given revision')),
          ('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         _('hg cat [OPTION]... FILE...')),
    "^clone":
        (clone,
         [('U', 'noupdate', None, _('do not update the new working directory')),
          ('r', 'rev', [],
           _('a changeset you would like to have after cloning')),
          ('', 'pull', None, _('use pull protocol to copy metadata')),
          ('e', 'ssh', '', _('specify ssh command to use')),
          ('', 'remotecmd', '',
           _('specify hg command to run on the remote side'))],
         _('hg clone [OPTION]... SOURCE [DEST]')),
    "^commit|ci":
        (commit,
         [('A', 'addremove', None, _('run addremove during commit')),
          ('m', 'message', '', _('use <text> as commit message')),
          ('l', 'logfile', '', _('read the commit message from <file>')),
          ('d', 'date', '', _('record datecode as commit date')),
          ('u', 'user', '', _('record user as commiter')),
          ('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         _('hg commit [OPTION]... [FILE]...')),
    "copy|cp":
        (copy,
         [('A', 'after', None, _('record a copy that has already occurred')),
          ('f', 'force', None,
           _('forcibly copy over an existing managed file')),
          ('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         _('hg copy [OPTION]... [SOURCE]... DEST')),
    "debugancestor": (debugancestor, [], _('debugancestor INDEX REV1 REV2')),
    "debugcomplete": (debugcomplete, [], _('debugcomplete CMD')),
    "debugrebuildstate":
        (debugrebuildstate,
         [('r', 'rev', '', _('revision to rebuild to'))],
         _('debugrebuildstate [-r REV] [REV]')),
    "debugcheckstate": (debugcheckstate, [], _('debugcheckstate')),
    "debugconfig": (debugconfig, [], _('debugconfig')),
    "debugsetparents": (debugsetparents, [], _('debugsetparents REV1 [REV2]')),
    "debugstate": (debugstate, [], _('debugstate')),
    "debugdata": (debugdata, [], _('debugdata FILE REV')),
    "debugindex": (debugindex, [], _('debugindex FILE')),
    "debugindexdot": (debugindexdot, [], _('debugindexdot FILE')),
    "debugrename": (debugrename, [], _('debugrename FILE [REV]')),
    "debugwalk":
        (debugwalk,
         [('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         _('debugwalk [OPTION]... [FILE]...')),
    "^diff":
        (diff,
         [('r', 'rev', [], _('revision')),
          ('a', 'text', None, _('treat all files as text')),
          ('p', 'show-function', None,
           _('show which function each change is in')),
          ('w', 'ignore-all-space', None,
           _('ignore white space when comparing lines')),
          ('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         _('hg diff [-a] [-I] [-X] [-r REV1 [-r REV2]] [FILE]...')),
    "^export":
        (export,
         [('o', 'output', '', _('print output to file with formatted name')),
          ('a', 'text', None, _('treat all files as text')),
          ('', 'switch-parent', None, _('diff against the second parent'))],
         _('hg export [-a] [-o OUTFILESPEC] REV...')),
    "forget":
        (forget,
         [('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         _('hg forget [OPTION]... FILE...')),
    "grep":
        (grep,
         [('0', 'print0', None, _('end fields with NUL')),
          ('', 'all', None, _('print all revisions that match')),
          ('i', 'ignore-case', None, _('ignore case when matching')),
          ('l', 'files-with-matches', None,
           _('print only filenames and revs that match')),
          ('n', 'line-number', None, _('print matching line numbers')),
          ('r', 'rev', [], _('search in given revision range')),
          ('u', 'user', None, _('print user who committed change')),
          ('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         _('hg grep [OPTION]... PATTERN [FILE]...')),
    "heads":
        (heads,
         [('b', 'branches', None, _('show branches')),
          ('', 'style', '', _('display using template map file')),
          ('r', 'rev', '', _('show only heads which are descendants of rev')),
          ('', 'template', '', _('display with template'))],
         _('hg heads [-b] [-r <rev>]')),
    "help": (help_, [], _('hg help [COMMAND]')),
    "identify|id": (identify, [], _('hg identify')),
    "import|patch":
        (import_,
         [('p', 'strip', 1,
           _('directory strip option for patch. This has the same\n') +
           _('meaning as the corresponding patch option')),
          ('b', 'base', '', _('base path')),
          ('f', 'force', None,
           _('skip check for outstanding uncommitted changes'))],
         _('hg import [-p NUM] [-b BASE] [-f] PATCH...')),
    "incoming|in": (incoming,
         [('M', 'no-merges', None, _('do not show merges')),
          ('f', 'force', None,
           _('run even when remote repository is unrelated')),
          ('', 'style', '', _('display using template map file')),
          ('n', 'newest-first', None, _('show newest record first')),
          ('', 'bundle', '', _('file to store the bundles into')),
          ('p', 'patch', None, _('show patch')),
          ('', 'template', '', _('display with template')),
          ('e', 'ssh', '', _('specify ssh command to use')),
          ('', 'remotecmd', '',
           _('specify hg command to run on the remote side'))],
         _('hg incoming [-p] [-n] [-M] [--bundle FILENAME] [SOURCE]')),
    "^init": (init, [], _('hg init [DEST]')),
    "locate":
        (locate,
         [('r', 'rev', '', _('search the repository as it stood at rev')),
          ('0', 'print0', None,
           _('end filenames with NUL, for use with xargs')),
          ('f', 'fullpath', None,
           _('print complete paths from the filesystem root')),
          ('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         _('hg locate [OPTION]... [PATTERN]...')),
    "^log|history":
        (log,
         [('b', 'branches', None, _('show branches')),
          ('k', 'keyword', [], _('search for a keyword')),
          ('l', 'limit', '', _('limit number of changes displayed')),
          ('r', 'rev', [], _('show the specified revision or range')),
          ('M', 'no-merges', None, _('do not show merges')),
          ('', 'style', '', _('display using template map file')),
          ('m', 'only-merges', None, _('show only merges')),
          ('p', 'patch', None, _('show patch')),
          ('', 'template', '', _('display with template')),
          ('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         _('hg log [OPTION]... [FILE]')),
    "manifest": (manifest, [], _('hg manifest [REV]')),
    "outgoing|out": (outgoing,
         [('M', 'no-merges', None, _('do not show merges')),
          ('f', 'force', None,
           _('run even when remote repository is unrelated')),
          ('p', 'patch', None, _('show patch')),
          ('', 'style', '', _('display using template map file')),
          ('n', 'newest-first', None, _('show newest record first')),
          ('', 'template', '', _('display with template')),
          ('e', 'ssh', '', _('specify ssh command to use')),
          ('', 'remotecmd', '',
           _('specify hg command to run on the remote side'))],
         _('hg outgoing [-M] [-p] [-n] [DEST]')),
    "^parents":
        (parents,
         [('b', 'branches', None, _('show branches')),
          ('', 'style', '', _('display using template map file')),
          ('', 'template', '', _('display with template'))],
         _('hg parents [-b] [REV]')),
    "paths": (paths, [], _('hg paths [NAME]')),
    "^pull":
        (pull,
         [('u', 'update', None,
           _('update the working directory to tip after pull')),
          ('e', 'ssh', '', _('specify ssh command to use')),
          ('f', 'force', None,
           _('run even when remote repository is unrelated')),
          ('r', 'rev', [], _('a specific revision you would like to pull')),
          ('', 'remotecmd', '',
           _('specify hg command to run on the remote side'))],
         _('hg pull [-u] [-e FILE] [-r REV]... [--remotecmd FILE] [SOURCE]')),
    "^push":
        (push,
         [('f', 'force', None, _('force push')),
          ('e', 'ssh', '', _('specify ssh command to use')),
          ('r', 'rev', [], _('a specific revision you would like to push')),
          ('', 'remotecmd', '',
           _('specify hg command to run on the remote side'))],
         _('hg push [-f] [-e FILE] [-r REV]... [--remotecmd FILE] [DEST]')),
    "debugrawcommit|rawcommit":
        (rawcommit,
         [('p', 'parent', [], _('parent')),
          ('d', 'date', '', _('date code')),
          ('u', 'user', '', _('user')),
          ('F', 'files', '', _('file list')),
          ('m', 'message', '', _('commit message')),
          ('l', 'logfile', '', _('commit message file'))],
         _('hg debugrawcommit [OPTION]... [FILE]...')),
    "recover": (recover, [], _('hg recover')),
    "^remove|rm":
        (remove,
         [('f', 'force', None, _('remove file even if modified')),
          ('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         _('hg remove [OPTION]... FILE...')),
    "rename|mv":
        (rename,
         [('A', 'after', None, _('record a rename that has already occurred')),
          ('f', 'force', None,
           _('forcibly copy over an existing managed file')),
          ('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         _('hg rename [OPTION]... SOURCE... DEST')),
    "^revert":
        (revert,
         [('r', 'rev', '', _('revision to revert to')),
          ('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         _('hg revert [-r REV] [NAME]...')),
    "root": (root, [], _('hg root')),
    "^serve":
        (serve,
         [('A', 'accesslog', '', _('name of access log file to write to')),
          ('d', 'daemon', None, _('run server in background')),
          ('', 'daemon-pipefds', '', _('used internally by daemon mode')),
          ('E', 'errorlog', '', _('name of error log file to write to')),
          ('p', 'port', 0, _('port to use (default: 8000)')),
          ('a', 'address', '', _('address to use')),
          ('n', 'name', '',
           _('name to show in web pages (default: working dir)')),
          ('', 'pid-file', '', _('name of file to write process ID to')),
          ('', 'stdio', None, _('for remote clients')),
          ('t', 'templates', '', _('web templates to use')),
          ('', 'style', '', _('template style to use')),
          ('6', 'ipv6', None, _('use IPv6 in addition to IPv4'))],
         _('hg serve [OPTION]...')),
    "^status|st":
        (status,
         [('m', 'modified', None, _('show only modified files')),
          ('a', 'added', None, _('show only added files')),
          ('r', 'removed', None, _('show only removed files')),
          ('d', 'deleted', None, _('show only deleted (but tracked) files')),
          ('u', 'unknown', None, _('show only unknown (not tracked) files')),
          ('n', 'no-status', None, _('hide status prefix')),
          ('0', 'print0', None,
           _('end filenames with NUL, for use with xargs')),
          ('I', 'include', [], _('include names matching the given patterns')),
          ('X', 'exclude', [], _('exclude names matching the given patterns'))],
         _('hg status [OPTION]... [FILE]...')),
    "tag":
        (tag,
         [('l', 'local', None, _('make the tag local')),
          ('m', 'message', '', _('message for tag commit log entry')),
          ('d', 'date', '', _('record datecode as commit date')),
          ('u', 'user', '', _('record user as commiter')),
          ('r', 'rev', '', _('revision to tag'))],
         _('hg tag [-l] [-m TEXT] [-d DATE] [-u USER] [-r REV] NAME')),
    "tags": (tags, [], _('hg tags')),
    "tip":
        (tip,
         [('b', 'branches', None, _('show branches')),
          ('', 'style', '', _('display using template map file')),
          ('p', 'patch', None, _('show patch')),
          ('', 'template', '', _('display with template'))],
         _('hg tip [-b] [-p]')),
    "unbundle":
        (unbundle,
         [('u', 'update', None,
           _('update the working directory to tip after unbundle'))],
         _('hg unbundle [-u] FILE')),
    "undo": (undo, [], _('hg undo')),
    "^update|up|checkout|co":
        (update,
         [('b', 'branch', '', _('checkout the head of a specific branch')),
          ('', 'style', '', _('display using template map file')),
          ('m', 'merge', None, _('allow merging of branches')),
          ('C', 'clean', None, _('overwrite locally modified files')),
          ('f', 'force', None, _('force a merge with outstanding changes')),
          ('', 'template', '', _('display with template'))],
         _('hg update [-b TAG] [-m] [-C] [-f] [REV]')),
    "verify": (verify, [], _('hg verify')),
    "version": (show_version, [], _('hg version')),
}
2973 2980
# Options accepted by every command; parsed before the command name and
# merged into each command's own option list by parse().
globalopts = [
    ('R', 'repository', '',
     _('repository root directory or symbolic path name')),
    ('', 'cwd', '', _('change working directory')),
    ('y', 'noninteractive', None,
     _('do not prompt, assume \'yes\' for any required answers')),
    ('q', 'quiet', None, _('suppress output')),
    ('v', 'verbose', None, _('enable additional output')),
    ('', 'debug', None, _('enable debugging output')),
    ('', 'debugger', None, _('start debugger')),
    ('', 'traceback', None, _('print traceback on exception')),
    ('', 'time', None, _('time how long the command takes')),
    ('', 'profile', None, _('print command execution profile')),
    ('', 'version', None, _('output version information and exit')),
    ('h', 'help', None, _('display help and exit')),
]

# Commands that never require an existing local repository.
norepo = ("clone init version help debugancestor debugcomplete debugdata"
          " debugindex debugindexdot")
# Commands that use a repository when one is present but also run without one.
optionalrepo = ("paths debugconfig")
2994 3001
def findpossible(cmd):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command
    """
    matches = {}
    debugmatches = {}
    for key in table.keys():
        aliases = key.lstrip("^").split("|")
        entry = (aliases, table[key])
        if cmd in aliases:
            # Exact alias match: record it under the name the user typed.
            matches[cmd] = entry
            continue
        for alias in aliases:
            if alias.startswith(cmd):
                # Prefix match.  debug* commands are collected separately
                # so they only win when nothing else matched.
                if aliases[0].startswith("debug"):
                    debugmatches[alias] = entry
                else:
                    matches[alias] = entry
                break

    if not matches and debugmatches:
        matches = debugmatches

    return matches
3019 3026
def find(cmd):
    """Return (aliases, command table entry) for command string.

    Raises AmbiguousCommand when cmd prefix-matches several commands and
    UnknownCommand when it matches none.
    """
    choice = findpossible(cmd)

    # An exact alias match always wins over prefix matches.
    # (``cmd in choice`` replaces the deprecated dict.has_key.)
    if cmd in choice:
        return choice[cmd]

    if len(choice) > 1:
        clist = choice.keys()
        clist.sort()
        raise AmbiguousCommand(cmd, clist)

    if choice:
        return choice.values()[0]

    raise UnknownCommand(cmd)
3036 3043
# Raised by the catchterm signal handler so termination signals unwind
# through normal exception handling.
class SignalInterrupt(Exception):
    """Exception raised on SIGTERM and SIGHUP."""
3039 3046
def catchterm(*args):
    """Signal handler: convert SIGTERM/SIGHUP into a SignalInterrupt."""
    raise SignalInterrupt
3042 3049
def run():
    """Command-line entry point: dispatch sys.argv and exit with its result."""
    sys.exit(dispatch(sys.argv[1:]))
3045 3052
# parse() raises this with args (command name or None, getopt error).
class ParseError(Exception):
    """Exception raised on errors in parsing the command line."""
3048 3055
def parse(ui, args):
    """Parse a full command line.

    Returns (command name or None, its alias list or None, remaining
    positional args, global option dict, command option dict).
    Raises ParseError when option parsing fails.
    """
    options = {}
    cmdoptions = {}

    try:
        # First pass: peel off the global options that precede the command.
        args = fancyopts.fancyopts(args, globalopts, options)
    except fancyopts.getopt.GetoptError, inst:
        raise ParseError(None, inst)

    if args:
        cmd, args = args[0], args[1:]
        # Canonicalize the command name via the command table.
        aliases, i = find(cmd)
        cmd = aliases[0]
        # Prepend per-command default arguments from the [defaults] section.
        defaults = ui.config("defaults", cmd)
        if defaults:
            args = defaults.split() + args
        c = list(i[1])
    else:
        cmd = None
        c = []

    # combine global options into local
    for o in globalopts:
        c.append((o[0], o[1], options[o[1]], o[3]))

    try:
        # Second pass: parse the command's own options (plus globals).
        args = fancyopts.fancyopts(args, c, cmdoptions)
    except fancyopts.getopt.GetoptError, inst:
        raise ParseError(cmd, inst)

    # separate global options back out
    for o in globalopts:
        n = o[1]
        options[n] = cmdoptions[n]
        del cmdoptions[n]

    return (cmd, cmd and i[0] or None, args, options, cmdoptions)
3086 3093
3087 3094 def dispatch(args):
3088 3095 signal.signal(signal.SIGTERM, catchterm)
3089 3096 try:
3090 3097 signal.signal(signal.SIGHUP, catchterm)
3091 3098 except AttributeError:
3092 3099 pass
3093 3100
3094 3101 try:
3095 3102 u = ui.ui()
3096 3103 except util.Abort, inst:
3097 3104 sys.stderr.write(_("abort: %s\n") % inst)
3098 3105 sys.exit(1)
3099 3106
3100 3107 external = []
3101 3108 for x in u.extensions():
3102 3109 def on_exception(exc, inst):
3103 3110 u.warn(_("*** failed to import extension %s\n") % x[1])
3104 3111 u.warn("%s\n" % inst)
3105 3112 if "--traceback" in sys.argv[1:]:
3106 3113 traceback.print_exc()
3107 3114 if x[1]:
3108 3115 try:
3109 3116 mod = imp.load_source(x[0], x[1])
3110 3117 except Exception, inst:
3111 3118 on_exception(Exception, inst)
3112 3119 continue
3113 3120 else:
3114 3121 def importh(name):
3115 3122 mod = __import__(name)
3116 3123 components = name.split('.')
3117 3124 for comp in components[1:]:
3118 3125 mod = getattr(mod, comp)
3119 3126 return mod
3120 3127 try:
3121 3128 try:
3122 3129 mod = importh("hgext." + x[0])
3123 3130 except ImportError:
3124 3131 mod = importh(x[0])
3125 3132 except Exception, inst:
3126 3133 on_exception(Exception, inst)
3127 3134 continue
3128 3135
3129 3136 external.append(mod)
3130 3137 for x in external:
3131 3138 cmdtable = getattr(x, 'cmdtable', {})
3132 3139 for t in cmdtable:
3133 3140 if t in table:
3134 3141 u.warn(_("module %s overrides %s\n") % (x.__name__, t))
3135 3142 table.update(cmdtable)
3136 3143
3137 3144 try:
3138 3145 cmd, func, args, options, cmdoptions = parse(u, args)
3139 3146 if options["time"]:
3140 3147 def get_times():
3141 3148 t = os.times()
3142 3149 if t[4] == 0.0: # Windows leaves this as zero, so use time.clock()
3143 3150 t = (t[0], t[1], t[2], t[3], time.clock())
3144 3151 return t
3145 3152 s = get_times()
3146 3153 def print_time():
3147 3154 t = get_times()
3148 3155 u.warn(_("Time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n") %
3149 3156 (t[4]-s[4], t[0]-s[0], t[2]-s[2], t[1]-s[1], t[3]-s[3]))
3150 3157 atexit.register(print_time)
3151 3158
3152 3159 u.updateopts(options["verbose"], options["debug"], options["quiet"],
3153 3160 not options["noninteractive"])
3154 3161
3155 3162 # enter the debugger before command execution
3156 3163 if options['debugger']:
3157 3164 pdb.set_trace()
3158 3165
3159 3166 try:
3160 3167 if options['cwd']:
3161 3168 try:
3162 3169 os.chdir(options['cwd'])
3163 3170 except OSError, inst:
3164 3171 raise util.Abort('%s: %s' %
3165 3172 (options['cwd'], inst.strerror))
3166 3173
3167 3174 path = u.expandpath(options["repository"]) or ""
3168 3175 repo = path and hg.repository(u, path=path) or None
3169 3176
3170 3177 if options['help']:
3171 3178 help_(u, cmd, options['version'])
3172 3179 sys.exit(0)
3173 3180 elif options['version']:
3174 3181 show_version(u)
3175 3182 sys.exit(0)
3176 3183 elif not cmd:
3177 3184 help_(u, 'shortlist')
3178 3185 sys.exit(0)
3179 3186
3180 3187 if cmd not in norepo.split():
3181 3188 try:
3182 3189 if not repo:
3183 3190 repo = hg.repository(u, path=path)
3184 3191 u = repo.ui
3185 3192 for x in external:
3186 3193 if hasattr(x, 'reposetup'):
3187 3194 x.reposetup(u, repo)
3188 3195 except hg.RepoError:
3189 3196 if cmd not in optionalrepo.split():
3190 3197 raise
3191 3198 d = lambda: func(u, repo, *args, **cmdoptions)
3192 3199 else:
3193 3200 d = lambda: func(u, *args, **cmdoptions)
3194 3201
3195 3202 try:
3196 3203 if options['profile']:
3197 3204 import hotshot, hotshot.stats
3198 3205 prof = hotshot.Profile("hg.prof")
3199 3206 try:
3200 3207 try:
3201 3208 return prof.runcall(d)
3202 3209 except:
3203 3210 try:
3204 3211 u.warn(_('exception raised - generating '
3205 3212 'profile anyway\n'))
3206 3213 except:
3207 3214 pass
3208 3215 raise
3209 3216 finally:
3210 3217 prof.close()
3211 3218 stats = hotshot.stats.load("hg.prof")
3212 3219 stats.strip_dirs()
3213 3220 stats.sort_stats('time', 'calls')
3214 3221 stats.print_stats(40)
3215 3222 else:
3216 3223 return d()
3217 3224 finally:
3218 3225 u.flush()
3219 3226 except:
3220 3227 # enter the debugger when we hit an exception
3221 3228 if options['debugger']:
3222 3229 pdb.post_mortem(sys.exc_info()[2])
3223 3230 if options['traceback']:
3224 3231 traceback.print_exc()
3225 3232 raise
3226 3233 except ParseError, inst:
3227 3234 if inst.args[0]:
3228 3235 u.warn(_("hg %s: %s\n") % (inst.args[0], inst.args[1]))
3229 3236 help_(u, inst.args[0])
3230 3237 else:
3231 3238 u.warn(_("hg: %s\n") % inst.args[1])
3232 3239 help_(u, 'shortlist')
3233 3240 sys.exit(-1)
3234 3241 except AmbiguousCommand, inst:
3235 3242 u.warn(_("hg: command '%s' is ambiguous:\n %s\n") %
3236 3243 (inst.args[0], " ".join(inst.args[1])))
3237 3244 sys.exit(1)
3238 3245 except UnknownCommand, inst:
3239 3246 u.warn(_("hg: unknown command '%s'\n") % inst.args[0])
3240 3247 help_(u, 'shortlist')
3241 3248 sys.exit(1)
3242 3249 except hg.RepoError, inst:
3243 3250 u.warn(_("abort: "), inst, "!\n")
3244 3251 except revlog.RevlogError, inst:
3245 3252 u.warn(_("abort: "), inst, "!\n")
3246 3253 except SignalInterrupt:
3247 3254 u.warn(_("killed!\n"))
3248 3255 except KeyboardInterrupt:
3249 3256 try:
3250 3257 u.warn(_("interrupted!\n"))
3251 3258 except IOError, inst:
3252 3259 if inst.errno == errno.EPIPE:
3253 3260 if u.debugflag:
3254 3261 u.warn(_("\nbroken pipe\n"))
3255 3262 else:
3256 3263 raise
3257 3264 except IOError, inst:
3258 3265 if hasattr(inst, "code"):
3259 3266 u.warn(_("abort: %s\n") % inst)
3260 3267 elif hasattr(inst, "reason"):
3261 3268 u.warn(_("abort: error: %s\n") % inst.reason[1])
3262 3269 elif hasattr(inst, "args") and inst[0] == errno.EPIPE:
3263 3270 if u.debugflag:
3264 3271 u.warn(_("broken pipe\n"))
3265 3272 elif getattr(inst, "strerror", None):
3266 3273 if getattr(inst, "filename", None):
3267 3274 u.warn(_("abort: %s - %s\n") % (inst.strerror, inst.filename))
3268 3275 else:
3269 3276 u.warn(_("abort: %s\n") % inst.strerror)
3270 3277 else:
3271 3278 raise
3272 3279 except OSError, inst:
3273 3280 if hasattr(inst, "filename"):
3274 3281 u.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
3275 3282 else:
3276 3283 u.warn(_("abort: %s\n") % inst.strerror)
3277 3284 except util.Abort, inst:
3278 3285 u.warn(_('abort: '), inst.args[0] % inst.args[1:], '\n')
3279 3286 sys.exit(1)
3280 3287 except TypeError, inst:
3281 3288 # was this an argument error?
3282 3289 tb = traceback.extract_tb(sys.exc_info()[2])
3283 3290 if len(tb) > 2: # no
3284 3291 raise
3285 3292 u.debug(inst, "\n")
3286 3293 u.warn(_("%s: invalid arguments\n") % cmd)
3287 3294 help_(u, cmd)
3288 3295 except SystemExit:
3289 3296 # don't catch this in the catch-all below
3290 3297 raise
3291 3298 except:
3292 3299 u.warn(_("** unknown exception encountered, details follow\n"))
3293 3300 u.warn(_("** report bug details to mercurial@selenic.com\n"))
3294 3301 u.warn(_("** Mercurial Distributed SCM (version %s)\n")
3295 3302 % version.get_version())
3296 3303 raise
3297 3304
3298 3305 sys.exit(-1)
@@ -1,1911 +1,1894 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 import struct, os, util
8 import os, util
9 9 import filelog, manifest, changelog, dirstate, repo
10 10 from node import *
11 11 from i18n import gettext as _
12 12 from demandload import *
13 13 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "changegroup")
14 15
15 16 class localrepository(object):
    def __del__(self):
        # drop the reference to any transaction object so it does not
        # outlive the repository instance
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or with create=1, create) the repository at *path*.

        When *path* is not given, walk upward from the current
        directory looking for a ".hg" directory.  Raises repo.RepoError
        when no repository can be found (or the given one is missing).
        """
        if not path:
            # search upward for a directory containing ".hg"
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    # reached the filesystem root without finding a repo
                    raise repo.RepoError(_("no repo found"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise repo.RepoError(_("repository %s not found") % path)

        self.root = os.path.abspath(path)
        self.ui = ui.ui(parentui=parentui)
        # opener writes inside .hg, wopener inside the working directory
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)
        self.manifest = manifest.manifest(self.opener)
        self.changelog = changelog.changelog(self.opener)
        self.tagscache = None       # lazily filled by tags()
        self.nodetagscache = None   # lazily filled by nodetags()
        self.encodepats = None      # lazily filled by wread()
        self.decodepats = None      # lazily filled by wwrite()
        self.transhandle = None     # current transaction, if any

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            # a missing per-repo hgrc is perfectly fine
            pass
    def hook(self, name, throw=False, **args):
        """Run every [hooks] entry whose name (before any ".suffix")
        matches *name*.

        Keyword arguments are exported to the hook's environment both
        as HG_<KEY> and <KEY>.  Returns True when all hooks succeeded.
        With throw=True a failing hook raises util.Abort instead of
        just warning.
        """
        def runhook(name, cmd):
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()] +
                       [(k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('error: %s hook %s\n') % (name, desc))
                return False
            return True

        r = True
        # collect matching hooks and run them in sorted (stable) order
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            # a failure does not stop the remaining hooks from running
            r = runhook(hname, cmd) and r
        return r
75 76
    def tags(self):
        '''return a mapping of tag to node'''
        if not self.tagscache:
            self.tagscache = {}
            def addtag(self, k, n):
                try:
                    bin_n = bin(n)
                except TypeError:
                    # malformed hex node: record an empty binary node
                    bin_n = ''
                self.tagscache[k.strip()] = bin_n

            try:
                # read each head of the tags file, ending with the tip
                # and add each tag found to the map, with "newer" ones
                # taking precedence
                fl = self.file(".hgtags")
                h = fl.heads()
                h.reverse()
                for r in h:
                    for l in fl.read(r).splitlines():
                        if l:
                            n, k = l.split(" ", 1)
                            addtag(self, k, n)
            except KeyError:
                # no .hgtags filelog exists yet
                pass

            try:
                # uncommitted, repo-local tags live in .hg/localtags
                f = self.opener("localtags")
                for l in f:
                    n, k = l.split(" ", 1)
                    addtag(self, k, n)
            except IOError:
                pass

            # 'tip' always exists and always wins
            self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache
113 114
114 115 def tagslist(self):
115 116 '''return a list of tags ordered by revision'''
116 117 l = []
117 118 for t, n in self.tags().items():
118 119 try:
119 120 r = self.changelog.rev(n)
120 121 except:
121 122 r = -2 # sort to the beginning of the list if unknown
122 123 l.append((r, t, n))
123 124 l.sort()
124 125 return [(t, n) for r, t, n in l]
125 126
126 127 def nodetags(self, node):
127 128 '''return the tags associated with a node'''
128 129 if not self.nodetagscache:
129 130 self.nodetagscache = {}
130 131 for t, n in self.tags().items():
131 132 self.nodetagscache.setdefault(n, []).append(t)
132 133 return self.nodetagscache.get(node, [])
133 134
134 135 def lookup(self, key):
135 136 try:
136 137 return self.tags()[key]
137 138 except KeyError:
138 139 try:
139 140 return self.changelog.lookup(key)
140 141 except:
141 142 raise repo.RepoError(_("unknown revision '%s'") % key)
142 143
    def dev(self):
        # device number of the .hg directory; used to compare whether
        # two repository objects refer to the same on-disk repository
        return os.stat(self.path).st_dev
145 146
    def local(self):
        # this is a local filesystem repository (not a remote proxy)
        return True
148 149
    def join(self, f):
        # path of f inside the .hg directory
        return os.path.join(self.path, f)
151 152
    def wjoin(self, f):
        # path of f inside the working directory
        return os.path.join(self.root, f)
154 155
    def file(self, f):
        """Return the filelog for tracked file *f* (a leading '/' is
        stripped)."""
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.opener, f)
159 160
    def getcwd(self):
        # current directory relative to the repository root
        return self.dirstate.getcwd()
162 163
    def wfile(self, f, mode='r'):
        # open file f from the working directory
        return self.wopener(f, mode)
165 166
166 167 def wread(self, filename):
167 168 if self.encodepats == None:
168 169 l = []
169 170 for pat, cmd in self.ui.configitems("encode"):
170 171 mf = util.matcher(self.root, "", [pat], [], [])[1]
171 172 l.append((mf, cmd))
172 173 self.encodepats = l
173 174
174 175 data = self.wopener(filename, 'r').read()
175 176
176 177 for mf, cmd in self.encodepats:
177 178 if mf(filename):
178 179 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
179 180 data = util.filter(data, cmd)
180 181 break
181 182
182 183 return data
183 184
184 185 def wwrite(self, filename, data, fd=None):
185 186 if self.decodepats == None:
186 187 l = []
187 188 for pat, cmd in self.ui.configitems("decode"):
188 189 mf = util.matcher(self.root, "", [pat], [], [])[1]
189 190 l.append((mf, cmd))
190 191 self.decodepats = l
191 192
192 193 for mf, cmd in self.decodepats:
193 194 if mf(filename):
194 195 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
195 196 data = util.filter(data, cmd)
196 197 break
197 198
198 199 if fd:
199 200 return fd.write(data)
200 201 return self.wopener(filename, 'w').write(data)
201 202
202 203 def transaction(self):
203 204 tr = self.transhandle
204 205 if tr != None and tr.running():
205 206 return tr.nest()
206 207
207 208 # save dirstate for undo
208 209 try:
209 210 ds = self.opener("dirstate").read()
210 211 except IOError:
211 212 ds = ""
212 213 self.opener("journal.dirstate", "w").write(ds)
213 214
214 215 tr = transaction.transaction(self.ui.warn, self.opener,
215 216 self.join("journal"),
216 217 aftertrans(self.path))
217 218 self.transhandle = tr
218 219 return tr
219 220
    def recover(self):
        """Roll back an interrupted transaction, if a journal exists.

        Returns True when something was rolled back, False otherwise.
        """
        l = self.lock()  # local reference keeps the lock held
        if os.path.exists(self.join("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.opener, self.join("journal"))
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
230 231
    def undo(self, wlock=None):
        """Roll back the last completed transaction (the "undo"
        journal) and restore the dirstate saved alongside it."""
        if not wlock:
            wlock = self.wlock()
        l = self.lock()  # local reference keeps the lock held
        if os.path.exists(self.join("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.opener, self.join("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no undo information available\n"))
243 244
    def wreload(self):
        # re-read the working-directory state from disk
        self.dirstate.read()
246 247
    def reload(self):
        # re-read store data and drop tag caches derived from it
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None
252 253
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None):
        """Acquire the lock file *lockname* inside .hg.

        With *wait* set, retry for ui.timeout seconds (default 600)
        instead of failing immediately; otherwise a held lock raises
        lock.LockHeld.  *acquirefn* is called once the lock is taken,
        *releasefn* when it is released.  Returns the lock object.
        """
        try:
            l = lock.lock(self.join(lockname), 0, releasefn)
        except lock.LockHeld, inst:
            if not wait:
                raise inst
            self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
            try:
                # default to 600 seconds timeout
                l = lock.lock(self.join(lockname),
                              int(self.ui.config("ui", "timeout") or 600),
                              releasefn)
            except lock.LockHeld, inst:
                raise util.Abort(_("timeout while waiting for "
                                   "lock held by %s") % inst.args[0])
        if acquirefn:
            acquirefn()
        return l
271 272
    def lock(self, wait=1):
        """Acquire the repository (store) lock; store data is re-read
        on acquisition."""
        return self.do_lock("lock", wait, acquirefn=self.reload)
274 275
    def wlock(self, wait=1):
        """Acquire the working-directory lock; the dirstate is written
        out on release and re-read on acquisition."""
        return self.do_lock("wlock", wait,
                            self.dirstate.write,
                            self.wreload)
279 280
    def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
        "determine whether a new filenode is needed"
        # candidate parents of the new filenode, one per manifest
        fp1 = manifest1.get(filename, nullid)
        fp2 = manifest2.get(filename, nullid)

        if fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = filelog.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and text == filelog.read(fp1):
            return (fp1, None, None)

        # (None, p1, p2): caller must create a new filenode with these
        # parents
        return (None, fp1, fp2)
298 299
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        """Commit *files* with explicitly supplied parents, bypassing
        the usual working-directory status machinery (used e.g. by
        import).  The dirstate is only updated when committing onto its
        current first parent."""
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        # touch the dirstate only if we commit on top of its parent
        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        if not wlock:
            wlock = self.wlock()
        l = self.lock()  # local reference keeps the lock held
        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    # unchanged from a parent: reuse existing filenode
                    mm[f] = entry
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # unreadable file: treat as removed in this commit
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
354 355
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None):
        """Commit working-directory changes as a new changeset.

        With *files*, commit exactly those files; otherwise commit
        everything self.changes() reports as modified/added/removed.
        Runs the precommit/pretxncommit/commit hooks.  Returns the new
        changeset node, or None when there was nothing to commit or the
        commit message editor returned an empty message.
        """
        commit = []
        remove = []
        changed = []

        if files:
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            modified, added, removed, deleted, unknown = self.changes(match=match)
            commit = modified + added
            remove = removed

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        # a merge (p2 set) must be committable even with no file changes
        if not commit and not remove and not force and p2 == nullid:
            self.ui.status(_("nothing changed\n"))
            return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                t = self.wread(f)
            except IOError:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise

            r = self.file(f)

            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                # record copy source and its revision in filelog metadata
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
                fp1, fp2 = nullid, nullid
            else:
                entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    # unchanged from a parent: reuse existing filenode
                    new[f] = entry
                    continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1 = m1.copy()
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        if not text:
            # build a template and ask the user for a commit message
            edittext = [""]
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            edittext = self.ui.edit("\n".join(edittext))
            os.chdir(olddir)
            if not edittext.rstrip():
                # empty message aborts the commit
                return None
            text = edittext

        user = user or self.ui.username()
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
474 475
    def walk(self, node=None, files=[], match=util.always):
        """Yield (source, filename) pairs for matching files.

        With *node*, walk that revision's manifest (source 'm') and
        warn about requested files missing from it; otherwise walk the
        dirstate.  NOTE: the mutable default for *files* is never
        mutated here, so it is safe.
        """
        if node:
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                fdict.pop(fn, None)
                if match(fn):
                    yield 'm', fn
            # anything left in fdict was asked for but not in the rev
            for fn in fdict:
                self.ui.warn(_('%s: No such file in rev %s\n') % (
                    util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match):
                yield src, fn
488 489
489 490 def changes(self, node1=None, node2=None, files=[], match=util.always,
490 491 wlock=None):
491 492 """return changes between two nodes or node and working directory
492 493
493 494 If node1 is None, use the first dirstate parent instead.
494 495 If node2 is None, compare node1 with working directory.
495 496 """
496 497
497 498 def fcmp(fn, mf):
498 499 t1 = self.wread(fn)
499 500 t2 = self.file(fn).read(mf.get(fn, nullid))
500 501 return cmp(t1, t2)
501 502
502 503 def mfmatches(node):
503 504 change = self.changelog.read(node)
504 505 mf = dict(self.manifest.read(change[0]))
505 506 for fn in mf.keys():
506 507 if not match(fn):
507 508 del mf[fn]
508 509 return mf
509 510
510 511 if node1:
511 512 # read the manifest from node1 before the manifest from node2,
512 513 # so that we'll hit the manifest cache if we're going through
513 514 # all the revisions in parent->child order.
514 515 mf1 = mfmatches(node1)
515 516
516 517 # are we comparing the working directory?
517 518 if not node2:
518 519 if not wlock:
519 520 try:
520 521 wlock = self.wlock(wait=0)
521 522 except lock.LockException:
522 523 wlock = None
523 524 lookup, modified, added, removed, deleted, unknown = (
524 525 self.dirstate.changes(files, match))
525 526
526 527 # are we comparing working dir against its parent?
527 528 if not node1:
528 529 if lookup:
529 530 # do a full compare of any files that might have changed
530 531 mf2 = mfmatches(self.dirstate.parents()[0])
531 532 for f in lookup:
532 533 if fcmp(f, mf2):
533 534 modified.append(f)
534 535 elif wlock is not None:
535 536 self.dirstate.update([f], "n")
536 537 else:
537 538 # we are comparing working dir against non-parent
538 539 # generate a pseudo-manifest for the working dir
539 540 mf2 = mfmatches(self.dirstate.parents()[0])
540 541 for f in lookup + modified + added:
541 542 mf2[f] = ""
542 543 for f in removed:
543 544 if f in mf2:
544 545 del mf2[f]
545 546 else:
546 547 # we are comparing two revisions
547 548 deleted, unknown = [], []
548 549 mf2 = mfmatches(node2)
549 550
550 551 if node1:
551 552 # flush lists from dirstate before comparing manifests
552 553 modified, added = [], []
553 554
554 555 for fn in mf2:
555 556 if mf1.has_key(fn):
556 557 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
557 558 modified.append(fn)
558 559 del mf1[fn]
559 560 else:
560 561 added.append(fn)
561 562
562 563 removed = mf1.keys()
563 564
564 565 # sort and return results:
565 566 for l in modified, added, removed, deleted, unknown:
566 567 l.sort()
567 568 return (modified, added, removed, deleted, unknown)
568 569
569 570 def add(self, list, wlock=None):
570 571 if not wlock:
571 572 wlock = self.wlock()
572 573 for f in list:
573 574 p = self.wjoin(f)
574 575 if not os.path.exists(p):
575 576 self.ui.warn(_("%s does not exist!\n") % f)
576 577 elif not os.path.isfile(p):
577 578 self.ui.warn(_("%s not added: only files supported currently\n")
578 579 % f)
579 580 elif self.dirstate.state(f) in 'an':
580 581 self.ui.warn(_("%s already tracked!\n") % f)
581 582 else:
582 583 self.dirstate.update([f], "a")
583 584
584 585 def forget(self, list, wlock=None):
585 586 if not wlock:
586 587 wlock = self.wlock()
587 588 for f in list:
588 589 if self.dirstate.state(f) not in 'ai':
589 590 self.ui.warn(_("%s not added!\n") % f)
590 591 else:
591 592 self.dirstate.forget([f])
592 593
    def remove(self, list, unlink=False, wlock=None):
        """Schedule files for removal at the next commit; with
        unlink=True delete them from the working directory first."""
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # an already-missing file is fine when unlinking
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                # never committed: just drop the pending add
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")
613 614
614 615 def undelete(self, list, wlock=None):
615 616 p = self.dirstate.parents()[0]
616 617 mn = self.changelog.read(p)[0]
617 618 mf = self.manifest.readflags(mn)
618 619 m = self.manifest.read(mn)
619 620 if not wlock:
620 621 wlock = self.wlock()
621 622 for f in list:
622 623 if self.dirstate.state(f) not in "r":
623 624 self.ui.warn("%s not removed!\n" % f)
624 625 else:
625 626 t = self.file(f).read(m[f])
626 627 self.wwrite(f, t)
627 628 util.set_exec(self.wjoin(f), mf[f])
628 629 self.dirstate.update([f], "n")
629 630
630 631 def copy(self, source, dest, wlock=None):
631 632 p = self.wjoin(dest)
632 633 if not os.path.exists(p):
633 634 self.ui.warn(_("%s does not exist!\n") % dest)
634 635 elif not os.path.isfile(p):
635 636 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
636 637 else:
637 638 if not wlock:
638 639 wlock = self.wlock()
639 640 if self.dirstate.state(dest) == '?':
640 641 self.dirstate.update([dest], "a")
641 642 self.dirstate.copy(source, dest)
642 643
643 644 def heads(self, start=None):
644 645 heads = self.changelog.heads(start)
645 646 # sort the output in rev descending order
646 647 heads = [(-self.changelog.rev(h), h) for h in heads]
647 648 heads.sort()
648 649 return [n for (r, n) in heads]
649 650
    # branchlookup returns a dict giving a list of branches for
    # each head. A branch is defined as the tag of a node or
    # the branch of the node's parents. If a node has multiple
    # branch tags, tags are eliminated if they are visible from other
    # branch tags.
    #
    # So, for this graph: a->b->c->d->e
    # \ /
    # aa -----/
    # a has tag 2.6.12
    # d has tag 2.6.13
    # e would have branch tags for 2.6.12 and 2.6.13. Because the node
    # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
    # from the list.
    #
    # It is possible that more than one head will have the same branch tag.
    # callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (ie checkout of a specific
    # branch).
    #
    # passing in a specific branch will limit the depth of the search
    # through the parents. It won't limit the branches returned in the
    # result though.
    def branchlookup(self, heads=None, branch=None):
        """Map each head node to the list of branch tags visible from
        it (see the discussion above for the elimination rules)."""
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume a previously-queued merge branch, keeping the
                # tags already found on the way here
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        # requested branch reached: stop descending here
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    # queue the second parent of a merge for later
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # transitive closure of nodes reachable through branches,
                # memoized in viscache
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
755 756
756 757 def branches(self, nodes):
757 758 if not nodes:
758 759 nodes = [self.changelog.tip()]
759 760 b = []
760 761 for n in nodes:
761 762 t = n
762 763 while n:
763 764 p = self.changelog.parents(n)
764 765 if p[1] != nullid or p[0] == nullid:
765 766 b.append((t, n, p[0], p[1]))
766 767 break
767 768 n = p[0]
768 769 return b
769 770
770 771 def between(self, pairs):
771 772 r = []
772 773
773 774 for top, bottom in pairs:
774 775 n, l, i = top, [], 0
775 776 f = 1
776 777
777 778 while n != bottom:
778 779 p = self.changelog.parents(n)[0]
779 780 if i == f:
780 781 l.append(n)
781 782 f = f * 2
782 783 n = p
783 784 i += 1
784 785
785 786 r.append(l)
786 787
787 788 return r
788 789
789 790 def findincoming(self, remote, base=None, heads=None, force=False):
790 791 m = self.changelog.nodemap
791 792 search = []
792 793 fetch = {}
793 794 seen = {}
794 795 seenbranch = {}
795 796 if base == None:
796 797 base = {}
797 798
798 799 # assume we're closer to the tip than the root
799 800 # and start by examining the heads
800 801 self.ui.status(_("searching for changes\n"))
801 802
802 803 if not heads:
803 804 heads = remote.heads()
804 805
805 806 unknown = []
806 807 for h in heads:
807 808 if h not in m:
808 809 unknown.append(h)
809 810 else:
810 811 base[h] = 1
811 812
812 813 if not unknown:
813 814 return []
814 815
815 816 rep = {}
816 817 reqcnt = 0
817 818
818 819 # search through remote branches
819 820 # a 'branch' here is a linear segment of history, with four parts:
820 821 # head, root, first parent, second parent
821 822 # (a branch always has two parents (or none) by definition)
822 823 unknown = remote.branches(unknown)
823 824 while unknown:
824 825 r = []
825 826 while unknown:
826 827 n = unknown.pop(0)
827 828 if n[0] in seen:
828 829 continue
829 830
830 831 self.ui.debug(_("examining %s:%s\n")
831 832 % (short(n[0]), short(n[1])))
832 833 if n[0] == nullid:
833 834 break
834 835 if n in seenbranch:
835 836 self.ui.debug(_("branch already found\n"))
836 837 continue
837 838 if n[1] and n[1] in m: # do we know the base?
838 839 self.ui.debug(_("found incomplete branch %s:%s\n")
839 840 % (short(n[0]), short(n[1])))
840 841 search.append(n) # schedule branch range for scanning
841 842 seenbranch[n] = 1
842 843 else:
843 844 if n[1] not in seen and n[1] not in fetch:
844 845 if n[2] in m and n[3] in m:
845 846 self.ui.debug(_("found new changeset %s\n") %
846 847 short(n[1]))
847 848 fetch[n[1]] = 1 # earliest unknown
848 849 base[n[2]] = 1 # latest known
849 850 continue
850 851
851 852 for a in n[2:4]:
852 853 if a not in rep:
853 854 r.append(a)
854 855 rep[a] = 1
855 856
856 857 seen[n[0]] = 1
857 858
858 859 if r:
859 860 reqcnt += 1
860 861 self.ui.debug(_("request %d: %s\n") %
861 862 (reqcnt, " ".join(map(short, r))))
862 863 for p in range(0, len(r), 10):
863 864 for b in remote.branches(r[p:p+10]):
864 865 self.ui.debug(_("received %s:%s\n") %
865 866 (short(b[0]), short(b[1])))
866 867 if b[0] in m:
867 868 self.ui.debug(_("found base node %s\n")
868 869 % short(b[0]))
869 870 base[b[0]] = 1
870 871 elif b[0] not in seen:
871 872 unknown.append(b)
872 873
873 874 # do binary search on the branches we found
874 875 while search:
875 876 n = search.pop(0)
876 877 reqcnt += 1
877 878 l = remote.between([(n[0], n[1])])[0]
878 879 l.append(n[1])
879 880 p = n[0]
880 881 f = 1
881 882 for i in l:
882 883 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
883 884 if i in m:
884 885 if f <= 2:
885 886 self.ui.debug(_("found new branch changeset %s\n") %
886 887 short(p))
887 888 fetch[p] = 1
888 889 base[i] = 1
889 890 else:
890 891 self.ui.debug(_("narrowed branch search to %s:%s\n")
891 892 % (short(p), short(i)))
892 893 search.append((p, i))
893 894 break
894 895 p, f = i, f * 2
895 896
896 897 # sanity check our fetch list
897 898 for f in fetch.keys():
898 899 if f in m:
899 900 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
900 901
901 902 if base.keys() == [nullid]:
902 903 if force:
903 904 self.ui.warn(_("warning: repository is unrelated\n"))
904 905 else:
905 906 raise util.Abort(_("repository is unrelated"))
906 907
907 908 self.ui.note(_("found new changesets starting at ") +
908 909 " ".join([short(f) for f in fetch]) + "\n")
909 910
910 911 self.ui.debug(_("%d total queries\n") % reqcnt)
911 912
912 913 return fetch.keys()
913 914
914 915 def findoutgoing(self, remote, base=None, heads=None, force=False):
915 916 if base == None:
916 917 base = {}
917 918 self.findincoming(remote, base, heads, force=force)
918 919
919 920 self.ui.debug(_("common changesets up to ")
920 921 + " ".join(map(short, base.keys())) + "\n")
921 922
922 923 remain = dict.fromkeys(self.changelog.nodemap)
923 924
924 925 # prune everything remote has from the tree
925 926 del remain[nullid]
926 927 remove = base.keys()
927 928 while remove:
928 929 n = remove.pop(0)
929 930 if n in remain:
930 931 del remain[n]
931 932 for p in self.changelog.parents(n):
932 933 remove.append(p)
933 934
934 935 # find every node whose parents have been pruned
935 936 subset = []
936 937 for n in remain:
937 938 p1, p2 = self.changelog.parents(n)
938 939 if p1 not in remain and p2 not in remain:
939 940 subset.append(n)
940 941
941 942 # this is the set of all roots we have to push
942 943 return subset
943 944
944 945 def pull(self, remote, heads=None, force=False):
945 946 l = self.lock()
946 947
947 948 # if we have an empty repo, fetch everything
948 949 if self.changelog.tip() == nullid:
949 950 self.ui.status(_("requesting all changes\n"))
950 951 fetch = [nullid]
951 952 else:
952 953 fetch = self.findincoming(remote, force=force)
953 954
954 955 if not fetch:
955 956 self.ui.status(_("no changes found\n"))
956 957 return 1
957 958
958 959 if heads is None:
959 960 cg = remote.changegroup(fetch, 'pull')
960 961 else:
961 962 cg = remote.changegroupsubset(fetch, heads, 'pull')
962 963 return self.addchangegroup(cg)
963 964
964 965 def push(self, remote, force=False, revs=None):
965 966 lock = remote.lock()
966 967
967 968 base = {}
968 969 heads = remote.heads()
969 970 inc = self.findincoming(remote, base, heads, force=force)
970 971 if not force and inc:
971 972 self.ui.warn(_("abort: unsynced remote changes!\n"))
972 973 self.ui.status(_("(did you forget to sync? use push -f to force)\n"))
973 974 return 1
974 975
975 976 update = self.findoutgoing(remote, base)
976 977 if revs is not None:
977 978 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
978 979 else:
979 980 bases, heads = update, self.changelog.heads()
980 981
981 982 if not bases:
982 983 self.ui.status(_("no changes found\n"))
983 984 return 1
984 985 elif not force:
985 986 if len(bases) < len(heads):
986 987 self.ui.warn(_("abort: push creates new remote branches!\n"))
987 988 self.ui.status(_("(did you forget to merge?"
988 989 " use push -f to force)\n"))
989 990 return 1
990 991
991 992 if revs is None:
992 993 cg = self.changegroup(update, 'push')
993 994 else:
994 995 cg = self.changegroupsubset(update, revs, 'push')
995 996 return remote.addchangegroup(cg)
996 997
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        Returns a util.chunkbuffer wrapping the chunk generator."""

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1266 1267
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        Returns a util.chunkbuffer wrapping the chunk generator."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        # ersatz set of the changelog revision numbers being sent
        revset = dict.fromkeys([cl.rev(n) for n in nodes])

        # a changeset's owning changenode is itself
        def identity(x):
            return x

        # yield the nodes of revlog whose linked changeset is in revset,
        # in revision order
        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        # callback that records every file touched by each outgoing changeset
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # map a manifest/file node back to its owning changenode
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            # changelog chunks first; collect changed files as a side effect
            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            # then the manifest chunks
            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            # then, per changed file, a filename chunk followed by its
            # filelog chunks
            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # terminating zero-length chunk
            yield changegroup.closechunk()
        self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1330 1331
    def addchangegroup(self, source):
        """Add a changegroup read from the file-like object source.

        Consumes, in order, the changelog group, the manifest group, and
        then one (filename chunk, filelog group) pair per changed file,
        all inside a single transaction.  Fires the prechangegroup,
        pretxnchangegroup, changegroup and incoming hooks."""

        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return self.changelog.count()

        def revmap(x):
            return self.changelog.rev(x)

        if not source:
            return

        self.hook('prechangegroup', throw=True)

        changesets = files = revisions = 0

        tr = self.transaction()

        oldheads = len(self.changelog.heads())

        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        co = self.changelog.tip()
        chunkiter = changegroup.chunkiter(source)
        cn = self.changelog.addgroup(chunkiter, csmap, tr, 1) # unique
        cnr, cor = map(self.changelog.rev, (cn, co))
        if cn == nullid:
            cnr = cor
        changesets = cnr - cor

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        mm = self.manifest.tip()
        chunkiter = changegroup.chunkiter(source)
        mo = self.manifest.addgroup(chunkiter, revmap, tr)

        # process the files
        self.ui.status(_("adding file changes\n"))
        while 1:
            # each file is introduced by a chunk carrying its name;
            # an empty chunk ends the stream
            f = changegroup.getchunk(source)
            if not f:
                break
            self.ui.debug(_("adding %s revisions\n") % f)
            fl = self.file(f)
            o = fl.count()
            chunkiter = changegroup.chunkiter(source)
            n = fl.addgroup(chunkiter, revmap, tr)
            revisions += fl.count() - o
            files += 1

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads > oldheads:
            heads = _(" (+%d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        self.hook('pretxnchangegroup', throw=True,
                  node=hex(self.changelog.node(cor+1)))

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)))

        # the range is empty when no changesets were added
        for i in range(cor + 1, cnr + 1):
            self.hook("incoming", node=hex(self.changelog.node(i)))
1418 1401
    def update(self, node, allow=False, force=False, choose=None,
               moddirstate=True, forcemerge=False, wlock=None):
        """Update the working directory to changeset node.

        allow enables cross-branch (merge) updates; force discards local
        differences; choose is an optional per-file filter predicate;
        moddirstate controls whether the dirstate is rewritten.

        Returns 1 on abort, otherwise True if any file merge failed and
        False on full success."""
        pl = self.dirstate.parents()
        if not force and pl[1] != nullid:
            self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
            return 1

        err = False

        # read the three manifests involved: local parent (m1), target
        # (m2, copied because we mutate it below) and their ancestor (ma)
        p1, p2 = pl[0], node
        pa = self.changelog.ancestor(p1, p2)
        m1n = self.changelog.read(p1)[0]
        m2n = self.changelog.read(p2)[0]
        man = self.manifest.ancestor(m1n, m2n)
        m1 = self.manifest.read(m1n)
        mf1 = self.manifest.readflags(m1n)
        m2 = self.manifest.read(m2n).copy()
        mf2 = self.manifest.readflags(m2n)
        ma = self.manifest.read(man)
        mfa = self.manifest.readflags(man)

        modified, added, removed, deleted, unknown = self.changes()

        # is this a jump, or a merge?  i.e. is there a linear path
        # from p1 to p2?
        linear_path = (pa == p1 or pa == p2)

        if allow and linear_path:
            raise util.Abort(_("there is nothing to merge, "
                               "just use 'hg update'"))
        if allow and not forcemerge:
            if modified or added or removed:
                raise util.Abort(_("outstanding uncommited changes"))
        if not forcemerge and not force:
            # refuse to clobber unknown working-dir files that differ
            # from the target revision's version
            for f in unknown:
                if f in m2:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) != 0:
                        raise util.Abort(_("'%s' already exists in the working"
                                           " dir and differs from remote") % f)

        # resolve the manifest to determine which files
        # we care about merging
        self.ui.note(_("resolving manifests\n"))
        self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
                      (force, allow, moddirstate, linear_path))
        self.ui.debug(_(" ancestor %s local %s remote %s\n") %
                      (short(man), short(m1n), short(m2n)))

        merge = {}      # files needing a 3-way merge: f -> (my, other, mode)
        get = {}        # files to fetch from the target: f -> filenode
        remove = []     # files to delete from the working dir

        # construct a working dir manifest
        mw = m1.copy()
        mfw = mf1.copy()
        umap = dict.fromkeys(unknown)

        for f in added + modified + unknown:
            mw[f] = ""
            mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))

        if moddirstate and not wlock:
            wlock = self.wlock()

        for f in deleted + removed:
            if f in mw:
                del mw[f]

            # If we're jumping between revisions (as opposed to merging),
            # and if neither the working directory nor the target rev has
            # the file, then we need to remove it from the dirstate, to
            # prevent the dirstate from listing the file when it is no
            # longer in the manifest.
            if moddirstate and linear_path and f not in m2:
                self.dirstate.forget((f,))

        # Compare manifests
        for f, n in mw.iteritems():
            if choose and not choose(f):
                continue
            if f in m2:
                s = 0

                # is the wfile new since m1, and match m2?
                if f not in m1:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) == 0:
                        n = m2[f]
                    del t1, t2

                # are files different?
                if n != m2[f]:
                    a = ma.get(f, nullid)
                    # are both different from the ancestor?
                    if n != a and m2[f] != a:
                        self.ui.debug(_(" %s versions differ, resolve\n") % f)
                        # merge executable bits
                        # "if we changed or they changed, change in merge"
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        merge[f] = (m1.get(f, nullid), m2[f], mode)
                        s = 1
                    # are we clobbering?
                    # is remote's version newer?
                    # or are we going back in time?
                    elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
                        self.ui.debug(_(" remote %s is newer, get\n") % f)
                        get[f] = m2[f]
                        s = 1
                elif f in umap:
                    # this unknown file is the same as the checkout
                    get[f] = m2[f]

                if not s and mfw[f] != mf2[f]:
                    if force:
                        self.ui.debug(_(" updating permissions for %s\n") % f)
                        util.set_exec(self.wjoin(f), mf2[f])
                    else:
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        if mode != b:
                            self.ui.debug(_(" updating permissions for %s\n")
                                          % f)
                            util.set_exec(self.wjoin(f), mode)
                # handled; whatever is left in m2 afterwards is remote-only
                del m2[f]
            elif f in ma:
                if n != ma[f]:
                    r = _("d")
                    if not force and (linear_path or allow):
                        r = self.ui.prompt(
                            (_(" local changed %s which remote deleted\n") % f) +
                            _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                    if r == _("d"):
                        remove.append(f)
                else:
                    self.ui.debug(_("other deleted %s\n") % f)
                    remove.append(f) # other deleted it
            else:
                # file is created on branch or in working directory
                if force and f not in umap:
                    self.ui.debug(_("remote deleted %s, clobbering\n") % f)
                    remove.append(f)
                elif n == m1.get(f, nullid): # same as parent
                    if p2 == pa: # going backwards?
                        self.ui.debug(_("remote deleted %s\n") % f)
                        remove.append(f)
                    else:
                        self.ui.debug(_("local modified %s, keeping\n") % f)
                else:
                    self.ui.debug(_("working dir created %s, keeping\n") % f)

        # files only present in the target revision
        for f, n in m2.iteritems():
            if choose and not choose(f):
                continue
            if f[0] == "/":
                continue
            if f in ma and n != ma[f]:
                r = _("k")
                if not force and (linear_path or allow):
                    r = self.ui.prompt(
                        (_("remote changed %s which local deleted\n") % f) +
                        _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                if r == _("k"):
                    get[f] = n
            elif f not in ma:
                self.ui.debug(_("remote created %s\n") % f)
                get[f] = n
            else:
                if force or p2 == pa: # going backwards?
                    self.ui.debug(_("local deleted %s, recreating\n") % f)
                    get[f] = n
                else:
                    self.ui.debug(_("local deleted %s\n") % f)

        del mw, m1, m2, ma

        if force:
            # forced update: take the remote side of every would-be merge
            for f in merge:
                get[f] = merge[f][1]
            merge = {}

        if linear_path or force:
            # we don't need to do any magic, just jump to the new rev
            branch_merge = False
            p1, p2 = p2, nullid
        else:
            if not allow:
                self.ui.status(_("this update spans a branch"
                                 " affecting the following files:\n"))
                fl = merge.keys() + get.keys()
                fl.sort()
                for f in fl:
                    cf = ""
                    if f in merge:
                        cf = _(" (resolve)")
                    self.ui.status(" %s%s\n" % (f, cf))
                self.ui.warn(_("aborting update spanning branches!\n"))
                self.ui.status(_("(use update -m to merge across branches"
                                 " or -C to lose changes)\n"))
                return 1
            branch_merge = True

        # get the files we don't need to change
        files = get.keys()
        files.sort()
        for f in files:
            if f[0] == "/":
                continue
            self.ui.note(_("getting %s\n") % f)
            t = self.file(f).read(get[f])
            self.wwrite(f, t)
            util.set_exec(self.wjoin(f), mf2[f])
            if moddirstate:
                if branch_merge:
                    self.dirstate.update([f], 'n', st_mtime=-1)
                else:
                    self.dirstate.update([f], 'n')

        # merge the tricky bits
        failedmerge = []
        files = merge.keys()
        files.sort()
        xp1 = hex(p1)
        xp2 = hex(p2)
        for f in files:
            self.ui.status(_("merging %s\n") % f)
            my, other, flag = merge[f]
            ret = self.merge3(f, my, other, xp1, xp2)
            if ret:
                err = True
                failedmerge.append(f)
            util.set_exec(self.wjoin(f), flag)
            if moddirstate:
                if branch_merge:
                    # We've done a branch merge, mark this file as merged
                    # so that we properly record the merger later
                    self.dirstate.update([f], 'm')
                else:
                    # We've update-merged a locally modified file, so
                    # we set the dirstate to emulate a normal checkout
                    # of that file some time in the past. Thus our
                    # merge will appear as a normal local file
                    # modification.
                    f_len = len(self.file(f).read(other))
                    self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)

        remove.sort()
        for f in remove:
            self.ui.note(_("removing %s\n") % f)
            util.audit_path(f)
            try:
                util.unlink(self.wjoin(f))
            except OSError, inst:
                # already gone is fine; report anything else
                if inst.errno != errno.ENOENT:
                    self.ui.warn(_("update failed to remove %s: %s!\n") %
                                 (f, inst.strerror))
        if moddirstate:
            if branch_merge:
                self.dirstate.update(remove, 'r')
            else:
                self.dirstate.forget(remove)

        if moddirstate:
            self.dirstate.setparents(p1, p2)

        stat = ((len(get), _("updated")),
                (len(merge) - len(failedmerge), _("merged")),
                (len(remove), _("removed")),
                (len(failedmerge), _("unresolved")))
        note = ", ".join([_("%d files %s") % s for s in stat])
        self.ui.note("%s\n" % note)
        if moddirstate and branch_merge:
            self.ui.note(_("(branch merge, don't forget to commit)\n"))

        return err
1697 1680
1698 1681 def merge3(self, fn, my, other, p1, p2):
1699 1682 """perform a 3-way merge in the working directory"""
1700 1683
1701 1684 def temp(prefix, node):
1702 1685 pre = "%s~%s." % (os.path.basename(fn), prefix)
1703 1686 (fd, name) = tempfile.mkstemp("", pre)
1704 1687 f = os.fdopen(fd, "wb")
1705 1688 self.wwrite(fn, fl.read(node), f)
1706 1689 f.close()
1707 1690 return name
1708 1691
1709 1692 fl = self.file(fn)
1710 1693 base = fl.ancestor(my, other)
1711 1694 a = self.wjoin(fn)
1712 1695 b = temp("base", base)
1713 1696 c = temp("other", other)
1714 1697
1715 1698 self.ui.note(_("resolving %s\n") % fn)
1716 1699 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1717 1700 (fn, short(my), short(other), short(base)))
1718 1701
1719 1702 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1720 1703 or "hgmerge")
1721 1704 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1722 1705 environ={'HG_FILE': fn,
1723 1706 'HG_MY_NODE': p1,
1724 1707 'HG_OTHER_NODE': p2,
1725 1708 'HG_FILE_MY_NODE': hex(my),
1726 1709 'HG_FILE_OTHER_NODE': hex(other),
1727 1710 'HG_FILE_BASE_NODE': hex(base)})
1728 1711 if r:
1729 1712 self.ui.warn(_("merging %s failed!\n") % fn)
1730 1713
1731 1714 os.unlink(b)
1732 1715 os.unlink(c)
1733 1716 return r
1734 1717
1735 1718 def verify(self):
1736 1719 filelinkrevs = {}
1737 1720 filenodes = {}
1738 1721 changesets = revisions = files = 0
1739 1722 errors = [0]
1740 1723 neededmanifests = {}
1741 1724
1742 1725 def err(msg):
1743 1726 self.ui.warn(msg + "\n")
1744 1727 errors[0] += 1
1745 1728
1746 1729 def checksize(obj, name):
1747 1730 d = obj.checksize()
1748 1731 if d[0]:
1749 1732 err(_("%s data length off by %d bytes") % (name, d[0]))
1750 1733 if d[1]:
1751 1734 err(_("%s index contains %d extra bytes") % (name, d[1]))
1752 1735
1753 1736 seen = {}
1754 1737 self.ui.status(_("checking changesets\n"))
1755 1738 checksize(self.changelog, "changelog")
1756 1739
1757 1740 for i in range(self.changelog.count()):
1758 1741 changesets += 1
1759 1742 n = self.changelog.node(i)
1760 1743 l = self.changelog.linkrev(n)
1761 1744 if l != i:
1762 1745 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1763 1746 if n in seen:
1764 1747 err(_("duplicate changeset at revision %d") % i)
1765 1748 seen[n] = 1
1766 1749
1767 1750 for p in self.changelog.parents(n):
1768 1751 if p not in self.changelog.nodemap:
1769 1752 err(_("changeset %s has unknown parent %s") %
1770 1753 (short(n), short(p)))
1771 1754 try:
1772 1755 changes = self.changelog.read(n)
1773 1756 except KeyboardInterrupt:
1774 1757 self.ui.warn(_("interrupted"))
1775 1758 raise
1776 1759 except Exception, inst:
1777 1760 err(_("unpacking changeset %s: %s") % (short(n), inst))
1778 1761 continue
1779 1762
1780 1763 neededmanifests[changes[0]] = n
1781 1764
1782 1765 for f in changes[3]:
1783 1766 filelinkrevs.setdefault(f, []).append(i)
1784 1767
1785 1768 seen = {}
1786 1769 self.ui.status(_("checking manifests\n"))
1787 1770 checksize(self.manifest, "manifest")
1788 1771
1789 1772 for i in range(self.manifest.count()):
1790 1773 n = self.manifest.node(i)
1791 1774 l = self.manifest.linkrev(n)
1792 1775
1793 1776 if l < 0 or l >= self.changelog.count():
1794 1777 err(_("bad manifest link (%d) at revision %d") % (l, i))
1795 1778
1796 1779 if n in neededmanifests:
1797 1780 del neededmanifests[n]
1798 1781
1799 1782 if n in seen:
1800 1783 err(_("duplicate manifest at revision %d") % i)
1801 1784
1802 1785 seen[n] = 1
1803 1786
1804 1787 for p in self.manifest.parents(n):
1805 1788 if p not in self.manifest.nodemap:
1806 1789 err(_("manifest %s has unknown parent %s") %
1807 1790 (short(n), short(p)))
1808 1791
1809 1792 try:
1810 1793 delta = mdiff.patchtext(self.manifest.delta(n))
1811 1794 except KeyboardInterrupt:
1812 1795 self.ui.warn(_("interrupted"))
1813 1796 raise
1814 1797 except Exception, inst:
1815 1798 err(_("unpacking manifest %s: %s") % (short(n), inst))
1816 1799 continue
1817 1800
1818 1801 try:
1819 1802 ff = [ l.split('\0') for l in delta.splitlines() ]
1820 1803 for f, fn in ff:
1821 1804 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1822 1805 except (ValueError, TypeError), inst:
1823 1806 err(_("broken delta in manifest %s: %s") % (short(n), inst))
1824 1807
1825 1808 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1826 1809
1827 1810 for m, c in neededmanifests.items():
1828 1811 err(_("Changeset %s refers to unknown manifest %s") %
1829 1812 (short(m), short(c)))
1830 1813 del neededmanifests
1831 1814
1832 1815 for f in filenodes:
1833 1816 if f not in filelinkrevs:
1834 1817 err(_("file %s in manifest but not in changesets") % f)
1835 1818
1836 1819 for f in filelinkrevs:
1837 1820 if f not in filenodes:
1838 1821 err(_("file %s in changeset but not in manifest") % f)
1839 1822
1840 1823 self.ui.status(_("checking files\n"))
1841 1824 ff = filenodes.keys()
1842 1825 ff.sort()
1843 1826 for f in ff:
1844 1827 if f == "/dev/null":
1845 1828 continue
1846 1829 files += 1
1847 1830 if not f:
1848 1831 err(_("file without name in manifest %s") % short(n))
1849 1832 continue
1850 1833 fl = self.file(f)
1851 1834 checksize(fl, f)
1852 1835
1853 1836 nodes = {nullid: 1}
1854 1837 seen = {}
1855 1838 for i in range(fl.count()):
1856 1839 revisions += 1
1857 1840 n = fl.node(i)
1858 1841
1859 1842 if n in seen:
1860 1843 err(_("%s: duplicate revision %d") % (f, i))
1861 1844 if n not in filenodes[f]:
1862 1845 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1863 1846 else:
1864 1847 del filenodes[f][n]
1865 1848
1866 1849 flr = fl.linkrev(n)
1867 1850 if flr not in filelinkrevs.get(f, []):
1868 1851 err(_("%s:%s points to unexpected changeset %d")
1869 1852 % (f, short(n), flr))
1870 1853 else:
1871 1854 filelinkrevs[f].remove(flr)
1872 1855
1873 1856 # verify contents
1874 1857 try:
1875 1858 t = fl.read(n)
1876 1859 except KeyboardInterrupt:
1877 1860 self.ui.warn(_("interrupted"))
1878 1861 raise
1879 1862 except Exception, inst:
1880 1863 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1881 1864
1882 1865 # verify parents
1883 1866 (p1, p2) = fl.parents(n)
1884 1867 if p1 not in nodes:
1885 1868 err(_("file %s:%s unknown parent 1 %s") %
1886 1869 (f, short(n), short(p1)))
1887 1870 if p2 not in nodes:
1888 1871 err(_("file %s:%s unknown parent 2 %s") %
1889 1872 (f, short(n), short(p1)))
1890 1873 nodes[n] = 1
1891 1874
1892 1875 # cross-check
1893 1876 for node in filenodes[f]:
1894 1877 err(_("node %s in manifests not in %s") % (hex(node), f))
1895 1878
1896 1879 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1897 1880 (files, changesets, revisions))
1898 1881
1899 1882 if errors[0]:
1900 1883 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1901 1884 return 1
1902 1885
1903 1886 # used to avoid circular references so destructors work
1904 1887 def aftertrans(base):
1905 1888 p = base
1906 1889 def a():
1907 1890 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1908 1891 util.rename(os.path.join(p, "journal.dirstate"),
1909 1892 os.path.join(p, "undo.dirstate"))
1910 1893 return a
1911 1894
@@ -1,883 +1,881 b''
1 1 """
2 2 revlog.py - storage back-end for mercurial
3 3
4 4 This provides efficient delta storage with O(1) retrieve and append
5 5 and O(changes) merge between branches
6 6
7 7 Copyright 2005 Matt Mackall <mpm@selenic.com>
8 8
9 9 This software may be used and distributed according to the terms
10 10 of the GNU General Public License, incorporated herein by reference.
11 11 """
12 12
13 13 from node import *
14 14 from i18n import gettext as _
15 15 from demandload import demandload
16 demandload(globals(), "binascii errno heapq mdiff os sha struct zlib")
16 demandload(globals(), "binascii changegroup errno heapq mdiff os")
17 demandload(globals(), "sha struct zlib")
17 18
18 19 def hash(text, p1, p2):
19 20 """generate a hash from the given text and its parent hashes
20 21
21 22 This hash combines both the current file contents and its history
22 23 in a manner that makes it easy to distinguish nodes with the same
23 24 content in the revision graph.
24 25 """
25 26 l = [p1, p2]
26 27 l.sort()
27 28 s = sha.new(l[0])
28 29 s.update(l[1])
29 30 s.update(text)
30 31 return s.digest()
31 32
32 33 def compress(text):
33 34 """ generate a possibly-compressed representation of text """
34 35 if not text: return ("", text)
35 36 if len(text) < 44:
36 37 if text[0] == '\0': return ("", text)
37 38 return ('u', text)
38 39 bin = zlib.compress(text)
39 40 if len(bin) > len(text):
40 41 if text[0] == '\0': return ("", text)
41 42 return ('u', text)
42 43 return ("", bin)
43 44
44 45 def decompress(bin):
45 46 """ decompress the given input """
46 47 if not bin: return bin
47 48 t = bin[0]
48 49 if t == '\0': return bin
49 50 if t == 'x': return zlib.decompress(bin)
50 51 if t == 'u': return bin[1:]
51 52 raise RevlogError(_("unknown compression type %r") % t)
52 53
53 54 indexformat = ">4l20s20s20s"
54 55
55 56 class lazyparser(object):
56 57 """
57 58 this class avoids the need to parse the entirety of large indices
58 59
59 60 By default we parse and load 1000 entries at a time.
60 61
61 62 If no position is specified, we load the whole index, and replace
62 63 the lazy objects in revlog with the underlying objects for
63 64 efficiency in cases where we look at most of the nodes.
64 65 """
65 66 def __init__(self, data, revlog):
66 67 self.data = data
67 68 self.s = struct.calcsize(indexformat)
68 69 self.l = len(data)/self.s
69 70 self.index = [None] * self.l
70 71 self.map = {nullid: -1}
71 72 self.all = 0
72 73 self.revlog = revlog
73 74
74 75 def trunc(self, pos):
75 76 self.l = pos/self.s
76 77
77 78 def load(self, pos=None):
78 79 if self.all: return
79 80 if pos is not None:
80 81 block = pos / 1000
81 82 i = block * 1000
82 83 end = min(self.l, i + 1000)
83 84 else:
84 85 self.all = 1
85 86 i = 0
86 87 end = self.l
87 88 self.revlog.index = self.index
88 89 self.revlog.nodemap = self.map
89 90
90 91 while i < end:
91 92 d = self.data[i * self.s: (i + 1) * self.s]
92 93 e = struct.unpack(indexformat, d)
93 94 self.index[i] = e
94 95 self.map[e[6]] = i
95 96 i += 1
96 97
97 98 class lazyindex(object):
98 99 """a lazy version of the index array"""
99 100 def __init__(self, parser):
100 101 self.p = parser
101 102 def __len__(self):
102 103 return len(self.p.index)
103 104 def load(self, pos):
104 105 if pos < 0:
105 106 pos += len(self.p.index)
106 107 self.p.load(pos)
107 108 return self.p.index[pos]
108 109 def __getitem__(self, pos):
109 110 return self.p.index[pos] or self.load(pos)
110 111 def __delitem__(self, pos):
111 112 del self.p.index[pos]
112 113 def append(self, e):
113 114 self.p.index.append(e)
114 115 def trunc(self, pos):
115 116 self.p.trunc(pos)
116 117
117 118 class lazymap(object):
118 119 """a lazy version of the node map"""
119 120 def __init__(self, parser):
120 121 self.p = parser
121 122 def load(self, key):
122 123 if self.p.all: return
123 124 n = self.p.data.find(key)
124 125 if n < 0:
125 126 raise KeyError(key)
126 127 pos = n / self.p.s
127 128 self.p.load(pos)
128 129 def __contains__(self, key):
129 130 self.p.load()
130 131 return key in self.p.map
131 132 def __iter__(self):
132 133 yield nullid
133 134 for i in xrange(self.p.l):
134 135 try:
135 136 yield self.p.index[i][6]
136 137 except:
137 138 self.p.load(i)
138 139 yield self.p.index[i][6]
139 140 def __getitem__(self, key):
140 141 try:
141 142 return self.p.map[key]
142 143 except KeyError:
143 144 try:
144 145 self.load(key)
145 146 return self.p.map[key]
146 147 except KeyError:
147 148 raise KeyError("node " + hex(key))
148 149 def __setitem__(self, key, val):
149 150 self.p.map[key] = val
150 151 def __delitem__(self, key):
151 152 del self.p.map[key]
152 153
153 154 class RevlogError(Exception): pass
154 155
155 156 class revlog(object):
156 157 """
157 158 the underlying revision storage object
158 159
159 160 A revlog consists of two parts, an index and the revision data.
160 161
161 162 The index is a file with a fixed record size containing
162 163 information on each revision, includings its nodeid (hash), the
163 164 nodeids of its parents, the position and offset of its data within
164 165 the data file, and the revision it's based on. Finally, each entry
165 166 contains a linkrev entry that can serve as a pointer to external
166 167 data.
167 168
168 169 The revision data itself is a linear collection of data chunks.
169 170 Each chunk represents a revision and is usually represented as a
170 171 delta against the previous chunk. To bound lookup time, runs of
171 172 deltas are limited to about 2 times the length of the original
172 173 version data. This makes retrieval of a version proportional to
173 174 its size, or O(1) relative to the number of revisions.
174 175
175 176 Both pieces of the revlog are written to in an append-only
176 177 fashion, which means we never need to rewrite a file to insert or
177 178 remove data, and can use some simple techniques to avoid the need
178 179 for locking while reading.
179 180 """
180 181 def __init__(self, opener, indexfile, datafile):
181 182 """
182 183 create a revlog object
183 184
184 185 opener is a function that abstracts the file opening operation
185 186 and can be used to implement COW semantics or the like.
186 187 """
187 188 self.indexfile = indexfile
188 189 self.datafile = datafile
189 190 self.opener = opener
190 191
191 192 self.indexstat = None
192 193 self.cache = None
193 194 self.chunkcache = None
194 195 self.load()
195 196
196 197 def load(self):
197 198 try:
198 199 f = self.opener(self.indexfile)
199 200 except IOError, inst:
200 201 if inst.errno != errno.ENOENT:
201 202 raise
202 203 i = ""
203 204 else:
204 205 try:
205 206 st = os.fstat(f.fileno())
206 207 except AttributeError, inst:
207 208 st = None
208 209 else:
209 210 oldst = self.indexstat
210 211 if (oldst and st.st_dev == oldst.st_dev
211 212 and st.st_ino == oldst.st_ino
212 213 and st.st_mtime == oldst.st_mtime
213 214 and st.st_ctime == oldst.st_ctime):
214 215 return
215 216 self.indexstat = st
216 217 i = f.read()
217 218
218 219 if i and i[:4] != "\0\0\0\0":
219 220 raise RevlogError(_("incompatible revlog signature on %s") %
220 221 self.indexfile)
221 222
222 223 if len(i) > 10000:
223 224 # big index, let's parse it on demand
224 225 parser = lazyparser(i, self)
225 226 self.index = lazyindex(parser)
226 227 self.nodemap = lazymap(parser)
227 228 else:
228 229 s = struct.calcsize(indexformat)
229 230 l = len(i) / s
230 231 self.index = [None] * l
231 232 m = [None] * l
232 233
233 234 n = 0
234 235 for f in xrange(0, l * s, s):
235 236 # offset, size, base, linkrev, p1, p2, nodeid
236 237 e = struct.unpack(indexformat, i[f:f + s])
237 238 m[n] = (e[6], n)
238 239 self.index[n] = e
239 240 n += 1
240 241
241 242 self.nodemap = dict(m)
242 243 self.nodemap[nullid] = -1
243 244
244 245 def tip(self): return self.node(len(self.index) - 1)
245 246 def count(self): return len(self.index)
246 247 def node(self, rev): return (rev < 0) and nullid or self.index[rev][6]
247 248 def rev(self, node):
248 249 try:
249 250 return self.nodemap[node]
250 251 except KeyError:
251 252 raise RevlogError(_('%s: no node %s') % (self.indexfile, hex(node)))
252 253 def linkrev(self, node): return self.index[self.rev(node)][3]
253 254 def parents(self, node):
254 255 if node == nullid: return (nullid, nullid)
255 256 return self.index[self.rev(node)][4:6]
256 257
257 258 def start(self, rev): return (rev < 0) and -1 or self.index[rev][0]
258 259 def length(self, rev):
259 260 if rev < 0:
260 261 return 0
261 262 else:
262 263 return self.index[rev][1]
263 264 def end(self, rev): return self.start(rev) + self.length(rev)
264 265 def base(self, rev): return (rev < 0) and rev or self.index[rev][2]
265 266
266 267 def reachable(self, rev, stop=None):
267 268 reachable = {}
268 269 visit = [rev]
269 270 reachable[rev] = 1
270 271 if stop:
271 272 stopn = self.rev(stop)
272 273 else:
273 274 stopn = 0
274 275 while visit:
275 276 n = visit.pop(0)
276 277 if n == stop:
277 278 continue
278 279 if n == nullid:
279 280 continue
280 281 for p in self.parents(n):
281 282 if self.rev(p) < stopn:
282 283 continue
283 284 if p not in reachable:
284 285 reachable[p] = 1
285 286 visit.append(p)
286 287 return reachable
287 288
288 289 def nodesbetween(self, roots=None, heads=None):
289 290 """Return a tuple containing three elements. Elements 1 and 2 contain
290 291 a final list bases and heads after all the unreachable ones have been
291 292 pruned. Element 0 contains a topologically sorted list of all
292 293
293 294 nodes that satisfy these constraints:
294 295 1. All nodes must be descended from a node in roots (the nodes on
295 296 roots are considered descended from themselves).
296 297 2. All nodes must also be ancestors of a node in heads (the nodes in
297 298 heads are considered to be their own ancestors).
298 299
299 300 If roots is unspecified, nullid is assumed as the only root.
300 301 If heads is unspecified, it is taken to be the output of the
301 302 heads method (i.e. a list of all nodes in the repository that
302 303 have no children)."""
303 304 nonodes = ([], [], [])
304 305 if roots is not None:
305 306 roots = list(roots)
306 307 if not roots:
307 308 return nonodes
308 309 lowestrev = min([self.rev(n) for n in roots])
309 310 else:
310 311 roots = [nullid] # Everybody's a descendent of nullid
311 312 lowestrev = -1
312 313 if (lowestrev == -1) and (heads is None):
313 314 # We want _all_ the nodes!
314 315 return ([self.node(r) for r in xrange(0, self.count())],
315 316 [nullid], list(self.heads()))
316 317 if heads is None:
317 318 # All nodes are ancestors, so the latest ancestor is the last
318 319 # node.
319 320 highestrev = self.count() - 1
320 321 # Set ancestors to None to signal that every node is an ancestor.
321 322 ancestors = None
322 323 # Set heads to an empty dictionary for later discovery of heads
323 324 heads = {}
324 325 else:
325 326 heads = list(heads)
326 327 if not heads:
327 328 return nonodes
328 329 ancestors = {}
329 330 # Start at the top and keep marking parents until we're done.
330 331 nodestotag = heads[:]
331 332 # Turn heads into a dictionary so we can remove 'fake' heads.
332 333 # Also, later we will be using it to filter out the heads we can't
333 334 # find from roots.
334 335 heads = dict.fromkeys(heads, 0)
335 336 # Remember where the top was so we can use it as a limit later.
336 337 highestrev = max([self.rev(n) for n in nodestotag])
337 338 while nodestotag:
338 339 # grab a node to tag
339 340 n = nodestotag.pop()
340 341 # Never tag nullid
341 342 if n == nullid:
342 343 continue
343 344 # A node's revision number represents its place in a
344 345 # topologically sorted list of nodes.
345 346 r = self.rev(n)
346 347 if r >= lowestrev:
347 348 if n not in ancestors:
348 349 # If we are possibly a descendent of one of the roots
349 350 # and we haven't already been marked as an ancestor
350 351 ancestors[n] = 1 # Mark as ancestor
351 352 # Add non-nullid parents to list of nodes to tag.
352 353 nodestotag.extend([p for p in self.parents(n) if
353 354 p != nullid])
354 355 elif n in heads: # We've seen it before, is it a fake head?
355 356 # So it is, real heads should not be the ancestors of
356 357 # any other heads.
357 358 heads.pop(n)
358 359 if not ancestors:
359 360 return nonodes
360 361 # Now that we have our set of ancestors, we want to remove any
361 362 # roots that are not ancestors.
362 363
363 364 # If one of the roots was nullid, everything is included anyway.
364 365 if lowestrev > -1:
365 366 # But, since we weren't, let's recompute the lowest rev to not
366 367 # include roots that aren't ancestors.
367 368
368 369 # Filter out roots that aren't ancestors of heads
369 370 roots = [n for n in roots if n in ancestors]
370 371 # Recompute the lowest revision
371 372 if roots:
372 373 lowestrev = min([self.rev(n) for n in roots])
373 374 else:
374 375 # No more roots? Return empty list
375 376 return nonodes
376 377 else:
377 378 # We are descending from nullid, and don't need to care about
378 379 # any other roots.
379 380 lowestrev = -1
380 381 roots = [nullid]
381 382 # Transform our roots list into a 'set' (i.e. a dictionary where the
382 383 # values don't matter.
383 384 descendents = dict.fromkeys(roots, 1)
384 385 # Also, keep the original roots so we can filter out roots that aren't
385 386 # 'real' roots (i.e. are descended from other roots).
386 387 roots = descendents.copy()
387 388 # Our topologically sorted list of output nodes.
388 389 orderedout = []
389 390 # Don't start at nullid since we don't want nullid in our output list,
390 391 # and if nullid shows up in descedents, empty parents will look like
391 392 # they're descendents.
392 393 for r in xrange(max(lowestrev, 0), highestrev + 1):
393 394 n = self.node(r)
394 395 isdescendent = False
395 396 if lowestrev == -1: # Everybody is a descendent of nullid
396 397 isdescendent = True
397 398 elif n in descendents:
398 399 # n is already a descendent
399 400 isdescendent = True
400 401 # This check only needs to be done here because all the roots
401 402 # will start being marked is descendents before the loop.
402 403 if n in roots:
403 404 # If n was a root, check if it's a 'real' root.
404 405 p = tuple(self.parents(n))
405 406 # If any of its parents are descendents, it's not a root.
406 407 if (p[0] in descendents) or (p[1] in descendents):
407 408 roots.pop(n)
408 409 else:
409 410 p = tuple(self.parents(n))
410 411 # A node is a descendent if either of its parents are
411 412 # descendents. (We seeded the dependents list with the roots
412 413 # up there, remember?)
413 414 if (p[0] in descendents) or (p[1] in descendents):
414 415 descendents[n] = 1
415 416 isdescendent = True
416 417 if isdescendent and ((ancestors is None) or (n in ancestors)):
417 418 # Only include nodes that are both descendents and ancestors.
418 419 orderedout.append(n)
419 420 if (ancestors is not None) and (n in heads):
420 421 # We're trying to figure out which heads are reachable
421 422 # from roots.
422 423 # Mark this head as having been reached
423 424 heads[n] = 1
424 425 elif ancestors is None:
425 426 # Otherwise, we're trying to discover the heads.
426 427 # Assume this is a head because if it isn't, the next step
427 428 # will eventually remove it.
428 429 heads[n] = 1
429 430 # But, obviously its parents aren't.
430 431 for p in self.parents(n):
431 432 heads.pop(p, None)
432 433 heads = [n for n in heads.iterkeys() if heads[n] != 0]
433 434 roots = roots.keys()
434 435 assert orderedout
435 436 assert roots
436 437 assert heads
437 438 return (orderedout, roots, heads)
438 439
439 440 def heads(self, start=None):
440 441 """return the list of all nodes that have no children
441 442
442 443 if start is specified, only heads that are descendants of
443 444 start will be returned
444 445
445 446 """
446 447 if start is None:
447 448 start = nullid
448 449 reachable = {start: 1}
449 450 heads = {start: 1}
450 451 startrev = self.rev(start)
451 452
452 453 for r in xrange(startrev + 1, self.count()):
453 454 n = self.node(r)
454 455 for pn in self.parents(n):
455 456 if pn in reachable:
456 457 reachable[n] = 1
457 458 heads[n] = 1
458 459 if pn in heads:
459 460 del heads[pn]
460 461 return heads.keys()
461 462
462 463 def children(self, node):
463 464 """find the children of a given node"""
464 465 c = []
465 466 p = self.rev(node)
466 467 for r in range(p + 1, self.count()):
467 468 n = self.node(r)
468 469 for pn in self.parents(n):
469 470 if pn == node:
470 471 c.append(n)
471 472 continue
472 473 elif pn == nullid:
473 474 continue
474 475 return c
475 476
476 477 def lookup(self, id):
477 478 """locate a node based on revision number or subset of hex nodeid"""
478 479 try:
479 480 rev = int(id)
480 481 if str(rev) != id: raise ValueError
481 482 if rev < 0: rev = self.count() + rev
482 483 if rev < 0 or rev >= self.count(): raise ValueError
483 484 return self.node(rev)
484 485 except (ValueError, OverflowError):
485 486 c = []
486 487 for n in self.nodemap:
487 488 if hex(n).startswith(id):
488 489 c.append(n)
489 490 if len(c) > 1: raise RevlogError(_("Ambiguous identifier"))
490 491 if len(c) < 1: raise RevlogError(_("No match found"))
491 492 return c[0]
492 493
493 494 return None
494 495
495 496 def diff(self, a, b):
496 497 """return a delta between two revisions"""
497 498 return mdiff.textdiff(a, b)
498 499
499 500 def patches(self, t, pl):
500 501 """apply a list of patches to a string"""
501 502 return mdiff.patches(t, pl)
502 503
503 504 def chunk(self, rev):
504 505 start, length = self.start(rev), self.length(rev)
505 506 end = start + length
506 507
507 508 def loadcache():
508 509 cache_length = max(4096 * 1024, length) # 4Mo
509 510 df = self.opener(self.datafile)
510 511 df.seek(start)
511 512 self.chunkcache = (start, df.read(cache_length))
512 513
513 514 if not self.chunkcache:
514 515 loadcache()
515 516
516 517 cache_start = self.chunkcache[0]
517 518 cache_end = cache_start + len(self.chunkcache[1])
518 519 if start >= cache_start and end <= cache_end:
519 520 # it is cached
520 521 offset = start - cache_start
521 522 else:
522 523 loadcache()
523 524 offset = 0
524 525
525 526 #def checkchunk():
526 527 # df = self.opener(self.datafile)
527 528 # df.seek(start)
528 529 # return df.read(length)
529 530 #assert s == checkchunk()
530 531 return decompress(self.chunkcache[1][offset:offset + length])
531 532
532 533 def delta(self, node):
533 534 """return or calculate a delta between a node and its predecessor"""
534 535 r = self.rev(node)
535 536 return self.revdiff(r - 1, r)
536 537
537 538 def revdiff(self, rev1, rev2):
538 539 """return or calculate a delta between two revisions"""
539 540 b1 = self.base(rev1)
540 541 b2 = self.base(rev2)
541 542 if b1 == b2 and rev1 + 1 == rev2:
542 543 return self.chunk(rev2)
543 544 else:
544 545 return self.diff(self.revision(self.node(rev1)),
545 546 self.revision(self.node(rev2)))
546 547
547 548 def revision(self, node):
548 549 """return an uncompressed revision of a given"""
549 550 if node == nullid: return ""
550 551 if self.cache and self.cache[0] == node: return self.cache[2]
551 552
552 553 # look up what we need to read
553 554 text = None
554 555 rev = self.rev(node)
555 556 base = self.base(rev)
556 557
557 558 # do we have useful data cached?
558 559 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
559 560 base = self.cache[1]
560 561 text = self.cache[2]
561 562 else:
562 563 text = self.chunk(base)
563 564
564 565 bins = []
565 566 for r in xrange(base + 1, rev + 1):
566 567 bins.append(self.chunk(r))
567 568
568 569 text = self.patches(text, bins)
569 570
570 571 p1, p2 = self.parents(node)
571 572 if node != hash(text, p1, p2):
572 573 raise RevlogError(_("integrity check failed on %s:%d")
573 574 % (self.datafile, rev))
574 575
575 576 self.cache = (node, rev, text)
576 577 return text
577 578
578 579 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
579 580 """add a revision to the log
580 581
581 582 text - the revision data to add
582 583 transaction - the transaction object used for rollback
583 584 link - the linkrev data to add
584 585 p1, p2 - the parent nodeids of the revision
585 586 d - an optional precomputed delta
586 587 """
587 588 if text is None: text = ""
588 589 if p1 is None: p1 = self.tip()
589 590 if p2 is None: p2 = nullid
590 591
591 592 node = hash(text, p1, p2)
592 593
593 594 if node in self.nodemap:
594 595 return node
595 596
596 597 n = self.count()
597 598 t = n - 1
598 599
599 600 if n:
600 601 base = self.base(t)
601 602 start = self.start(base)
602 603 end = self.end(t)
603 604 if not d:
604 605 prev = self.revision(self.tip())
605 606 d = self.diff(prev, str(text))
606 607 data = compress(d)
607 608 l = len(data[1]) + len(data[0])
608 609 dist = end - start + l
609 610
610 611 # full versions are inserted when the needed deltas
611 612 # become comparable to the uncompressed text
612 613 if not n or dist > len(text) * 2:
613 614 data = compress(text)
614 615 l = len(data[1]) + len(data[0])
615 616 base = n
616 617 else:
617 618 base = self.base(t)
618 619
619 620 offset = 0
620 621 if t >= 0:
621 622 offset = self.end(t)
622 623
623 624 e = (offset, l, base, link, p1, p2, node)
624 625
625 626 self.index.append(e)
626 627 self.nodemap[node] = n
627 628 entry = struct.pack(indexformat, *e)
628 629
629 630 transaction.add(self.datafile, e[0])
630 631 f = self.opener(self.datafile, "a")
631 632 if data[0]:
632 633 f.write(data[0])
633 634 f.write(data[1])
634 635 transaction.add(self.indexfile, n * len(entry))
635 636 self.opener(self.indexfile, "a").write(entry)
636 637
637 638 self.cache = (node, n, text)
638 639 return node
639 640
640 641 def ancestor(self, a, b):
641 642 """calculate the least common ancestor of nodes a and b"""
642 643 # calculate the distance of every node from root
643 644 dist = {nullid: 0}
644 645 for i in xrange(self.count()):
645 646 n = self.node(i)
646 647 p1, p2 = self.parents(n)
647 648 dist[n] = max(dist[p1], dist[p2]) + 1
648 649
649 650 # traverse ancestors in order of decreasing distance from root
650 651 def ancestors(node):
651 652 # we store negative distances because heap returns smallest member
652 653 h = [(-dist[node], node)]
653 654 seen = {}
654 655 while h:
655 656 d, n = heapq.heappop(h)
656 657 if n not in seen:
657 658 seen[n] = 1
658 659 yield (-d, n)
659 660 for p in self.parents(n):
660 661 heapq.heappush(h, (-dist[p], p))
661 662
662 663 def generations(node):
663 664 sg, s = None, {}
664 665 for g,n in ancestors(node):
665 666 if g != sg:
666 667 if sg:
667 668 yield sg, s
668 669 sg, s = g, {n:1}
669 670 else:
670 671 s[n] = 1
671 672 yield sg, s
672 673
673 674 x = generations(a)
674 675 y = generations(b)
675 676 gx = x.next()
676 677 gy = y.next()
677 678
678 679 # increment each ancestor list until it is closer to root than
679 680 # the other, or they match
680 681 while 1:
681 682 #print "ancestor gen %s %s" % (gx[0], gy[0])
682 683 if gx[0] == gy[0]:
683 684 # find the intersection
684 685 i = [ n for n in gx[1] if n in gy[1] ]
685 686 if i:
686 687 return i[0]
687 688 else:
688 689 #print "next"
689 690 gy = y.next()
690 691 gx = x.next()
691 692 elif gx[0] < gy[0]:
692 693 #print "next y"
693 694 gy = y.next()
694 695 else:
695 696 #print "next x"
696 697 gx = x.next()
697 698
698 699 def group(self, nodelist, lookup, infocollect=None):
699 700 """calculate a delta group
700 701
701 702 Given a list of changeset revs, return a set of deltas and
702 703 metadata corresponding to nodes. the first delta is
703 704 parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
704 705 have this parent as it has all history before these
705 706 changesets. parent is parent[0]
706 707 """
707 708 revs = [self.rev(n) for n in nodelist]
708 709
709 710 # if we don't have any revisions touched by these changesets, bail
710 711 if not revs:
711 yield struct.pack(">l", 0)
712 yield changegroup.closechunk()
712 713 return
713 714
714 715 # add the parent of the first rev
715 716 p = self.parents(self.node(revs[0]))[0]
716 717 revs.insert(0, self.rev(p))
717 718
718 719 # build deltas
719 720 for d in xrange(0, len(revs) - 1):
720 721 a, b = revs[d], revs[d + 1]
721 722 nb = self.node(b)
722 723
723 724 if infocollect is not None:
724 725 infocollect(nb)
725 726
726 727 d = self.revdiff(a, b)
727 728 p = self.parents(nb)
728 729 meta = nb + p[0] + p[1] + lookup(nb)
729 l = struct.pack(">l", len(meta) + len(d) + 4)
730 yield l
731 yield meta
732 yield d
730 yield changegroup.genchunk("%s%s" % (meta, d))
733 731
734 yield struct.pack(">l", 0)
732 yield changegroup.closechunk()
735 733
def addgroup(self, revs, linkmapper, transaction, unique=0):
    """
    add a delta group

    given a set of deltas, add them to the revision log. the
    first delta is against its parent, which should be in our
    log, the rest are against the previous delta.

    revs is an iterable of wire-format chunks (80-byte header of
    node/p1/p2/changeset followed by the delta); linkmapper maps a
    changeset node to a link revision.  Returns the node of the
    last revision added.
    """

    #track the base of the current delta log
    r = self.count()
    t = r - 1
    node = nullid

    base = prev = -1
    start = end = measure = 0
    if r:
        base = self.base(t)
        start = self.start(base)
        end = self.end(t)
        measure = self.length(base)
        prev = self.tip()

    transaction.add(self.datafile, end)
    transaction.add(self.indexfile, r * struct.calcsize(indexformat))
    dfh = self.opener(self.datafile, "a")
    ifh = self.opener(self.indexfile, "a")

    # loop through our set of deltas
    chain = None
    for chunk in revs:
        # header: node, parent1, parent2, changeset node (20 bytes each)
        node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
        link = linkmapper(cs)
        if node in self.nodemap:
            # this can happen if two branches make the same change
            # if unique:
            #    raise RevlogError(_("already have %s") % hex(node[:4]))
            chain = node
            continue
        delta = chunk[80:]

        for p in (p1, p2):
            if not p in self.nodemap:
                raise RevlogError(_("unknown parent %s") % short(p1))

        if not chain:
            # retrieve the parent revision of the delta chain
            chain = p1
            if not chain in self.nodemap:
                raise RevlogError(_("unknown base %s") % short(chain[:4]))

        # full versions are inserted when the needed deltas become
        # comparable to the uncompressed text or when the previous
        # version is not the one we have a delta against. We use
        # the size of the previous full rev as a proxy for the
        # current size.

        if chain == prev:
            tempd = compress(delta)
            cdelta = tempd[0] + tempd[1]

        if chain != prev or (end - start + len(cdelta)) > measure * 2:
            # flush our writes here so we can read it in revision
            dfh.flush()
            ifh.flush()
            # reconstruct the full text and store it as a new revision
            text = self.revision(chain)
            text = self.patches(text, [delta])
            chk = self.addrevision(text, transaction, link, p1, p2)
            if chk != node:
                raise RevlogError(_("consistency error adding group"))
            measure = len(text)
        else:
            # store the compressed delta directly
            e = (end, len(cdelta), base, link, p1, p2, node)
            self.index.append(e)
            self.nodemap[node] = r
            dfh.write(cdelta)
            ifh.write(struct.pack(indexformat, *e))

        t, r, chain, prev = r, r + 1, node, node
        base = self.base(t)
        start = self.start(base)
        end = self.end(t)

    dfh.close()
    ifh.close()
    return node
822 820
def strip(self, rev, minlink):
    """remove revision rev and all later revisions from the log

    Truncates both the data and index files on disk and drops the
    corresponding in-memory state.  minlink is the oldest link
    revision we're allowed to strip; rev is advanced past any
    revisions belonging to older changesets.
    """
    if self.count() == 0 or rev >= self.count():
        return

    # When stripping away a revision, we need to make sure it
    # does not actually belong to an older changeset.
    # The minlink parameter defines the oldest revision
    # we're allowed to strip away.
    while minlink > self.index[rev][3]:
        rev += 1
        if rev >= self.count():
            return

    # first truncate the files on disk
    end = self.start(rev)
    self.opener(self.datafile, "a").truncate(end)
    end = rev * struct.calcsize(indexformat)
    self.opener(self.indexfile, "a").truncate(end)

    # then reset internal state in memory to forget those revisions
    self.cache = None
    self.chunkcache = None
    for p in self.index[rev:]:
        # p[6] is the node of the stripped revision
        del self.nodemap[p[6]]
    del self.index[rev:]

    # truncating the lazyindex also truncates the lazymap.
    if isinstance(self.index, lazyindex):
        self.index.trunc(end)
853 851
854 852 def checksize(self):
855 853 expected = 0
856 854 if self.count():
857 855 expected = self.end(self.count() - 1)
858 856
859 857 try:
860 858 f = self.opener(self.datafile)
861 859 f.seek(0, 2)
862 860 actual = f.tell()
863 861 dd = actual - expected
864 862 except IOError, inst:
865 863 if inst.errno != errno.ENOENT:
866 864 raise
867 865 dd = 0
868 866
869 867 try:
870 868 f = self.opener(self.indexfile)
871 869 f.seek(0, 2)
872 870 actual = f.tell()
873 871 s = struct.calcsize(indexformat)
874 872 i = actual / s
875 873 di = actual - (i * s)
876 874 except IOError, inst:
877 875 if inst.errno != errno.ENOENT:
878 876 raise
879 877 di = 0
880 878
881 879 return (dd, di)
882 880
883 881
General Comments 0
You need to be logged in to leave comments. Login now