##// END OF EJS Templates
lookup: optimize '.'...
Matt Mackall -
r6736:369ddc9c default
parent child Browse files
Show More
@@ -1,2147 +1,2142
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context, weakref
11 import changelog, dirstate, filelog, manifest, context, weakref
12 import lock, transaction, stat, errno, ui
12 import lock, transaction, stat, errno, ui
13 import os, revlog, time, util, extensions, hook, inspect
13 import os, revlog, time, util, extensions, hook, inspect
14 import match as match_
14 import match as match_
15
15
class localrepository(repo.repository):
    # wire-protocol capabilities this repository class can serve
    capabilities = util.set(('lookup', 'changegroupsubset'))
    # on-disk requirements (from .hg/requires) this class can read
    supported = ('revlogv1', 'store')
19
19
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create=1, initialize) the repository at path.

        parentui: the ui object to derive this repo's ui from
        path: working-directory root of the repository
        create: when true, create a brand-new repository; raises
            RepoError if one already exists at path

        Raises repo.RepoError when the repository is missing, already
        exists (create=1), or declares an unsupported requirement.
        """
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if parentui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                # create an invalid changelog so that pre-"requires"
                # versions of Mercurial refuse to read this repo rather
                # than misreading it
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements (a missing file just means an older repo)
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = []
            # check them against what this class supports
            for r in requirements:
                if r not in self.supported:
                    raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store: with the "store" requirement, revlogs live under
        # .hg/store with encoded filenames; otherwise directly in .hg
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path

        try:
            # files in .hg/ will be created using this mode
            mode = os.stat(self.spath).st_mode
            # avoid some useless chmods: None means "leave mode alone"
            if (0777 & ~util._umask) == (0777 & mode):
                mode = None
        except OSError:
            mode = None

        self._createmode = mode
        self.opener.createmode = mode
        sopener = util.opener(self.spath)
        sopener.createmode = mode
        # store opener applies filename encoding transparently
        self.sopener = util.encodedopener(sopener, self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            # per-repository configuration; a missing hgrc is fine
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        # lazily-populated caches (None/empty until first use)
        self.tagscache = None
        self._tagstypecache = None
        self.branchcache = None
        self._ubranchcache = None # UTF-8 version of branchcache
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        # weak references to live transaction/lock/wlock, if any
        self._transref = self._lockref = self._wlockref = None
104
104
    def __getattr__(self, name):
        """Lazily construct the expensive changelog/manifest/dirstate
        attributes on first access.

        Once assigned to the instance, normal attribute lookup finds
        them and __getattr__ is no longer consulted.
        """
        if name == 'changelog':
            self.changelog = changelog.changelog(self.sopener)
            # the changelog's detected version drives the default revlog
            # version used by the store opener
            self.sopener.defversion = self.changelog.version
            return self.changelog
        if name == 'manifest':
            # force changelog creation first so defversion is set
            self.changelog
            self.manifest = manifest.manifest(self.sopener)
            return self.manifest
        if name == 'dirstate':
            self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
            return self.dirstate
        else:
            raise AttributeError, name
119
119
120 def url(self):
120 def url(self):
121 return 'file:' + self.root
121 return 'file:' + self.root
122
122
    def hook(self, name, throw=False, **args):
        """Run the named hook via the hook module, passing this repo's
        ui, the throw flag, and any extra keyword arguments through."""
        return hook.hook(self.ui, self, name, throw, **args)
125
125
    # characters that may not appear in a tag name
    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, parent=None,
             extra={}):
        """Low-level tagging helper shared by tag().

        names: a tag name or sequence of tag names to point at node
        local: store in .hg/localtags instead of committing to .hgtags
        parent: when given, commit the .hgtags change with this parent
            instead of going through the dirstate
        extra: extra commit metadata dict (shared default is never
            mutated here)

        Runs the 'pretag' hook (which may veto via throw=True) before
        writing and the 'tag' hook after.  Returns the node of the tag
        commit, or None for local tags.
        """
        use_dirstate = parent is None

        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)

        def writetags(fp, names, munge, prevtags):
            # append tag entries ('<hex node> <name>') to fp; munge, if
            # given, converts the name (e.g. to UTF-8) before writing
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagstypecache and name in self._tagstypecache:
                    # record the old node first so history of the tag
                    # is preserved in the file
                    old = self.tagscache.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError, err:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        if use_dirstate:
            try:
                fp = self.wfile('.hgtags', 'rb+')
            except IOError, err:
                fp = self.wfile('.hgtags', 'ab')
            else:
                prevtags = fp.read()
        else:
            # committing against an explicit parent: start from that
            # parent's .hgtags content (absent is fine)
            try:
                prevtags = self.filectx('.hgtags', parent).data()
            except revlog.LookupError:
                pass
            fp = self.wfile('.hgtags', 'wb')
            if prevtags:
                fp.write(prevtags)

        # committed tags are stored in UTF-8
        writetags(fp, names, util.fromlocal, prevtags)

        if use_dirstate and '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
                              extra=extra)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
201
201
    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        # refuse to tag if .hgtags shows up in any of the first five
        # status lists (presumably modified/added/removed/deleted/unknown
        # -- confirm against status()'s ordering), since the implicit
        # commit would sweep those changes in
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self._tag(names, node, message, local, user, date)
229
229
    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        globaltags = {}
        tagtypes = {}

        def readtags(lines, fn, tagtype):
            # parse '<hex node> <name>' lines from fn, merging the result
            # into globaltags/tagtypes; malformed or unknown-node entries
            # are warned about and skipped
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = util.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                # h accumulates the nodes this tag previously pointed at
                # (later lines in the same file supersede earlier ones)
                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.items():
                if k not in globaltags:
                    globaltags[k] = nh
                    tagtypes[k] = tagtype
                    continue

                # we prefer the global tag if:
                #  it supercedes us OR
                #  mutual supercedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah
                tagtypes[k] = tagtype

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            # reuse the previous filectx where possible to share state
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f, "global")

        try:
            data = util.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags", "local")
        except IOError:
            pass

        self.tagscache = {}
        self._tagstypecache = {}
        for k, nh in globaltags.items():
            n = nh[0]
            # tags pointing at nullid are deletions and are dropped
            if n != nullid:
                self.tagscache[k] = n
                self._tagstypecache[k] = tagtypes[k]
        # 'tip' is always an implicit tag for the newest revision
        self.tagscache['tip'] = self.changelog.tip()
        return self.tagscache
313
313
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        # calling tags() populates self._tagstypecache as a side effect
        self.tags()

        return self._tagstypecache.get(tagname)
326
326
    def _hgtagsnodes(self):
        """Return (rev, node, fnode) triples for the .hgtags file of
        each head, iterating the head list in reverse order.

        When several heads share the same .hgtags file node, only the
        last one seen is kept (earlier duplicates are nulled out and
        filtered from the result).
        """
        heads = self.heads()
        heads.reverse()
        last = {}   # .hgtags filenode -> index of its latest entry in ret
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                # this head has no .hgtags file
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                # supersede the earlier entry for the same file version
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]
344
344
345 def tagslist(self):
345 def tagslist(self):
346 '''return a list of tags ordered by revision'''
346 '''return a list of tags ordered by revision'''
347 l = []
347 l = []
348 for t, n in self.tags().items():
348 for t, n in self.tags().items():
349 try:
349 try:
350 r = self.changelog.rev(n)
350 r = self.changelog.rev(n)
351 except:
351 except:
352 r = -2 # sort to the beginning of the list if unknown
352 r = -2 # sort to the beginning of the list if unknown
353 l.append((r, t, n))
353 l.append((r, t, n))
354 l.sort()
354 l.sort()
355 return [(t, n) for r, t, n in l]
355 return [(t, n) for r, t, n in l]
356
356
357 def nodetags(self, node):
357 def nodetags(self, node):
358 '''return the tags associated with a node'''
358 '''return the tags associated with a node'''
359 if not self.nodetagscache:
359 if not self.nodetagscache:
360 self.nodetagscache = {}
360 self.nodetagscache = {}
361 for t, n in self.tags().items():
361 for t, n in self.tags().items():
362 self.nodetagscache.setdefault(n, []).append(t)
362 self.nodetagscache.setdefault(n, []).append(t)
363 return self.nodetagscache.get(node, [])
363 return self.nodetagscache.get(node, [])
364
364
    def _branchtags(self, partial, lrev):
        """Bring the branch cache 'partial' (valid through revision
        lrev) up to date with the changelog tip, persisting it to disk
        if anything changed.  Returns partial (updated in place)."""
        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
372
372
    def branchtags(self):
        """Return a dict mapping branch name (local charset) to the
        tip-most node of that branch, maintaining the branch cache."""
        tip = self.changelog.tip()
        if self.branchcache is not None and self._branchcachetip == tip:
            # cache is current
            return self.branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if self.branchcache is None:
            self.branchcache = {} # avoid recursion in changectx
        else:
            self.branchcache.clear() # keep using the same dict
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # no usable in-memory state (e.g. old tip was stripped):
            # fall back to the on-disk cache
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._ubranchcache

        self._branchtags(partial, lrev)

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        self._ubranchcache = partial
        return self.branchcache
398
398
    def _readbranchcache(self):
        """Load .hg/branch.cache from disk.

        Returns (partial, last, lrev): partial maps branch name ->
        node, and last/lrev are the tip node/rev the cache was valid
        for.  Any read or parse problem yields an empty cache
        ({}, nullid, nullrev) rather than an error.
        """
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            # first line: '<hex tip node> <tiprev>'
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            # remaining lines: '<hex node> <branch label>'
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            # any other failure just discards the cache
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
426
426
    def _writebranchcache(self, branches, tip, tiprev):
        """Best-effort atomic write of .hg/branch.cache; I/O errors are
        ignored since the cache can always be rebuilt."""
        try:
            f = self.opener("branch.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, node in branches.iteritems():
                f.write("%s %s\n" % (hex(node), label))
            # atomictemp file only replaces the target on rename()
            f.rename()
        except (IOError, OSError):
            pass
436
436
437 def _updatebranchcache(self, partial, start, end):
437 def _updatebranchcache(self, partial, start, end):
438 for r in xrange(start, end):
438 for r in xrange(start, end):
439 c = self.changectx(r)
439 c = self.changectx(r)
440 b = c.branch()
440 b = c.branch()
441 partial[b] = c.node()
441 partial[b] = c.node()
442
442
    def lookup(self, key):
        """Resolve a revision identifier to a changelog node.

        Resolution order: '.' (first working-directory parent, read
        straight from the dirstate), 'null', exact changelog match,
        tags, branch tags, then unambiguous node-prefix match.

        Raises repo.RepoError when key cannot be resolved.
        """
        if key == '.':
            # fast path: first parent of the working directory
            # (nullid when nothing is checked out)
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        try:
            # a 20-byte key is likely a binary node; show it as hex in
            # the error message
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise repo.RepoError(_("unknown revision '%s'") % key)
469
464
    def local(self):
        # this is a local on-disk repository (as opposed to a remote peer)
        return True
472
467
473 def join(self, f):
468 def join(self, f):
474 return os.path.join(self.path, f)
469 return os.path.join(self.path, f)
475
470
476 def sjoin(self, f):
471 def sjoin(self, f):
477 f = self.encodefn(f)
472 f = self.encodefn(f)
478 return os.path.join(self.spath, f)
473 return os.path.join(self.spath, f)
479
474
480 def wjoin(self, f):
475 def wjoin(self, f):
481 return os.path.join(self.root, f)
476 return os.path.join(self.root, f)
482
477
483 def rjoin(self, f):
478 def rjoin(self, f):
484 return os.path.join(self.root, util.pconvert(f))
479 return os.path.join(self.root, util.pconvert(f))
485
480
486 def file(self, f):
481 def file(self, f):
487 if f[0] == '/':
482 if f[0] == '/':
488 f = f[1:]
483 f = f[1:]
489 return filelog.filelog(self.sopener, f)
484 return filelog.filelog(self.sopener, f)
490
485
    def changectx(self, changeid=None):
        """Return the change context for changeid (None handling is
        delegated to context.changectx)."""
        return context.changectx(self, changeid)
493
488
    def workingctx(self):
        """Return a context object for the working directory."""
        return context.workingctx(self)
496
491
497 def parents(self, changeid=None):
492 def parents(self, changeid=None):
498 '''
493 '''
499 get list of changectxs for parents of changeid or working directory
494 get list of changectxs for parents of changeid or working directory
500 '''
495 '''
501 if changeid is None:
496 if changeid is None:
502 pl = self.dirstate.parents()
497 pl = self.dirstate.parents()
503 else:
498 else:
504 n = self.changelog.lookup(changeid)
499 n = self.changelog.lookup(changeid)
505 pl = self.changelog.parents(n)
500 pl = self.changelog.parents(n)
506 if pl[1] == nullid:
501 if pl[1] == nullid:
507 return [self.changectx(pl[0])]
502 return [self.changectx(pl[0])]
508 return [self.changectx(pl[0]), self.changectx(pl[1])]
503 return [self.changectx(pl[0]), self.changectx(pl[1])]
509
504
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
514
509
    def getcwd(self):
        """Return the current working directory as seen by the dirstate."""
        return self.dirstate.getcwd()
517
512
    def pathto(self, f, cwd=None):
        """Return f rendered relative to cwd (delegated to the dirstate)."""
        return self.dirstate.pathto(f, cwd)
520
515
    def wfile(self, f, mode='r'):
        """Open file f from the working directory with the given mode."""
        return self.wopener(f, mode)
523
518
    def _link(self, f):
        """Return True if working-directory file f is a symlink."""
        return os.path.islink(self.wjoin(f))
526
521
    def _filter(self, filter, filename, data):
        """Run data through the first configured filter ('encode' or
        'decode' config section) whose pattern matches filename.

        Compiled (pattern-matcher, function, params) triples are cached
        per filter section in self.filterpats.
        """
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                # only the match function (index 1) of the matcher
                # triple is needed here
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                fn = None
                params = cmd
                # a command starting with a registered data-filter name
                # dispatches to that in-process filter instead of a shell
                # command
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # fall back to piping through an external command
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                # only the first matching filter is applied
                break

        return data
555
550
    def adddatafilter(self, name, filter):
        """Register an in-process data filter callable under name, for
        use by encode/decode filter configuration (see _filter)."""
        self._datafilters[name] = filter
558
553
    def wread(self, filename):
        """Read filename from the working directory (the symlink target
        if it is a link) and apply any configured "encode" filters."""
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)
565
560
566 def wwrite(self, filename, data, flags):
561 def wwrite(self, filename, data, flags):
567 data = self._filter("decode", filename, data)
562 data = self._filter("decode", filename, data)
568 try:
563 try:
569 os.unlink(self.wjoin(filename))
564 os.unlink(self.wjoin(filename))
570 except OSError:
565 except OSError:
571 pass
566 pass
572 self.wopener(filename, 'w').write(data)
567 self.wopener(filename, 'w').write(data)
573 util.set_flags(self.wjoin(filename), flags)
568 util.set_flags(self.wjoin(filename), flags)
574
569
575 def wwritedata(self, filename, data):
570 def wwritedata(self, filename, data):
576 return self._filter("decode", filename, data)
571 return self._filter("decode", filename, data)
577
572
    def transaction(self):
        """Return a transaction, nesting into a live one when present.

        An active transaction is tracked via a weak reference; if it is
        still alive, a nested transaction is returned.  Otherwise the
        current dirstate and branch are journalled first so rollback()
        can restore them; the rename list passed through aftertrans()
        turns the journal files into "undo.*" files afterwards.
        """
        if self._transref and self._transref():
            return self._transref().nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise repo.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate file yet (e.g. fresh repo): journal it as empty
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        # journal.* -> undo.* renames, applied when the transaction ends
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self._createmode)
        # weak ref: the transaction dies when the last strong ref is dropped
        self._transref = weakref.ref(tr)
        return tr
603
598
    def recover(self):
        """Roll back an interrupted transaction, if any.

        Returns True when a journal was found and replayed, False
        otherwise.  Caches are invalidated after the rollback so state
        is re-read from disk.
        """
        l = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"))
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            # drop our only reference so the lock is released promptly
            del l
617
612
    def rollback(self):
        """Undo the last committed transaction.

        Replays the "undo" journal if present, restores the saved
        dirstate and branch name, and invalidates in-memory caches.
        Warns when there is nothing to roll back.
        """
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"))
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    # undo.branch missing/unreadable: keep current branch
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % util.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            # dropping the references releases the locks (refcount-based)
            del lock, wlock
640
635
641 def invalidate(self):
636 def invalidate(self):
642 for a in "changelog manifest".split():
637 for a in "changelog manifest".split():
643 if a in self.__dict__:
638 if a in self.__dict__:
644 delattr(self, a)
639 delattr(self, a)
645 self.tagscache = None
640 self.tagscache = None
646 self._tagstypecache = None
641 self._tagstypecache = None
647 self.nodetagscache = None
642 self.nodetagscache = None
648 self.branchcache = None
643 self.branchcache = None
649 self._ubranchcache = None
644 self._ubranchcache = None
650 self._branchcachetip = None
645 self._branchcachetip = None
651
646
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the lock file *lockname* and return the lock object.

        First tries a non-blocking acquire; if the lock is held and
        *wait* is true, retries with the configured ui.timeout (600s
        default), otherwise re-raises LockHeld.  *releasefn* is passed
        to the lock, *acquirefn* (if any) runs right after acquisition,
        and *desc* is used in user-facing messages.
        """
        try:
            # timeout 0: fail immediately if someone else holds the lock
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
666
661
667 def lock(self, wait=True):
662 def lock(self, wait=True):
668 if self._lockref and self._lockref():
663 if self._lockref and self._lockref():
669 return self._lockref()
664 return self._lockref()
670
665
671 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
666 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
672 _('repository %s') % self.origroot)
667 _('repository %s') % self.origroot)
673 self._lockref = weakref.ref(l)
668 self._lockref = weakref.ref(l)
674 return l
669 return l
675
670
676 def wlock(self, wait=True):
671 def wlock(self, wait=True):
677 if self._wlockref and self._wlockref():
672 if self._wlockref and self._wlockref():
678 return self._wlockref()
673 return self._wlockref()
679
674
680 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
675 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
681 self.dirstate.invalidate, _('working directory of %s') %
676 self.dirstate.invalidate, _('working directory of %s') %
682 self.origroot)
677 self.origroot)
683 self._wlockref = weakref.ref(l)
678 self._wlockref = weakref.ref(l)
684 return l
679 return l
685
680
    def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx: the file context to commit; manifest1/manifest2: the
        manifests of the two parents; linkrev: changelog revision this
        filelog entry will link to; tr: the active transaction;
        changelist: list of changed files, appended to in place.
        Returns the new filelog node (or the existing parent node when
        the file is unchanged).
        """

        fn = fctx.path()
        t = fctx.data()
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = fctx.renamed()
        if cp and cp[0] != fn:
            cp = cp[0]
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1[cp])
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1[cp])
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2[cp])
                fp2 = fp1
            elif cp in manifest2: # directory rename on local side
                meta["copyrev"] = hex(manifest2[cp])
            else: # directory rename on remote side
                meta["copyrev"] = hex(manifest1[cp])
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            # nullid first parent signals "consult the copy metadata"
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, tr, linkrev, fp1, fp2)
749
744
750 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
745 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
751 if p1 is None:
746 if p1 is None:
752 p1, p2 = self.dirstate.parents()
747 p1, p2 = self.dirstate.parents()
753 return self.commit(files=files, text=text, user=user, date=date,
748 return self.commit(files=files, text=text, user=user, date=date,
754 p1=p1, p2=p2, extra=extra, empty_ok=True)
749 p1=p1, p2=p2, extra=extra, empty_ok=True)
755
750
    def commit(self, files=None, text="", user=None, date=None,
               match=None, force=False, force_editor=False,
               p1=None, p2=None, extra={}, empty_ok=False):
        """Commit working-directory changes and return the new node.

        When p1 is None (the normal case) the dirstate parents are used
        and the dirstate is updated afterwards; otherwise this behaves
        like rawcommit against explicit parents.  *files* limits the
        commit to given paths; *match* selects files when *files* is
        not given.  NOTE(review): *extra* is a mutable default; it
        appears to be passed through unchanged here — verify that
        workingctx does not mutate it.
        """
        wlock = lock = None
        if files:
            files = util.unique(files)
        try:
            wlock = self.wlock()
            lock = self.lock()
            use_dirstate = (p1 is None) # not rawcommit

            if use_dirstate:
                p1, p2 = self.dirstate.parents()
                update_dirstate = True

                # partial commits of a merge are not allowed: the file
                # lists of both parents must be committed together
                if (not force and p2 != nullid and
                    (match and (match.files() or match.anypats()))):
                    raise util.Abort(_('cannot partially commit a merge '
                                       '(do not specify files or patterns)'))

                if files:
                    # classify the named files by dirstate status
                    modified, removed = [], []
                    for f in files:
                        s = self.dirstate[f]
                        if s in 'nma':
                            modified.append(f)
                        elif s == 'r':
                            removed.append(f)
                        else:
                            self.ui.warn(_("%s not tracked!\n") % f)
                    changes = [modified, [], removed, [], []]
                else:
                    changes = self.status(match=match)
            else:
                # rawcommit path: trust the caller-supplied file list
                p1, p2 = p1, p2 or nullid
                update_dirstate = (self.dirstate.parents()[0] == p1)
                changes = [files, [], [], [], []]

            wctx = context.workingctx(self, (p1, p2), text, user, date,
                                      extra, changes)
            return self._commitctx(wctx, force, force_editor, empty_ok,
                                   use_dirstate, update_dirstate)
        finally:
            # dropping the references releases the locks (refcount-based)
            del lock, wlock
800
795
801 def commitctx(self, ctx):
796 def commitctx(self, ctx):
802 wlock = lock = None
797 wlock = lock = None
803 try:
798 try:
804 wlock = self.wlock()
799 wlock = self.wlock()
805 lock = self.lock()
800 lock = self.lock()
806 return self._commitctx(ctx, force=True, force_editor=False,
801 return self._commitctx(ctx, force=True, force_editor=False,
807 empty_ok=True, use_dirstate=False,
802 empty_ok=True, use_dirstate=False,
808 update_dirstate=False)
803 update_dirstate=False)
809 finally:
804 finally:
810 del lock, wlock
805 del lock, wlock
811
806
    def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
                   use_dirstate=True, update_dirstate=True):
        """Write *wctx* to the store as a new changeset.

        Commits each changed file, builds the new manifest, optionally
        runs the commit-message editor, fires the precommit /
        pretxncommit / commit hooks and closes the transaction.
        Returns the new changelog node, or None when nothing changed.
        """
        tr = None
        valid = 0 # don't save the dirstate if this isn't set
        try:
            commit = wctx.modified() + wctx.added()
            remove = wctx.removed()
            extra = wctx.extra().copy()
            branchname = extra['branch']
            user = wctx.user()
            text = wctx.description()

            p1, p2 = [p.node() for p in wctx.parents()]
            c1 = self.changelog.read(p1)
            c2 = self.changelog.read(p2)
            m1 = self.manifest.read(c1[0]).copy()
            m2 = self.manifest.read(c2[0])

            if use_dirstate:
                oldname = c1[5].get("branch") # stored in UTF-8
                # bail out early when there is literally nothing to commit
                # (no files, not a merge, and no branch-name change)
                if (not commit and not remove and not force and p2 == nullid
                    and branchname == oldname):
                    self.ui.status(_("nothing changed\n"))
                    return None

            xp1 = hex(p1)
            if p2 == nullid: xp2 = ''
            else: xp2 = hex(p2)

            self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

            tr = self.transaction()
            # a proxy avoids keeping a strong reference to the transaction
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = self.changelog.count()
            commit.sort()
            for f in commit:
                self.ui.note(f + "\n")
                try:
                    fctx = wctx.filectx(f)
                    new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
                    new_exec = fctx.isexec()
                    new_link = fctx.islink()
                    if ((not changed or changed[-1] != f) and
                        m2.get(f) != new[f]):
                        # mention the file in the changelog if some
                        # flag changed, even if there was no content
                        # change.
                        old_exec = m1.execf(f)
                        old_link = m1.linkf(f)
                        if old_exec != new_exec or old_link != new_link:
                            changed.append(f)
                    m1.set(f, new_exec, new_link)
                    if use_dirstate:
                        self.dirstate.normal(f)

                except (OSError, IOError):
                    # a normal commit fails hard; a rawcommit treats an
                    # unreadable file as a removal instead
                    if use_dirstate:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        remove.append(f)

            # update manifest
            m1.update(new)
            remove.sort()
            removed = []

            for f in remove:
                if f in m1:
                    del m1[f]
                    removed.append(f)
                elif f in m2:
                    removed.append(f)
            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                   (new, removed))

            # add changeset
            if (not empty_ok and not text) or force_editor:
                edittext = []
                if text:
                    edittext.append(text)
                    edittext.append("")
                edittext.append(_("HG: Enter commit message."
                                  " Lines beginning with 'HG:' are removed."))
                edittext.append("HG: --")
                edittext.append("HG: user: %s" % user)
                if p2 != nullid:
                    edittext.append("HG: branch merge")
                if branchname:
                    edittext.append("HG: branch '%s'" % util.tolocal(branchname))
                edittext.extend(["HG: changed %s" % f for f in changed])
                edittext.extend(["HG: removed %s" % f for f in removed])
                if not changed and not remove:
                    edittext.append("HG: no files changed")
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            # normalize the message: strip trailing whitespace and
            # leading blank lines
            lines = [line.rstrip() for line in text.rstrip().splitlines()]
            while lines and not lines[0]:
                del lines[0]
            if not lines and use_dirstate:
                raise util.Abort(_("empty commit message"))
            text = '\n'.join(lines)

            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                   user, wctx.date(), extra)
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            tr.close()

            if self.branchcache:
                self.branchtags()

            if use_dirstate or update_dirstate:
                self.dirstate.setparents(n)
                if use_dirstate:
                    for f in removed:
                        self.dirstate.forget(f)
            valid = 1 # our dirstate updates are complete

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            if not valid: # don't save our updated dirstate
                self.dirstate.invalidate()
            # releasing the last reference aborts an unclosed transaction
            del tr
946
941
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        With a node, files come from that revision's manifest (sorted);
        requested names missing from the manifest are reported through
        match.bad().  Without a node, the dirstate is walked instead.
        '''

        if node:
            fdict = dict.fromkeys(match.files())
            # for dirstate.walk, files=['.'] means "walk the whole tree".
            # follow that here, too
            fdict.pop('.', None)
            mdict = self.manifest.read(self.changelog.read(node)[0])
            mfiles = mdict.keys()
            mfiles.sort()
            for fn in mfiles:
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        # safe to delete mid-iteration: we break right away
                        del fdict[ffn]
                        break
                if match(fn):
                    yield fn
            # anything left in fdict was requested but not found in the
            # manifest; let match.bad() decide whether to still yield it
            ffiles = fdict.keys()
            ffiles.sort()
            for fn in ffiles:
                if match.bad(fn, 'No such file in rev ' + short(node)) \
                   and match(fn):
                    yield fn
        else:
            for fn in self.dirstate.walk(match):
                yield fn
979
974
    def status(self, node1=None, node2=None, match=None,
               list_ignored=False, list_clean=False, list_unknown=True):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        ignored/clean are only populated when the corresponding
        list_* flag is set.
        """

        # compare working-dir contents of fn against its node1 revision
        def fcmp(fn, getnode):
            t1 = self.wread(fn)
            return self.file(fn).cmp(getnode(fn), t1)

        # manifest of *node* restricted to files accepted by match
        def mfmatches(node):
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if not match:
            match = match_.always(self.root, self.getcwd())

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(match, list_ignored,
                                                    list_clean, list_unknown)
            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    fixup = []
                    # do a full compare of any files that might have changed
                    ctx = self.changectx()
                    mexec = lambda f: 'x' in ctx.fileflags(f)
                    mlink = lambda f: 'l' in ctx.fileflags(f)
                    is_exec = util.execfunc(self.root, mexec)
                    is_link = util.linkfunc(self.root, mlink)
                    def flags(f):
                        return is_link(f) and 'l' or is_exec(f) and 'x' or ''
                    for f in lookup:
                        if (f not in ctx or flags(f) != ctx.fileflags(f)
                            or ctx[f].cmp(self.wread(f))):
                            modified.append(f)
                        else:
                            # content identical: dirstate entry was stale
                            fixup.append(f)
                            if list_clean:
                                clean.append(f)

                    # update dirstate for files that are actually clean
                    if fixup:
                        wlock = None
                        try:
                            try:
                                # best-effort: skip the fixup if the
                                # wlock can't be taken without waiting
                                wlock = self.wlock(False)
                            except lock.LockException:
                                pass
                            if wlock:
                                for f in fixup:
                                    self.dirstate.normal(f)
                        finally:
                            del wlock
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    # empty node: forces a real content compare below
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]

        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] != "" or fcmp(fn, getnode)))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever survives in mf1 exists only on the node1 side
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
1100
1095
1101 def add(self, list):
1096 def add(self, list):
1102 wlock = self.wlock()
1097 wlock = self.wlock()
1103 try:
1098 try:
1104 rejected = []
1099 rejected = []
1105 for f in list:
1100 for f in list:
1106 p = self.wjoin(f)
1101 p = self.wjoin(f)
1107 try:
1102 try:
1108 st = os.lstat(p)
1103 st = os.lstat(p)
1109 except:
1104 except:
1110 self.ui.warn(_("%s does not exist!\n") % f)
1105 self.ui.warn(_("%s does not exist!\n") % f)
1111 rejected.append(f)
1106 rejected.append(f)
1112 continue
1107 continue
1113 if st.st_size > 10000000:
1108 if st.st_size > 10000000:
1114 self.ui.warn(_("%s: files over 10MB may cause memory and"
1109 self.ui.warn(_("%s: files over 10MB may cause memory and"
1115 " performance problems\n"
1110 " performance problems\n"
1116 "(use 'hg revert %s' to unadd the file)\n")
1111 "(use 'hg revert %s' to unadd the file)\n")
1117 % (f, f))
1112 % (f, f))
1118 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1113 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1119 self.ui.warn(_("%s not added: only files and symlinks "
1114 self.ui.warn(_("%s not added: only files and symlinks "
1120 "supported currently\n") % f)
1115 "supported currently\n") % f)
1121 rejected.append(p)
1116 rejected.append(p)
1122 elif self.dirstate[f] in 'amn':
1117 elif self.dirstate[f] in 'amn':
1123 self.ui.warn(_("%s already tracked!\n") % f)
1118 self.ui.warn(_("%s already tracked!\n") % f)
1124 elif self.dirstate[f] == 'r':
1119 elif self.dirstate[f] == 'r':
1125 self.dirstate.normallookup(f)
1120 self.dirstate.normallookup(f)
1126 else:
1121 else:
1127 self.dirstate.add(f)
1122 self.dirstate.add(f)
1128 return rejected
1123 return rejected
1129 finally:
1124 finally:
1130 del wlock
1125 del wlock
1131
1126
1132 def forget(self, list):
1127 def forget(self, list):
1133 wlock = self.wlock()
1128 wlock = self.wlock()
1134 try:
1129 try:
1135 for f in list:
1130 for f in list:
1136 if self.dirstate[f] != 'a':
1131 if self.dirstate[f] != 'a':
1137 self.ui.warn(_("%s not added!\n") % f)
1132 self.ui.warn(_("%s not added!\n") % f)
1138 else:
1133 else:
1139 self.dirstate.forget(f)
1134 self.dirstate.forget(f)
1140 finally:
1135 finally:
1141 del wlock
1136 del wlock
1142
1137
1143 def remove(self, list, unlink=False):
1138 def remove(self, list, unlink=False):
1144 wlock = None
1139 wlock = None
1145 try:
1140 try:
1146 if unlink:
1141 if unlink:
1147 for f in list:
1142 for f in list:
1148 try:
1143 try:
1149 util.unlink(self.wjoin(f))
1144 util.unlink(self.wjoin(f))
1150 except OSError, inst:
1145 except OSError, inst:
1151 if inst.errno != errno.ENOENT:
1146 if inst.errno != errno.ENOENT:
1152 raise
1147 raise
1153 wlock = self.wlock()
1148 wlock = self.wlock()
1154 for f in list:
1149 for f in list:
1155 if unlink and os.path.exists(self.wjoin(f)):
1150 if unlink and os.path.exists(self.wjoin(f)):
1156 self.ui.warn(_("%s still exists!\n") % f)
1151 self.ui.warn(_("%s still exists!\n") % f)
1157 elif self.dirstate[f] == 'a':
1152 elif self.dirstate[f] == 'a':
1158 self.dirstate.forget(f)
1153 self.dirstate.forget(f)
1159 elif f not in self.dirstate:
1154 elif f not in self.dirstate:
1160 self.ui.warn(_("%s not tracked!\n") % f)
1155 self.ui.warn(_("%s not tracked!\n") % f)
1161 else:
1156 else:
1162 self.dirstate.remove(f)
1157 self.dirstate.remove(f)
1163 finally:
1158 finally:
1164 del wlock
1159 del wlock
1165
1160
1166 def undelete(self, list):
1161 def undelete(self, list):
1167 wlock = None
1162 wlock = None
1168 try:
1163 try:
1169 manifests = [self.manifest.read(self.changelog.read(p)[0])
1164 manifests = [self.manifest.read(self.changelog.read(p)[0])
1170 for p in self.dirstate.parents() if p != nullid]
1165 for p in self.dirstate.parents() if p != nullid]
1171 wlock = self.wlock()
1166 wlock = self.wlock()
1172 for f in list:
1167 for f in list:
1173 if self.dirstate[f] != 'r':
1168 if self.dirstate[f] != 'r':
1174 self.ui.warn("%s not removed!\n" % f)
1169 self.ui.warn("%s not removed!\n" % f)
1175 else:
1170 else:
1176 m = f in manifests[0] and manifests[0] or manifests[1]
1171 m = f in manifests[0] and manifests[0] or manifests[1]
1177 t = self.file(f).read(m[f])
1172 t = self.file(f).read(m[f])
1178 self.wwrite(f, t, m.flags(f))
1173 self.wwrite(f, t, m.flags(f))
1179 self.dirstate.normal(f)
1174 self.dirstate.normal(f)
1180 finally:
1175 finally:
1181 del wlock
1176 del wlock
1182
1177
1183 def copy(self, source, dest):
1178 def copy(self, source, dest):
1184 wlock = None
1179 wlock = None
1185 try:
1180 try:
1186 p = self.wjoin(dest)
1181 p = self.wjoin(dest)
1187 if not (os.path.exists(p) or os.path.islink(p)):
1182 if not (os.path.exists(p) or os.path.islink(p)):
1188 self.ui.warn(_("%s does not exist!\n") % dest)
1183 self.ui.warn(_("%s does not exist!\n") % dest)
1189 elif not (os.path.isfile(p) or os.path.islink(p)):
1184 elif not (os.path.isfile(p) or os.path.islink(p)):
1190 self.ui.warn(_("copy failed: %s is not a file or a "
1185 self.ui.warn(_("copy failed: %s is not a file or a "
1191 "symbolic link\n") % dest)
1186 "symbolic link\n") % dest)
1192 else:
1187 else:
1193 wlock = self.wlock()
1188 wlock = self.wlock()
1194 if dest not in self.dirstate:
1189 if dest not in self.dirstate:
1195 self.dirstate.add(dest)
1190 self.dirstate.add(dest)
1196 self.dirstate.copy(source, dest)
1191 self.dirstate.copy(source, dest)
1197 finally:
1192 finally:
1198 del wlock
1193 del wlock
1199
1194
1200 def heads(self, start=None):
1195 def heads(self, start=None):
1201 heads = self.changelog.heads(start)
1196 heads = self.changelog.heads(start)
1202 # sort the output in rev descending order
1197 # sort the output in rev descending order
1203 heads = [(-self.changelog.rev(h), h) for h in heads]
1198 heads = [(-self.changelog.rev(h), h) for h in heads]
1204 heads.sort()
1199 heads.sort()
1205 return [n for (r, n) in heads]
1200 return [n for (r, n) in heads]
1206
1201
1207 def branchheads(self, branch=None, start=None):
1202 def branchheads(self, branch=None, start=None):
1208 branch = branch is None and self.workingctx().branch() or branch
1203 branch = branch is None and self.workingctx().branch() or branch
1209 branches = self.branchtags()
1204 branches = self.branchtags()
1210 if branch not in branches:
1205 if branch not in branches:
1211 return []
1206 return []
1212 # The basic algorithm is this:
1207 # The basic algorithm is this:
1213 #
1208 #
1214 # Start from the branch tip since there are no later revisions that can
1209 # Start from the branch tip since there are no later revisions that can
1215 # possibly be in this branch, and the tip is a guaranteed head.
1210 # possibly be in this branch, and the tip is a guaranteed head.
1216 #
1211 #
1217 # Remember the tip's parents as the first ancestors, since these by
1212 # Remember the tip's parents as the first ancestors, since these by
1218 # definition are not heads.
1213 # definition are not heads.
1219 #
1214 #
1220 # Step backwards from the brach tip through all the revisions. We are
1215 # Step backwards from the brach tip through all the revisions. We are
1221 # guaranteed by the rules of Mercurial that we will now be visiting the
1216 # guaranteed by the rules of Mercurial that we will now be visiting the
1222 # nodes in reverse topological order (children before parents).
1217 # nodes in reverse topological order (children before parents).
1223 #
1218 #
1224 # If a revision is one of the ancestors of a head then we can toss it
1219 # If a revision is one of the ancestors of a head then we can toss it
1225 # out of the ancestors set (we've already found it and won't be
1220 # out of the ancestors set (we've already found it and won't be
1226 # visiting it again) and put its parents in the ancestors set.
1221 # visiting it again) and put its parents in the ancestors set.
1227 #
1222 #
1228 # Otherwise, if a revision is in the branch it's another head, since it
1223 # Otherwise, if a revision is in the branch it's another head, since it
1229 # wasn't in the ancestor list of an existing head. So add it to the
1224 # wasn't in the ancestor list of an existing head. So add it to the
1230 # head list, and add its parents to the ancestor list.
1225 # head list, and add its parents to the ancestor list.
1231 #
1226 #
1232 # If it is not in the branch ignore it.
1227 # If it is not in the branch ignore it.
1233 #
1228 #
1234 # Once we have a list of heads, use nodesbetween to filter out all the
1229 # Once we have a list of heads, use nodesbetween to filter out all the
1235 # heads that cannot be reached from startrev. There may be a more
1230 # heads that cannot be reached from startrev. There may be a more
1236 # efficient way to do this as part of the previous algorithm.
1231 # efficient way to do this as part of the previous algorithm.
1237
1232
1238 set = util.set
1233 set = util.set
1239 heads = [self.changelog.rev(branches[branch])]
1234 heads = [self.changelog.rev(branches[branch])]
1240 # Don't care if ancestors contains nullrev or not.
1235 # Don't care if ancestors contains nullrev or not.
1241 ancestors = set(self.changelog.parentrevs(heads[0]))
1236 ancestors = set(self.changelog.parentrevs(heads[0]))
1242 for rev in xrange(heads[0] - 1, nullrev, -1):
1237 for rev in xrange(heads[0] - 1, nullrev, -1):
1243 if rev in ancestors:
1238 if rev in ancestors:
1244 ancestors.update(self.changelog.parentrevs(rev))
1239 ancestors.update(self.changelog.parentrevs(rev))
1245 ancestors.remove(rev)
1240 ancestors.remove(rev)
1246 elif self.changectx(rev).branch() == branch:
1241 elif self.changectx(rev).branch() == branch:
1247 heads.append(rev)
1242 heads.append(rev)
1248 ancestors.update(self.changelog.parentrevs(rev))
1243 ancestors.update(self.changelog.parentrevs(rev))
1249 heads = [self.changelog.node(rev) for rev in heads]
1244 heads = [self.changelog.node(rev) for rev in heads]
1250 if start is not None:
1245 if start is not None:
1251 heads = self.changelog.nodesbetween([start], heads)[2]
1246 heads = self.changelog.nodesbetween([start], heads)[2]
1252 return heads
1247 return heads
1253
1248
1254 def branches(self, nodes):
1249 def branches(self, nodes):
1255 if not nodes:
1250 if not nodes:
1256 nodes = [self.changelog.tip()]
1251 nodes = [self.changelog.tip()]
1257 b = []
1252 b = []
1258 for n in nodes:
1253 for n in nodes:
1259 t = n
1254 t = n
1260 while 1:
1255 while 1:
1261 p = self.changelog.parents(n)
1256 p = self.changelog.parents(n)
1262 if p[1] != nullid or p[0] == nullid:
1257 if p[1] != nullid or p[0] == nullid:
1263 b.append((t, n, p[0], p[1]))
1258 b.append((t, n, p[0], p[1]))
1264 break
1259 break
1265 n = p[0]
1260 n = p[0]
1266 return b
1261 return b
1267
1262
1268 def between(self, pairs):
1263 def between(self, pairs):
1269 r = []
1264 r = []
1270
1265
1271 for top, bottom in pairs:
1266 for top, bottom in pairs:
1272 n, l, i = top, [], 0
1267 n, l, i = top, [], 0
1273 f = 1
1268 f = 1
1274
1269
1275 while n != bottom:
1270 while n != bottom:
1276 p = self.changelog.parents(n)[0]
1271 p = self.changelog.parents(n)[0]
1277 if i == f:
1272 if i == f:
1278 l.append(n)
1273 l.append(n)
1279 f = f * 2
1274 f = f * 2
1280 n = p
1275 n = p
1281 i += 1
1276 i += 1
1282
1277
1283 r.append(l)
1278 r.append(l)
1284
1279
1285 return r
1280 return r
1286
1281
1287 def findincoming(self, remote, base=None, heads=None, force=False):
1282 def findincoming(self, remote, base=None, heads=None, force=False):
1288 """Return list of roots of the subsets of missing nodes from remote
1283 """Return list of roots of the subsets of missing nodes from remote
1289
1284
1290 If base dict is specified, assume that these nodes and their parents
1285 If base dict is specified, assume that these nodes and their parents
1291 exist on the remote side and that no child of a node of base exists
1286 exist on the remote side and that no child of a node of base exists
1292 in both remote and self.
1287 in both remote and self.
1293 Furthermore base will be updated to include the nodes that exists
1288 Furthermore base will be updated to include the nodes that exists
1294 in self and remote but no children exists in self and remote.
1289 in self and remote but no children exists in self and remote.
1295 If a list of heads is specified, return only nodes which are heads
1290 If a list of heads is specified, return only nodes which are heads
1296 or ancestors of these heads.
1291 or ancestors of these heads.
1297
1292
1298 All the ancestors of base are in self and in remote.
1293 All the ancestors of base are in self and in remote.
1299 All the descendants of the list returned are missing in self.
1294 All the descendants of the list returned are missing in self.
1300 (and so we know that the rest of the nodes are missing in remote, see
1295 (and so we know that the rest of the nodes are missing in remote, see
1301 outgoing)
1296 outgoing)
1302 """
1297 """
1303 m = self.changelog.nodemap
1298 m = self.changelog.nodemap
1304 search = []
1299 search = []
1305 fetch = {}
1300 fetch = {}
1306 seen = {}
1301 seen = {}
1307 seenbranch = {}
1302 seenbranch = {}
1308 if base == None:
1303 if base == None:
1309 base = {}
1304 base = {}
1310
1305
1311 if not heads:
1306 if not heads:
1312 heads = remote.heads()
1307 heads = remote.heads()
1313
1308
1314 if self.changelog.tip() == nullid:
1309 if self.changelog.tip() == nullid:
1315 base[nullid] = 1
1310 base[nullid] = 1
1316 if heads != [nullid]:
1311 if heads != [nullid]:
1317 return [nullid]
1312 return [nullid]
1318 return []
1313 return []
1319
1314
1320 # assume we're closer to the tip than the root
1315 # assume we're closer to the tip than the root
1321 # and start by examining the heads
1316 # and start by examining the heads
1322 self.ui.status(_("searching for changes\n"))
1317 self.ui.status(_("searching for changes\n"))
1323
1318
1324 unknown = []
1319 unknown = []
1325 for h in heads:
1320 for h in heads:
1326 if h not in m:
1321 if h not in m:
1327 unknown.append(h)
1322 unknown.append(h)
1328 else:
1323 else:
1329 base[h] = 1
1324 base[h] = 1
1330
1325
1331 if not unknown:
1326 if not unknown:
1332 return []
1327 return []
1333
1328
1334 req = dict.fromkeys(unknown)
1329 req = dict.fromkeys(unknown)
1335 reqcnt = 0
1330 reqcnt = 0
1336
1331
1337 # search through remote branches
1332 # search through remote branches
1338 # a 'branch' here is a linear segment of history, with four parts:
1333 # a 'branch' here is a linear segment of history, with four parts:
1339 # head, root, first parent, second parent
1334 # head, root, first parent, second parent
1340 # (a branch always has two parents (or none) by definition)
1335 # (a branch always has two parents (or none) by definition)
1341 unknown = remote.branches(unknown)
1336 unknown = remote.branches(unknown)
1342 while unknown:
1337 while unknown:
1343 r = []
1338 r = []
1344 while unknown:
1339 while unknown:
1345 n = unknown.pop(0)
1340 n = unknown.pop(0)
1346 if n[0] in seen:
1341 if n[0] in seen:
1347 continue
1342 continue
1348
1343
1349 self.ui.debug(_("examining %s:%s\n")
1344 self.ui.debug(_("examining %s:%s\n")
1350 % (short(n[0]), short(n[1])))
1345 % (short(n[0]), short(n[1])))
1351 if n[0] == nullid: # found the end of the branch
1346 if n[0] == nullid: # found the end of the branch
1352 pass
1347 pass
1353 elif n in seenbranch:
1348 elif n in seenbranch:
1354 self.ui.debug(_("branch already found\n"))
1349 self.ui.debug(_("branch already found\n"))
1355 continue
1350 continue
1356 elif n[1] and n[1] in m: # do we know the base?
1351 elif n[1] and n[1] in m: # do we know the base?
1357 self.ui.debug(_("found incomplete branch %s:%s\n")
1352 self.ui.debug(_("found incomplete branch %s:%s\n")
1358 % (short(n[0]), short(n[1])))
1353 % (short(n[0]), short(n[1])))
1359 search.append(n) # schedule branch range for scanning
1354 search.append(n) # schedule branch range for scanning
1360 seenbranch[n] = 1
1355 seenbranch[n] = 1
1361 else:
1356 else:
1362 if n[1] not in seen and n[1] not in fetch:
1357 if n[1] not in seen and n[1] not in fetch:
1363 if n[2] in m and n[3] in m:
1358 if n[2] in m and n[3] in m:
1364 self.ui.debug(_("found new changeset %s\n") %
1359 self.ui.debug(_("found new changeset %s\n") %
1365 short(n[1]))
1360 short(n[1]))
1366 fetch[n[1]] = 1 # earliest unknown
1361 fetch[n[1]] = 1 # earliest unknown
1367 for p in n[2:4]:
1362 for p in n[2:4]:
1368 if p in m:
1363 if p in m:
1369 base[p] = 1 # latest known
1364 base[p] = 1 # latest known
1370
1365
1371 for p in n[2:4]:
1366 for p in n[2:4]:
1372 if p not in req and p not in m:
1367 if p not in req and p not in m:
1373 r.append(p)
1368 r.append(p)
1374 req[p] = 1
1369 req[p] = 1
1375 seen[n[0]] = 1
1370 seen[n[0]] = 1
1376
1371
1377 if r:
1372 if r:
1378 reqcnt += 1
1373 reqcnt += 1
1379 self.ui.debug(_("request %d: %s\n") %
1374 self.ui.debug(_("request %d: %s\n") %
1380 (reqcnt, " ".join(map(short, r))))
1375 (reqcnt, " ".join(map(short, r))))
1381 for p in xrange(0, len(r), 10):
1376 for p in xrange(0, len(r), 10):
1382 for b in remote.branches(r[p:p+10]):
1377 for b in remote.branches(r[p:p+10]):
1383 self.ui.debug(_("received %s:%s\n") %
1378 self.ui.debug(_("received %s:%s\n") %
1384 (short(b[0]), short(b[1])))
1379 (short(b[0]), short(b[1])))
1385 unknown.append(b)
1380 unknown.append(b)
1386
1381
1387 # do binary search on the branches we found
1382 # do binary search on the branches we found
1388 while search:
1383 while search:
1389 n = search.pop(0)
1384 n = search.pop(0)
1390 reqcnt += 1
1385 reqcnt += 1
1391 l = remote.between([(n[0], n[1])])[0]
1386 l = remote.between([(n[0], n[1])])[0]
1392 l.append(n[1])
1387 l.append(n[1])
1393 p = n[0]
1388 p = n[0]
1394 f = 1
1389 f = 1
1395 for i in l:
1390 for i in l:
1396 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1391 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1397 if i in m:
1392 if i in m:
1398 if f <= 2:
1393 if f <= 2:
1399 self.ui.debug(_("found new branch changeset %s\n") %
1394 self.ui.debug(_("found new branch changeset %s\n") %
1400 short(p))
1395 short(p))
1401 fetch[p] = 1
1396 fetch[p] = 1
1402 base[i] = 1
1397 base[i] = 1
1403 else:
1398 else:
1404 self.ui.debug(_("narrowed branch search to %s:%s\n")
1399 self.ui.debug(_("narrowed branch search to %s:%s\n")
1405 % (short(p), short(i)))
1400 % (short(p), short(i)))
1406 search.append((p, i))
1401 search.append((p, i))
1407 break
1402 break
1408 p, f = i, f * 2
1403 p, f = i, f * 2
1409
1404
1410 # sanity check our fetch list
1405 # sanity check our fetch list
1411 for f in fetch.keys():
1406 for f in fetch.keys():
1412 if f in m:
1407 if f in m:
1413 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1408 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1414
1409
1415 if base.keys() == [nullid]:
1410 if base.keys() == [nullid]:
1416 if force:
1411 if force:
1417 self.ui.warn(_("warning: repository is unrelated\n"))
1412 self.ui.warn(_("warning: repository is unrelated\n"))
1418 else:
1413 else:
1419 raise util.Abort(_("repository is unrelated"))
1414 raise util.Abort(_("repository is unrelated"))
1420
1415
1421 self.ui.debug(_("found new changesets starting at ") +
1416 self.ui.debug(_("found new changesets starting at ") +
1422 " ".join([short(f) for f in fetch]) + "\n")
1417 " ".join([short(f) for f in fetch]) + "\n")
1423
1418
1424 self.ui.debug(_("%d total queries\n") % reqcnt)
1419 self.ui.debug(_("%d total queries\n") % reqcnt)
1425
1420
1426 return fetch.keys()
1421 return fetch.keys()
1427
1422
1428 def findoutgoing(self, remote, base=None, heads=None, force=False):
1423 def findoutgoing(self, remote, base=None, heads=None, force=False):
1429 """Return list of nodes that are roots of subsets not in remote
1424 """Return list of nodes that are roots of subsets not in remote
1430
1425
1431 If base dict is specified, assume that these nodes and their parents
1426 If base dict is specified, assume that these nodes and their parents
1432 exist on the remote side.
1427 exist on the remote side.
1433 If a list of heads is specified, return only nodes which are heads
1428 If a list of heads is specified, return only nodes which are heads
1434 or ancestors of these heads, and return a second element which
1429 or ancestors of these heads, and return a second element which
1435 contains all remote heads which get new children.
1430 contains all remote heads which get new children.
1436 """
1431 """
1437 if base == None:
1432 if base == None:
1438 base = {}
1433 base = {}
1439 self.findincoming(remote, base, heads, force=force)
1434 self.findincoming(remote, base, heads, force=force)
1440
1435
1441 self.ui.debug(_("common changesets up to ")
1436 self.ui.debug(_("common changesets up to ")
1442 + " ".join(map(short, base.keys())) + "\n")
1437 + " ".join(map(short, base.keys())) + "\n")
1443
1438
1444 remain = dict.fromkeys(self.changelog.nodemap)
1439 remain = dict.fromkeys(self.changelog.nodemap)
1445
1440
1446 # prune everything remote has from the tree
1441 # prune everything remote has from the tree
1447 del remain[nullid]
1442 del remain[nullid]
1448 remove = base.keys()
1443 remove = base.keys()
1449 while remove:
1444 while remove:
1450 n = remove.pop(0)
1445 n = remove.pop(0)
1451 if n in remain:
1446 if n in remain:
1452 del remain[n]
1447 del remain[n]
1453 for p in self.changelog.parents(n):
1448 for p in self.changelog.parents(n):
1454 remove.append(p)
1449 remove.append(p)
1455
1450
1456 # find every node whose parents have been pruned
1451 # find every node whose parents have been pruned
1457 subset = []
1452 subset = []
1458 # find every remote head that will get new children
1453 # find every remote head that will get new children
1459 updated_heads = {}
1454 updated_heads = {}
1460 for n in remain:
1455 for n in remain:
1461 p1, p2 = self.changelog.parents(n)
1456 p1, p2 = self.changelog.parents(n)
1462 if p1 not in remain and p2 not in remain:
1457 if p1 not in remain and p2 not in remain:
1463 subset.append(n)
1458 subset.append(n)
1464 if heads:
1459 if heads:
1465 if p1 in heads:
1460 if p1 in heads:
1466 updated_heads[p1] = True
1461 updated_heads[p1] = True
1467 if p2 in heads:
1462 if p2 in heads:
1468 updated_heads[p2] = True
1463 updated_heads[p2] = True
1469
1464
1470 # this is the set of all roots we have to push
1465 # this is the set of all roots we have to push
1471 if heads:
1466 if heads:
1472 return subset, updated_heads.keys()
1467 return subset, updated_heads.keys()
1473 else:
1468 else:
1474 return subset
1469 return subset
1475
1470
1476 def pull(self, remote, heads=None, force=False):
1471 def pull(self, remote, heads=None, force=False):
1477 lock = self.lock()
1472 lock = self.lock()
1478 try:
1473 try:
1479 fetch = self.findincoming(remote, heads=heads, force=force)
1474 fetch = self.findincoming(remote, heads=heads, force=force)
1480 if fetch == [nullid]:
1475 if fetch == [nullid]:
1481 self.ui.status(_("requesting all changes\n"))
1476 self.ui.status(_("requesting all changes\n"))
1482
1477
1483 if not fetch:
1478 if not fetch:
1484 self.ui.status(_("no changes found\n"))
1479 self.ui.status(_("no changes found\n"))
1485 return 0
1480 return 0
1486
1481
1487 if heads is None:
1482 if heads is None:
1488 cg = remote.changegroup(fetch, 'pull')
1483 cg = remote.changegroup(fetch, 'pull')
1489 else:
1484 else:
1490 if 'changegroupsubset' not in remote.capabilities:
1485 if 'changegroupsubset' not in remote.capabilities:
1491 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1486 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1492 cg = remote.changegroupsubset(fetch, heads, 'pull')
1487 cg = remote.changegroupsubset(fetch, heads, 'pull')
1493 return self.addchangegroup(cg, 'pull', remote.url())
1488 return self.addchangegroup(cg, 'pull', remote.url())
1494 finally:
1489 finally:
1495 del lock
1490 del lock
1496
1491
1497 def push(self, remote, force=False, revs=None):
1492 def push(self, remote, force=False, revs=None):
1498 # there are two ways to push to remote repo:
1493 # there are two ways to push to remote repo:
1499 #
1494 #
1500 # addchangegroup assumes local user can lock remote
1495 # addchangegroup assumes local user can lock remote
1501 # repo (local filesystem, old ssh servers).
1496 # repo (local filesystem, old ssh servers).
1502 #
1497 #
1503 # unbundle assumes local user cannot lock remote repo (new ssh
1498 # unbundle assumes local user cannot lock remote repo (new ssh
1504 # servers, http servers).
1499 # servers, http servers).
1505
1500
1506 if remote.capable('unbundle'):
1501 if remote.capable('unbundle'):
1507 return self.push_unbundle(remote, force, revs)
1502 return self.push_unbundle(remote, force, revs)
1508 return self.push_addchangegroup(remote, force, revs)
1503 return self.push_addchangegroup(remote, force, revs)
1509
1504
1510 def prepush(self, remote, force, revs):
1505 def prepush(self, remote, force, revs):
1511 base = {}
1506 base = {}
1512 remote_heads = remote.heads()
1507 remote_heads = remote.heads()
1513 inc = self.findincoming(remote, base, remote_heads, force=force)
1508 inc = self.findincoming(remote, base, remote_heads, force=force)
1514
1509
1515 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1510 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1516 if revs is not None:
1511 if revs is not None:
1517 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1512 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1518 else:
1513 else:
1519 bases, heads = update, self.changelog.heads()
1514 bases, heads = update, self.changelog.heads()
1520
1515
1521 if not bases:
1516 if not bases:
1522 self.ui.status(_("no changes found\n"))
1517 self.ui.status(_("no changes found\n"))
1523 return None, 1
1518 return None, 1
1524 elif not force:
1519 elif not force:
1525 # check if we're creating new remote heads
1520 # check if we're creating new remote heads
1526 # to be a remote head after push, node must be either
1521 # to be a remote head after push, node must be either
1527 # - unknown locally
1522 # - unknown locally
1528 # - a local outgoing head descended from update
1523 # - a local outgoing head descended from update
1529 # - a remote head that's known locally and not
1524 # - a remote head that's known locally and not
1530 # ancestral to an outgoing head
1525 # ancestral to an outgoing head
1531
1526
1532 warn = 0
1527 warn = 0
1533
1528
1534 if remote_heads == [nullid]:
1529 if remote_heads == [nullid]:
1535 warn = 0
1530 warn = 0
1536 elif not revs and len(heads) > len(remote_heads):
1531 elif not revs and len(heads) > len(remote_heads):
1537 warn = 1
1532 warn = 1
1538 else:
1533 else:
1539 newheads = list(heads)
1534 newheads = list(heads)
1540 for r in remote_heads:
1535 for r in remote_heads:
1541 if r in self.changelog.nodemap:
1536 if r in self.changelog.nodemap:
1542 desc = self.changelog.heads(r, heads)
1537 desc = self.changelog.heads(r, heads)
1543 l = [h for h in heads if h in desc]
1538 l = [h for h in heads if h in desc]
1544 if not l:
1539 if not l:
1545 newheads.append(r)
1540 newheads.append(r)
1546 else:
1541 else:
1547 newheads.append(r)
1542 newheads.append(r)
1548 if len(newheads) > len(remote_heads):
1543 if len(newheads) > len(remote_heads):
1549 warn = 1
1544 warn = 1
1550
1545
1551 if warn:
1546 if warn:
1552 self.ui.warn(_("abort: push creates new remote heads!\n"))
1547 self.ui.warn(_("abort: push creates new remote heads!\n"))
1553 self.ui.status(_("(did you forget to merge?"
1548 self.ui.status(_("(did you forget to merge?"
1554 " use push -f to force)\n"))
1549 " use push -f to force)\n"))
1555 return None, 0
1550 return None, 0
1556 elif inc:
1551 elif inc:
1557 self.ui.warn(_("note: unsynced remote changes!\n"))
1552 self.ui.warn(_("note: unsynced remote changes!\n"))
1558
1553
1559
1554
1560 if revs is None:
1555 if revs is None:
1561 cg = self.changegroup(update, 'push')
1556 cg = self.changegroup(update, 'push')
1562 else:
1557 else:
1563 cg = self.changegroupsubset(update, revs, 'push')
1558 cg = self.changegroupsubset(update, revs, 'push')
1564 return cg, remote_heads
1559 return cg, remote_heads
1565
1560
1566 def push_addchangegroup(self, remote, force, revs):
1561 def push_addchangegroup(self, remote, force, revs):
1567 lock = remote.lock()
1562 lock = remote.lock()
1568 try:
1563 try:
1569 ret = self.prepush(remote, force, revs)
1564 ret = self.prepush(remote, force, revs)
1570 if ret[0] is not None:
1565 if ret[0] is not None:
1571 cg, remote_heads = ret
1566 cg, remote_heads = ret
1572 return remote.addchangegroup(cg, 'push', self.url())
1567 return remote.addchangegroup(cg, 'push', self.url())
1573 return ret[1]
1568 return ret[1]
1574 finally:
1569 finally:
1575 del lock
1570 del lock
1576
1571
1577 def push_unbundle(self, remote, force, revs):
1572 def push_unbundle(self, remote, force, revs):
1578 # local repo finds heads on server, finds out what revs it
1573 # local repo finds heads on server, finds out what revs it
1579 # must push. once revs transferred, if server finds it has
1574 # must push. once revs transferred, if server finds it has
1580 # different heads (someone else won commit/push race), server
1575 # different heads (someone else won commit/push race), server
1581 # aborts.
1576 # aborts.
1582
1577
1583 ret = self.prepush(remote, force, revs)
1578 ret = self.prepush(remote, force, revs)
1584 if ret[0] is not None:
1579 if ret[0] is not None:
1585 cg, remote_heads = ret
1580 cg, remote_heads = ret
1586 if force: remote_heads = ['force']
1581 if force: remote_heads = ['force']
1587 return remote.unbundle(cg, remote_heads, 'push')
1582 return remote.unbundle(cg, remote_heads, 'push')
1588 return ret[1]
1583 return ret[1]
1589
1584
1590 def changegroupinfo(self, nodes, source):
1585 def changegroupinfo(self, nodes, source):
1591 if self.ui.verbose or source == 'bundle':
1586 if self.ui.verbose or source == 'bundle':
1592 self.ui.status(_("%d changesets found\n") % len(nodes))
1587 self.ui.status(_("%d changesets found\n") % len(nodes))
1593 if self.ui.debugflag:
1588 if self.ui.debugflag:
1594 self.ui.debug(_("List of changesets:\n"))
1589 self.ui.debug(_("List of changesets:\n"))
1595 for node in nodes:
1590 for node in nodes:
1596 self.ui.debug("%s\n" % hex(node))
1591 self.ui.debug("%s\n" % hex(node))
1597
1592
    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.

        Returns a util.chunkbuffer wrapping the gengroup() generator below,
        i.e. a file-like object yielding the wire-format changegroup.
        """

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        # Force the full index to load; works around a bug in lazyindex.
        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            # next_rev is a one-element list so the closure can mutate it
            # (Python 2 has no 'nonlocal').
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.items():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            # The key 1 stands for the manifest (see docstring).
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    add_extra_nodes(fname,
                                    msng_filenode_set.setdefault(fname, {}))
                    changedfiles[fname] = 1
            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                if filerevlog.count() == 0:
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1894
1889
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        Returns a util.chunkbuffer wrapping the gengroup() generator, i.e. a
        file-like object yielding the wire-format changegroup.
        """

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # Everything reachable from basenodes onward goes into the group.
        nodes = cl.nodesbetween(basenodes, None)[0]
        # Ersatz set of the outgoing changelog revision numbers.
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        # A changeset's linknode is itself, so the lookup is identity.
        def identity(x):
            return x

        # Yield, in revision order, every node of a revlog whose linkrev is
        # one of the outgoing changesets.
        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        # Closure factory: records every file touched by each outgoing
        # changeset into changedfileset.
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # Closure factory: map a node of the given revlog to the changelog
        # node it is linked to.
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                if filerevlog.count() == 0:
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                # Materialize so we can test emptiness before emitting the
                # filename header.
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1964
1959
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        source is a file-like object of changegroup chunks; srctype and url
        describe where it came from (passed through to the hooks).

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        # Progress callback for changelog.addgroup; also supplies the next
        # changelog rev as the linkrev for incoming changesets.
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        # Map a changelog node to its revision number (linkrev for
        # manifest/file groups).
        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            # cor/cnr: changelog tip rev before/after the group is added.
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                # An empty chunk marks the end of the file groups.
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                # A throwing pretxnchangegroup hook aborts the transaction
                # before tr.close() below.
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url)

            tr.close()
        finally:
            # Dropping the last reference rolls the transaction back if
            # tr.close() was never reached.
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
2067
2062
2068
2063
2069 def stream_in(self, remote):
2064 def stream_in(self, remote):
2070 fp = remote.stream_out()
2065 fp = remote.stream_out()
2071 l = fp.readline()
2066 l = fp.readline()
2072 try:
2067 try:
2073 resp = int(l)
2068 resp = int(l)
2074 except ValueError:
2069 except ValueError:
2075 raise util.UnexpectedOutput(
2070 raise util.UnexpectedOutput(
2076 _('Unexpected response from remote server:'), l)
2071 _('Unexpected response from remote server:'), l)
2077 if resp == 1:
2072 if resp == 1:
2078 raise util.Abort(_('operation forbidden by server'))
2073 raise util.Abort(_('operation forbidden by server'))
2079 elif resp == 2:
2074 elif resp == 2:
2080 raise util.Abort(_('locking the remote repository failed'))
2075 raise util.Abort(_('locking the remote repository failed'))
2081 elif resp != 0:
2076 elif resp != 0:
2082 raise util.Abort(_('the server sent an unknown error code'))
2077 raise util.Abort(_('the server sent an unknown error code'))
2083 self.ui.status(_('streaming all changes\n'))
2078 self.ui.status(_('streaming all changes\n'))
2084 l = fp.readline()
2079 l = fp.readline()
2085 try:
2080 try:
2086 total_files, total_bytes = map(int, l.split(' ', 1))
2081 total_files, total_bytes = map(int, l.split(' ', 1))
2087 except (ValueError, TypeError):
2082 except (ValueError, TypeError):
2088 raise util.UnexpectedOutput(
2083 raise util.UnexpectedOutput(
2089 _('Unexpected response from remote server:'), l)
2084 _('Unexpected response from remote server:'), l)
2090 self.ui.status(_('%d files to transfer, %s of data\n') %
2085 self.ui.status(_('%d files to transfer, %s of data\n') %
2091 (total_files, util.bytecount(total_bytes)))
2086 (total_files, util.bytecount(total_bytes)))
2092 start = time.time()
2087 start = time.time()
2093 for i in xrange(total_files):
2088 for i in xrange(total_files):
2094 # XXX doesn't support '\n' or '\r' in filenames
2089 # XXX doesn't support '\n' or '\r' in filenames
2095 l = fp.readline()
2090 l = fp.readline()
2096 try:
2091 try:
2097 name, size = l.split('\0', 1)
2092 name, size = l.split('\0', 1)
2098 size = int(size)
2093 size = int(size)
2099 except ValueError, TypeError:
2094 except ValueError, TypeError:
2100 raise util.UnexpectedOutput(
2095 raise util.UnexpectedOutput(
2101 _('Unexpected response from remote server:'), l)
2096 _('Unexpected response from remote server:'), l)
2102 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2097 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2103 ofp = self.sopener(name, 'w')
2098 ofp = self.sopener(name, 'w')
2104 for chunk in util.filechunkiter(fp, limit=size):
2099 for chunk in util.filechunkiter(fp, limit=size):
2105 ofp.write(chunk)
2100 ofp.write(chunk)
2106 ofp.close()
2101 ofp.close()
2107 elapsed = time.time() - start
2102 elapsed = time.time() - start
2108 if elapsed <= 0:
2103 if elapsed <= 0:
2109 elapsed = 0.001
2104 elapsed = 0.001
2110 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2105 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2111 (util.bytecount(total_bytes), elapsed,
2106 (util.bytecount(total_bytes), elapsed,
2112 util.bytecount(total_bytes / elapsed)))
2107 util.bytecount(total_bytes / elapsed)))
2113 self.invalidate()
2108 self.invalidate()
2114 return len(self.heads()) + 1
2109 return len(self.heads()) + 1
2115
2110
2116 def clone(self, remote, heads=[], stream=False):
2111 def clone(self, remote, heads=[], stream=False):
2117 '''clone remote repository.
2112 '''clone remote repository.
2118
2113
2119 keyword arguments:
2114 keyword arguments:
2120 heads: list of revs to clone (forces use of pull)
2115 heads: list of revs to clone (forces use of pull)
2121 stream: use streaming clone if possible'''
2116 stream: use streaming clone if possible'''
2122
2117
2123 # now, all clients that can request uncompressed clones can
2118 # now, all clients that can request uncompressed clones can
2124 # read repo formats supported by all servers that can serve
2119 # read repo formats supported by all servers that can serve
2125 # them.
2120 # them.
2126
2121
2127 # if revlog format changes, client will have to check version
2122 # if revlog format changes, client will have to check version
2128 # and format flags on "stream" capability, and use
2123 # and format flags on "stream" capability, and use
2129 # uncompressed only if compatible.
2124 # uncompressed only if compatible.
2130
2125
2131 if stream and not heads and remote.capable('stream'):
2126 if stream and not heads and remote.capable('stream'):
2132 return self.stream_in(remote)
2127 return self.stream_in(remote)
2133 return self.pull(remote, heads)
2128 return self.pull(remote, heads)
2134
2129
2135 # used to avoid circular references so destructors work
2130 # used to avoid circular references so destructors work
2136 def aftertrans(files):
2131 def aftertrans(files):
2137 renamefiles = [tuple(t) for t in files]
2132 renamefiles = [tuple(t) for t in files]
2138 def a():
2133 def a():
2139 for src, dest in renamefiles:
2134 for src, dest in renamefiles:
2140 util.rename(src, dest)
2135 util.rename(src, dest)
2141 return a
2136 return a
2142
2137
2143 def instance(ui, path, create):
2138 def instance(ui, path, create):
2144 return localrepository(ui, util.drop_scheme('file', path), create)
2139 return localrepository(ui, util.drop_scheme('file', path), create)
2145
2140
2146 def islocal(path):
2141 def islocal(path):
2147 return True
2142 return True
@@ -1,226 +1,225
1 adding a
1 adding a
2 changeset: 0:8580ff50825a
2 changeset: 0:8580ff50825a
3 user: test
3 user: test
4 date: Thu Jan 01 00:00:01 1970 +0000
4 date: Thu Jan 01 00:00:01 1970 +0000
5 summary: a
5 summary: a
6
6
7 % -f, directory
7 % -f, directory
8 abort: can only follow copies/renames for explicit file names
8 abort: can only follow copies/renames for explicit file names
9 % -f, but no args
9 % -f, but no args
10 changeset: 4:b30c444c7c84
10 changeset: 4:b30c444c7c84
11 tag: tip
11 tag: tip
12 user: test
12 user: test
13 date: Thu Jan 01 00:00:05 1970 +0000
13 date: Thu Jan 01 00:00:05 1970 +0000
14 summary: e
14 summary: e
15
15
16 changeset: 3:16b60bf3f99a
16 changeset: 3:16b60bf3f99a
17 user: test
17 user: test
18 date: Thu Jan 01 00:00:04 1970 +0000
18 date: Thu Jan 01 00:00:04 1970 +0000
19 summary: d
19 summary: d
20
20
21 changeset: 2:21fba396af4c
21 changeset: 2:21fba396af4c
22 user: test
22 user: test
23 date: Thu Jan 01 00:00:03 1970 +0000
23 date: Thu Jan 01 00:00:03 1970 +0000
24 summary: c
24 summary: c
25
25
26 changeset: 1:c0296dabce9b
26 changeset: 1:c0296dabce9b
27 user: test
27 user: test
28 date: Thu Jan 01 00:00:02 1970 +0000
28 date: Thu Jan 01 00:00:02 1970 +0000
29 summary: b
29 summary: b
30
30
31 changeset: 0:8580ff50825a
31 changeset: 0:8580ff50825a
32 user: test
32 user: test
33 date: Thu Jan 01 00:00:01 1970 +0000
33 date: Thu Jan 01 00:00:01 1970 +0000
34 summary: a
34 summary: a
35
35
36 % one rename
36 % one rename
37 changeset: 0:8580ff50825a
37 changeset: 0:8580ff50825a
38 user: test
38 user: test
39 date: Thu Jan 01 00:00:01 1970 +0000
39 date: Thu Jan 01 00:00:01 1970 +0000
40 files: a
40 files: a
41 description:
41 description:
42 a
42 a
43
43
44
44
45 % many renames
45 % many renames
46 changeset: 4:b30c444c7c84
46 changeset: 4:b30c444c7c84
47 tag: tip
47 tag: tip
48 user: test
48 user: test
49 date: Thu Jan 01 00:00:05 1970 +0000
49 date: Thu Jan 01 00:00:05 1970 +0000
50 files: dir/b e
50 files: dir/b e
51 description:
51 description:
52 e
52 e
53
53
54
54
55 changeset: 2:21fba396af4c
55 changeset: 2:21fba396af4c
56 user: test
56 user: test
57 date: Thu Jan 01 00:00:03 1970 +0000
57 date: Thu Jan 01 00:00:03 1970 +0000
58 files: b dir/b
58 files: b dir/b
59 description:
59 description:
60 c
60 c
61
61
62
62
63 changeset: 1:c0296dabce9b
63 changeset: 1:c0296dabce9b
64 user: test
64 user: test
65 date: Thu Jan 01 00:00:02 1970 +0000
65 date: Thu Jan 01 00:00:02 1970 +0000
66 files: b
66 files: b
67 description:
67 description:
68 b
68 b
69
69
70
70
71 changeset: 0:8580ff50825a
71 changeset: 0:8580ff50825a
72 user: test
72 user: test
73 date: Thu Jan 01 00:00:01 1970 +0000
73 date: Thu Jan 01 00:00:01 1970 +0000
74 files: a
74 files: a
75 description:
75 description:
76 a
76 a
77
77
78
78
79 % log copies
79 % log copies
80 4 e (dir/b)
80 4 e (dir/b)
81 3 b (a)
81 3 b (a)
82 2 dir/b (b)
82 2 dir/b (b)
83 1 b (a)
83 1 b (a)
84 0
84 0
85 % log copies, non-linear manifest
85 % log copies, non-linear manifest
86 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
86 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
87 adding foo
87 adding foo
88 created new head
88 created new head
89 5 e (dir/b)
89 5 e (dir/b)
90 % log copies, execute bit set
90 % log copies, execute bit set
91 6
91 6
92 % log -p d
92 % log -p d
93 changeset: 3:16b60bf3f99a
93 changeset: 3:16b60bf3f99a
94 user: test
94 user: test
95 date: Thu Jan 01 00:00:04 1970 +0000
95 date: Thu Jan 01 00:00:04 1970 +0000
96 files: a b d
96 files: a b d
97 description:
97 description:
98 d
98 d
99
99
100
100
101 diff -r 21fba396af4c -r 16b60bf3f99a d
101 diff -r 21fba396af4c -r 16b60bf3f99a d
102 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
102 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
103 +++ b/d Thu Jan 01 00:00:04 1970 +0000
103 +++ b/d Thu Jan 01 00:00:04 1970 +0000
104 @@ -0,0 +1,1 @@
104 @@ -0,0 +1,1 @@
105 +a
105 +a
106
106
107 adding base
107 adding base
108 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
108 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
109 adding b1
109 adding b1
110 created new head
110 created new head
111 % log -f
111 % log -f
112 changeset: 3:e62f78d544b4
112 changeset: 3:e62f78d544b4
113 tag: tip
113 tag: tip
114 parent: 1:3d5bf5654eda
114 parent: 1:3d5bf5654eda
115 user: test
115 user: test
116 date: Thu Jan 01 00:00:01 1970 +0000
116 date: Thu Jan 01 00:00:01 1970 +0000
117 summary: b1
117 summary: b1
118
118
119 changeset: 1:3d5bf5654eda
119 changeset: 1:3d5bf5654eda
120 user: test
120 user: test
121 date: Thu Jan 01 00:00:01 1970 +0000
121 date: Thu Jan 01 00:00:01 1970 +0000
122 summary: r1
122 summary: r1
123
123
124 changeset: 0:67e992f2c4f3
124 changeset: 0:67e992f2c4f3
125 user: test
125 user: test
126 date: Thu Jan 01 00:00:01 1970 +0000
126 date: Thu Jan 01 00:00:01 1970 +0000
127 summary: base
127 summary: base
128
128
129 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
129 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
130 adding b2
130 adding b2
131 created new head
131 created new head
132 % log -f -r 1:tip
132 % log -f -r 1:tip
133 changeset: 1:3d5bf5654eda
133 changeset: 1:3d5bf5654eda
134 user: test
134 user: test
135 date: Thu Jan 01 00:00:01 1970 +0000
135 date: Thu Jan 01 00:00:01 1970 +0000
136 summary: r1
136 summary: r1
137
137
138 changeset: 2:60c670bf5b30
138 changeset: 2:60c670bf5b30
139 user: test
139 user: test
140 date: Thu Jan 01 00:00:01 1970 +0000
140 date: Thu Jan 01 00:00:01 1970 +0000
141 summary: r2
141 summary: r2
142
142
143 changeset: 3:e62f78d544b4
143 changeset: 3:e62f78d544b4
144 parent: 1:3d5bf5654eda
144 parent: 1:3d5bf5654eda
145 user: test
145 user: test
146 date: Thu Jan 01 00:00:01 1970 +0000
146 date: Thu Jan 01 00:00:01 1970 +0000
147 summary: b1
147 summary: b1
148
148
149 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
149 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
150 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
150 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
151 (branch merge, don't forget to commit)
151 (branch merge, don't forget to commit)
152 % log -r . with two parents
152 % log -r . with two parents
153 warning: working directory has two parents, tag '.' uses the first
154 changeset: 3:e62f78d544b4
153 changeset: 3:e62f78d544b4
155 parent: 1:3d5bf5654eda
154 parent: 1:3d5bf5654eda
156 user: test
155 user: test
157 date: Thu Jan 01 00:00:01 1970 +0000
156 date: Thu Jan 01 00:00:01 1970 +0000
158 summary: b1
157 summary: b1
159
158
160 % log -r . with one parent
159 % log -r . with one parent
161 changeset: 5:302e9dd6890d
160 changeset: 5:302e9dd6890d
162 tag: tip
161 tag: tip
163 parent: 3:e62f78d544b4
162 parent: 3:e62f78d544b4
164 parent: 4:ddb82e70d1a1
163 parent: 4:ddb82e70d1a1
165 user: test
164 user: test
166 date: Thu Jan 01 00:00:01 1970 +0000
165 date: Thu Jan 01 00:00:01 1970 +0000
167 summary: m12
166 summary: m12
168
167
169 % log --follow-first
168 % log --follow-first
170 changeset: 6:2404bbcab562
169 changeset: 6:2404bbcab562
171 tag: tip
170 tag: tip
172 user: test
171 user: test
173 date: Thu Jan 01 00:00:01 1970 +0000
172 date: Thu Jan 01 00:00:01 1970 +0000
174 summary: b1.1
173 summary: b1.1
175
174
176 changeset: 5:302e9dd6890d
175 changeset: 5:302e9dd6890d
177 parent: 3:e62f78d544b4
176 parent: 3:e62f78d544b4
178 parent: 4:ddb82e70d1a1
177 parent: 4:ddb82e70d1a1
179 user: test
178 user: test
180 date: Thu Jan 01 00:00:01 1970 +0000
179 date: Thu Jan 01 00:00:01 1970 +0000
181 summary: m12
180 summary: m12
182
181
183 changeset: 3:e62f78d544b4
182 changeset: 3:e62f78d544b4
184 parent: 1:3d5bf5654eda
183 parent: 1:3d5bf5654eda
185 user: test
184 user: test
186 date: Thu Jan 01 00:00:01 1970 +0000
185 date: Thu Jan 01 00:00:01 1970 +0000
187 summary: b1
186 summary: b1
188
187
189 changeset: 1:3d5bf5654eda
188 changeset: 1:3d5bf5654eda
190 user: test
189 user: test
191 date: Thu Jan 01 00:00:01 1970 +0000
190 date: Thu Jan 01 00:00:01 1970 +0000
192 summary: r1
191 summary: r1
193
192
194 changeset: 0:67e992f2c4f3
193 changeset: 0:67e992f2c4f3
195 user: test
194 user: test
196 date: Thu Jan 01 00:00:01 1970 +0000
195 date: Thu Jan 01 00:00:01 1970 +0000
197 summary: base
196 summary: base
198
197
199 % log -P 2
198 % log -P 2
200 changeset: 6:2404bbcab562
199 changeset: 6:2404bbcab562
201 tag: tip
200 tag: tip
202 user: test
201 user: test
203 date: Thu Jan 01 00:00:01 1970 +0000
202 date: Thu Jan 01 00:00:01 1970 +0000
204 summary: b1.1
203 summary: b1.1
205
204
206 changeset: 5:302e9dd6890d
205 changeset: 5:302e9dd6890d
207 parent: 3:e62f78d544b4
206 parent: 3:e62f78d544b4
208 parent: 4:ddb82e70d1a1
207 parent: 4:ddb82e70d1a1
209 user: test
208 user: test
210 date: Thu Jan 01 00:00:01 1970 +0000
209 date: Thu Jan 01 00:00:01 1970 +0000
211 summary: m12
210 summary: m12
212
211
213 changeset: 4:ddb82e70d1a1
212 changeset: 4:ddb82e70d1a1
214 parent: 0:67e992f2c4f3
213 parent: 0:67e992f2c4f3
215 user: test
214 user: test
216 date: Thu Jan 01 00:00:01 1970 +0000
215 date: Thu Jan 01 00:00:01 1970 +0000
217 summary: b2
216 summary: b2
218
217
219 changeset: 3:e62f78d544b4
218 changeset: 3:e62f78d544b4
220 parent: 1:3d5bf5654eda
219 parent: 1:3d5bf5654eda
221 user: test
220 user: test
222 date: Thu Jan 01 00:00:01 1970 +0000
221 date: Thu Jan 01 00:00:01 1970 +0000
223 summary: b1
222 summary: b1
224
223
225 % log -r ""
224 % log -r ""
226 abort: 00changelog.i@: ambiguous identifier!
225 abort: 00changelog.i@: ambiguous identifier!
@@ -1,36 +1,35
1 created new head
1 created new head
2 merging foo1 and foo to foo1
2 merging foo1 and foo to foo1
3 1 files updated, 1 files merged, 0 files removed, 0 files unresolved
3 1 files updated, 1 files merged, 0 files removed, 0 files unresolved
4 (branch merge, don't forget to commit)
4 (branch merge, don't forget to commit)
5 n 0 -2 bar
5 n 0 -2 bar
6 m 644 14 foo1
6 m 644 14 foo1
7 copy: foo -> foo1
7 copy: foo -> foo1
8 M bar
8 M bar
9 M foo1
9 M foo1
10 % removing foo1 and bar
10 % removing foo1 and bar
11 r 0 -2 bar
11 r 0 -2 bar
12 r 0 -1 foo1
12 r 0 -1 foo1
13 copy: foo -> foo1
13 copy: foo -> foo1
14 R bar
14 R bar
15 R foo1
15 R foo1
16 % readding foo1 and bar
16 % readding foo1 and bar
17 adding bar
17 adding bar
18 adding foo1
18 adding foo1
19 n 0 -2 bar
19 n 0 -2 bar
20 m 644 14 foo1
20 m 644 14 foo1
21 copy: foo -> foo1
21 copy: foo -> foo1
22 M bar
22 M bar
23 M foo1
23 M foo1
24 foo
24 foo
25 % reverting foo1 and bar
25 % reverting foo1 and bar
26 warning: working directory has two parents, tag '.' uses the first
27 saving current version of bar as bar.orig
26 saving current version of bar as bar.orig
28 reverting bar
27 reverting bar
29 saving current version of foo1 as foo1.orig
28 saving current version of foo1 as foo1.orig
30 reverting foo1
29 reverting foo1
31 n 0 -2 bar
30 n 0 -2 bar
32 m 644 14 foo1
31 m 644 14 foo1
33 copy: foo -> foo1
32 copy: foo -> foo1
34 M bar
33 M bar
35 M foo1
34 M foo1
36 foo
35 foo
General Comments 0
You need to be logged in to leave comments. Login now