##// END OF EJS Templates
repo.status: eliminate list_
Matt Mackall -
r6753:ed5ffb2c default
parent child Browse files
Show More
@@ -1,105 +1,103 b''
1 # __init__.py - inotify-based status acceleration for Linux
1 # __init__.py - inotify-based status acceleration for Linux
2 #
2 #
3 # Copyright 2006, 2007, 2008 Bryan O'Sullivan <bos@serpentine.com>
3 # Copyright 2006, 2007, 2008 Bryan O'Sullivan <bos@serpentine.com>
4 # Copyright 2007, 2008 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007, 2008 Brendan Cully <brendan@kublai.com>
5 #
5 #
6 # This software may be used and distributed according to the terms
6 # This software may be used and distributed according to the terms
7 # of the GNU General Public License, incorporated herein by reference.
7 # of the GNU General Public License, incorporated herein by reference.
8
8
9 '''inotify-based status acceleration for Linux systems
9 '''inotify-based status acceleration for Linux systems
10 '''
10 '''
11
11
12 # todo: socket permissions
12 # todo: socket permissions
13
13
14 from mercurial.i18n import gettext as _
14 from mercurial.i18n import gettext as _
15 from mercurial import cmdutil, util
15 from mercurial import cmdutil, util
16 import client, errno, os, server, socket
16 import client, errno, os, server, socket
17 from weakref import proxy
17 from weakref import proxy
18
18
19 def serve(ui, repo, **opts):
19 def serve(ui, repo, **opts):
20 '''start an inotify server for this repository'''
20 '''start an inotify server for this repository'''
21 timeout = opts.get('timeout')
21 timeout = opts.get('timeout')
22 if timeout:
22 if timeout:
23 timeout = float(timeout) * 1e3
23 timeout = float(timeout) * 1e3
24
24
25 class service:
25 class service:
26 def init(self):
26 def init(self):
27 self.master = server.Master(ui, repo, timeout)
27 self.master = server.Master(ui, repo, timeout)
28
28
29 def run(self):
29 def run(self):
30 try:
30 try:
31 self.master.run()
31 self.master.run()
32 finally:
32 finally:
33 self.master.shutdown()
33 self.master.shutdown()
34
34
35 service = service()
35 service = service()
36 cmdutil.service(opts, initfn=service.init, runfn=service.run)
36 cmdutil.service(opts, initfn=service.init, runfn=service.run)
37
37
38 def reposetup(ui, repo):
38 def reposetup(ui, repo):
39 if not repo.local():
39 if not repo.local():
40 return
40 return
41
41
42 # XXX: weakref until hg stops relying on __del__
42 # XXX: weakref until hg stops relying on __del__
43 repo = proxy(repo)
43 repo = proxy(repo)
44
44
45 class inotifydirstate(repo.dirstate.__class__):
45 class inotifydirstate(repo.dirstate.__class__):
46 # Set to True if we're the inotify server, so we don't attempt
46 # Set to True if we're the inotify server, so we don't attempt
47 # to recurse.
47 # to recurse.
48 inotifyserver = False
48 inotifyserver = False
49
49
50 def status(self, match, list_ignored, list_clean,
50 def status(self, match, ignored, clean, unknown=True):
51 list_unknown=True):
52 files = match.files()
51 files = match.files()
53 try:
52 try:
54 if not list_ignored and not self.inotifyserver:
53 if not ignored and not self.inotifyserver:
55 result = client.query(ui, repo, files, match, False,
54 result = client.query(ui, repo, files, match, False,
56 list_clean, list_unknown)
55 clean, unknown)
57 if result is not None:
56 if result is not None:
58 return result
57 return result
59 except socket.error, err:
58 except socket.error, err:
60 if err[0] == errno.ECONNREFUSED:
59 if err[0] == errno.ECONNREFUSED:
61 ui.warn(_('(found dead inotify server socket; '
60 ui.warn(_('(found dead inotify server socket; '
62 'removing it)\n'))
61 'removing it)\n'))
63 os.unlink(repo.join('inotify.sock'))
62 os.unlink(repo.join('inotify.sock'))
64 elif err[0] != errno.ENOENT:
63 elif err[0] != errno.ENOENT:
65 raise
64 raise
66 if ui.configbool('inotify', 'autostart'):
65 if ui.configbool('inotify', 'autostart'):
67 query = None
66 query = None
68 ui.debug(_('(starting inotify server)\n'))
67 ui.debug(_('(starting inotify server)\n'))
69 try:
68 try:
70 server.start(ui, repo)
69 server.start(ui, repo)
71 query = client.query
70 query = client.query
72 except server.AlreadyStartedException, inst:
71 except server.AlreadyStartedException, inst:
73 # another process may have started its own
72 # another process may have started its own
74 # inotify server while this one was starting.
73 # inotify server while this one was starting.
75 ui.debug(str(inst))
74 ui.debug(str(inst))
76 query = client.query
75 query = client.query
77 except Exception, inst:
76 except Exception, inst:
78 ui.warn(_('could not start inotify server: '
77 ui.warn(_('could not start inotify server: '
79 '%s\n') % inst)
78 '%s\n') % inst)
80 ui.print_exc()
79 ui.print_exc()
81
80
82 if query:
81 if query:
83 try:
82 try:
84 return query(ui, repo, files or [], match,
83 return query(ui, repo, files or [], match,
85 list_ignored, list_clean, list_unknown)
84 ignored, clean, unknown)
86 except socket.error, err:
85 except socket.error, err:
87 ui.warn(_('could not talk to new inotify '
86 ui.warn(_('could not talk to new inotify '
88 'server: %s\n') % err[1])
87 'server: %s\n') % err[1])
89 ui.print_exc()
88 ui.print_exc()
90
89
91 return super(inotifydirstate, self).status(
90 return super(inotifydirstate, self).status(
92 match, list_ignored, list_clean,
91 match, ignored, clean, unknown)
93 list_unknown)
94
92
95 repo.dirstate.__class__ = inotifydirstate
93 repo.dirstate.__class__ = inotifydirstate
96
94
97 cmdtable = {
95 cmdtable = {
98 '^inserve':
96 '^inserve':
99 (serve,
97 (serve,
100 [('d', 'daemon', None, _('run server in background')),
98 [('d', 'daemon', None, _('run server in background')),
101 ('', 'daemon-pipefds', '', _('used internally by daemon mode')),
99 ('', 'daemon-pipefds', '', _('used internally by daemon mode')),
102 ('t', 'idle-timeout', '', _('minutes to sit idle before exiting')),
100 ('t', 'idle-timeout', '', _('minutes to sit idle before exiting')),
103 ('', 'pid-file', '', _('name of file to write process ID to'))],
101 ('', 'pid-file', '', _('name of file to write process ID to'))],
104 _('hg inserve [OPT]...')),
102 _('hg inserve [OPT]...')),
105 }
103 }
@@ -1,55 +1,55 b''
1 # client.py - inotify status client
1 # client.py - inotify status client
2 #
2 #
3 # Copyright 2006, 2007, 2008 Bryan O'Sullivan <bos@serpentine.com>
3 # Copyright 2006, 2007, 2008 Bryan O'Sullivan <bos@serpentine.com>
4 # Copyright 2007, 2008 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007, 2008 Brendan Cully <brendan@kublai.com>
5 #
5 #
6 # This software may be used and distributed according to the terms
6 # This software may be used and distributed according to the terms
7 # of the GNU General Public License, incorporated herein by reference.
7 # of the GNU General Public License, incorporated herein by reference.
8
8
9 from mercurial.i18n import gettext as _
9 from mercurial.i18n import gettext as _
10 from mercurial import ui
10 from mercurial import ui
11 import common
11 import common
12 import os, select, socket, stat, struct, sys
12 import os, select, socket, stat, struct, sys
13
13
14 def query(ui, repo, names, match, list_ignored, list_clean, list_unknown=True):
14 def query(ui, repo, names, match, ignored, clean, unknown=True):
15 sock = socket.socket(socket.AF_UNIX)
15 sock = socket.socket(socket.AF_UNIX)
16 sockpath = repo.join('inotify.sock')
16 sockpath = repo.join('inotify.sock')
17 sock.connect(sockpath)
17 sock.connect(sockpath)
18
18
19 def genquery():
19 def genquery():
20 for n in names or []:
20 for n in names or []:
21 yield n
21 yield n
22 states = 'almrx!'
22 states = 'almrx!'
23 if list_ignored:
23 if ignored:
24 raise ValueError('this is insanity')
24 raise ValueError('this is insanity')
25 if list_clean: states += 'n'
25 if clean: states += 'n'
26 if list_unknown: states += '?'
26 if unknown: states += '?'
27 yield states
27 yield states
28
28
29 req = '\0'.join(genquery())
29 req = '\0'.join(genquery())
30
30
31 sock.sendall(chr(common.version))
31 sock.sendall(chr(common.version))
32 sock.sendall(req)
32 sock.sendall(req)
33 sock.shutdown(socket.SHUT_WR)
33 sock.shutdown(socket.SHUT_WR)
34
34
35 cs = common.recvcs(sock)
35 cs = common.recvcs(sock)
36 version = ord(cs.read(1))
36 version = ord(cs.read(1))
37
37
38 if version != common.version:
38 if version != common.version:
39 ui.warn(_('(inotify: received response from incompatible server '
39 ui.warn(_('(inotify: received response from incompatible server '
40 'version %d)\n') % version)
40 'version %d)\n') % version)
41 return None
41 return None
42
42
43 try:
43 try:
44 resphdr = struct.unpack(common.resphdrfmt, cs.read(common.resphdrsize))
44 resphdr = struct.unpack(common.resphdrfmt, cs.read(common.resphdrsize))
45 except struct.error:
45 except struct.error:
46 return None
46 return None
47
47
48 def readnames(nbytes):
48 def readnames(nbytes):
49 if nbytes:
49 if nbytes:
50 names = cs.read(nbytes)
50 names = cs.read(nbytes)
51 if names:
51 if names:
52 return filter(match, names.split('\0'))
52 return filter(match, names.split('\0'))
53 return []
53 return []
54
54
55 return map(readnames, resphdr)
55 return map(readnames, resphdr)
@@ -1,566 +1,566 b''
1 # keyword.py - $Keyword$ expansion for Mercurial
1 # keyword.py - $Keyword$ expansion for Mercurial
2 #
2 #
3 # Copyright 2007, 2008 Christian Ebert <blacktrash@gmx.net>
3 # Copyright 2007, 2008 Christian Ebert <blacktrash@gmx.net>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7 #
7 #
8 # $Id$
8 # $Id$
9 #
9 #
10 # Keyword expansion hack against the grain of a DSCM
10 # Keyword expansion hack against the grain of a DSCM
11 #
11 #
12 # There are many good reasons why this is not needed in a distributed
12 # There are many good reasons why this is not needed in a distributed
13 # SCM, still it may be useful in very small projects based on single
13 # SCM, still it may be useful in very small projects based on single
14 # files (like LaTeX packages), that are mostly addressed to an audience
14 # files (like LaTeX packages), that are mostly addressed to an audience
15 # not running a version control system.
15 # not running a version control system.
16 #
16 #
17 # For in-depth discussion refer to
17 # For in-depth discussion refer to
18 # <http://www.selenic.com/mercurial/wiki/index.cgi/KeywordPlan>.
18 # <http://www.selenic.com/mercurial/wiki/index.cgi/KeywordPlan>.
19 #
19 #
20 # Keyword expansion is based on Mercurial's changeset template mappings.
20 # Keyword expansion is based on Mercurial's changeset template mappings.
21 #
21 #
22 # Binary files are not touched.
22 # Binary files are not touched.
23 #
23 #
24 # Setup in hgrc:
24 # Setup in hgrc:
25 #
25 #
26 # [extensions]
26 # [extensions]
27 # # enable extension
27 # # enable extension
28 # hgext.keyword =
28 # hgext.keyword =
29 #
29 #
30 # Files to act upon/ignore are specified in the [keyword] section.
30 # Files to act upon/ignore are specified in the [keyword] section.
31 # Customized keyword template mappings in the [keywordmaps] section.
31 # Customized keyword template mappings in the [keywordmaps] section.
32 #
32 #
33 # Run "hg help keyword" and "hg kwdemo" to get info on configuration.
33 # Run "hg help keyword" and "hg kwdemo" to get info on configuration.
34
34
35 '''keyword expansion in local repositories
35 '''keyword expansion in local repositories
36
36
37 This extension expands RCS/CVS-like or self-customized $Keywords$
37 This extension expands RCS/CVS-like or self-customized $Keywords$
38 in tracked text files selected by your configuration.
38 in tracked text files selected by your configuration.
39
39
40 Keywords are only expanded in local repositories and not stored in
40 Keywords are only expanded in local repositories and not stored in
41 the change history. The mechanism can be regarded as a convenience
41 the change history. The mechanism can be regarded as a convenience
42 for the current user or for archive distribution.
42 for the current user or for archive distribution.
43
43
44 Configuration is done in the [keyword] and [keywordmaps] sections
44 Configuration is done in the [keyword] and [keywordmaps] sections
45 of hgrc files.
45 of hgrc files.
46
46
47 Example:
47 Example:
48
48
49 [keyword]
49 [keyword]
50 # expand keywords in every python file except those matching "x*"
50 # expand keywords in every python file except those matching "x*"
51 **.py =
51 **.py =
52 x* = ignore
52 x* = ignore
53
53
54 Note: the more specific you are in your filename patterns
54 Note: the more specific you are in your filename patterns
55 the less you lose speed in huge repos.
55 the less you lose speed in huge repos.
56
56
57 For [keywordmaps] template mapping and expansion demonstration and
57 For [keywordmaps] template mapping and expansion demonstration and
58 control run "hg kwdemo".
58 control run "hg kwdemo".
59
59
60 An additional date template filter {date|utcdate} is provided.
60 An additional date template filter {date|utcdate} is provided.
61
61
62 The default template mappings (view with "hg kwdemo -d") can be replaced
62 The default template mappings (view with "hg kwdemo -d") can be replaced
63 with customized keywords and templates.
63 with customized keywords and templates.
64 Again, run "hg kwdemo" to control the results of your config changes.
64 Again, run "hg kwdemo" to control the results of your config changes.
65
65
66 Before changing/disabling active keywords, run "hg kwshrink" to avoid
66 Before changing/disabling active keywords, run "hg kwshrink" to avoid
67 the risk of inadvertedly storing expanded keywords in the change history.
67 the risk of inadvertedly storing expanded keywords in the change history.
68
68
69 To force expansion after enabling it, or a configuration change, run
69 To force expansion after enabling it, or a configuration change, run
70 "hg kwexpand".
70 "hg kwexpand".
71
71
72 Also, when committing with the record extension or using mq's qrecord, be aware
72 Also, when committing with the record extension or using mq's qrecord, be aware
73 that keywords cannot be updated. Again, run "hg kwexpand" on the files in
73 that keywords cannot be updated. Again, run "hg kwexpand" on the files in
74 question to update keyword expansions after all changes have been checked in.
74 question to update keyword expansions after all changes have been checked in.
75
75
76 Expansions spanning more than one line and incremental expansions,
76 Expansions spanning more than one line and incremental expansions,
77 like CVS' $Log$, are not supported. A keyword template map
77 like CVS' $Log$, are not supported. A keyword template map
78 "Log = {desc}" expands to the first line of the changeset description.
78 "Log = {desc}" expands to the first line of the changeset description.
79 '''
79 '''
80
80
81 from mercurial import commands, cmdutil, dispatch, filelog, revlog
81 from mercurial import commands, cmdutil, dispatch, filelog, revlog
82 from mercurial import patch, localrepo, templater, templatefilters, util
82 from mercurial import patch, localrepo, templater, templatefilters, util
83 from mercurial.hgweb import webcommands
83 from mercurial.hgweb import webcommands
84 from mercurial.node import nullid, hex
84 from mercurial.node import nullid, hex
85 from mercurial.i18n import _
85 from mercurial.i18n import _
86 import re, shutil, tempfile, time
86 import re, shutil, tempfile, time
87
87
88 commands.optionalrepo += ' kwdemo'
88 commands.optionalrepo += ' kwdemo'
89
89
90 # hg commands that do not act on keywords
90 # hg commands that do not act on keywords
91 nokwcommands = ('add addremove annotate bundle copy export grep incoming init'
91 nokwcommands = ('add addremove annotate bundle copy export grep incoming init'
92 ' log outgoing push rename rollback tip'
92 ' log outgoing push rename rollback tip'
93 ' convert email glog')
93 ' convert email glog')
94
94
95 # hg commands that trigger expansion only when writing to working dir,
95 # hg commands that trigger expansion only when writing to working dir,
96 # not when reading filelog, and unexpand when reading from working dir
96 # not when reading filelog, and unexpand when reading from working dir
97 restricted = 'record qfold qimport qnew qpush qrefresh qrecord'
97 restricted = 'record qfold qimport qnew qpush qrefresh qrecord'
98
98
99 def utcdate(date):
99 def utcdate(date):
100 '''Returns hgdate in cvs-like UTC format.'''
100 '''Returns hgdate in cvs-like UTC format.'''
101 return time.strftime('%Y/%m/%d %H:%M:%S', time.gmtime(date[0]))
101 return time.strftime('%Y/%m/%d %H:%M:%S', time.gmtime(date[0]))
102
102
103 # make keyword tools accessible
103 # make keyword tools accessible
104 kwtools = {'templater': None, 'hgcmd': '', 'inc': [], 'exc': ['.hg*']}
104 kwtools = {'templater': None, 'hgcmd': '', 'inc': [], 'exc': ['.hg*']}
105
105
106
106
107 class kwtemplater(object):
107 class kwtemplater(object):
108 '''
108 '''
109 Sets up keyword templates, corresponding keyword regex, and
109 Sets up keyword templates, corresponding keyword regex, and
110 provides keyword substitution functions.
110 provides keyword substitution functions.
111 '''
111 '''
112 templates = {
112 templates = {
113 'Revision': '{node|short}',
113 'Revision': '{node|short}',
114 'Author': '{author|user}',
114 'Author': '{author|user}',
115 'Date': '{date|utcdate}',
115 'Date': '{date|utcdate}',
116 'RCSFile': '{file|basename},v',
116 'RCSFile': '{file|basename},v',
117 'Source': '{root}/{file},v',
117 'Source': '{root}/{file},v',
118 'Id': '{file|basename},v {node|short} {date|utcdate} {author|user}',
118 'Id': '{file|basename},v {node|short} {date|utcdate} {author|user}',
119 'Header': '{root}/{file},v {node|short} {date|utcdate} {author|user}',
119 'Header': '{root}/{file},v {node|short} {date|utcdate} {author|user}',
120 }
120 }
121
121
122 def __init__(self, ui, repo):
122 def __init__(self, ui, repo):
123 self.ui = ui
123 self.ui = ui
124 self.repo = repo
124 self.repo = repo
125 self.matcher = util.matcher(repo.root,
125 self.matcher = util.matcher(repo.root,
126 inc=kwtools['inc'], exc=kwtools['exc'])[1]
126 inc=kwtools['inc'], exc=kwtools['exc'])[1]
127 self.restrict = kwtools['hgcmd'] in restricted.split()
127 self.restrict = kwtools['hgcmd'] in restricted.split()
128
128
129 kwmaps = self.ui.configitems('keywordmaps')
129 kwmaps = self.ui.configitems('keywordmaps')
130 if kwmaps: # override default templates
130 if kwmaps: # override default templates
131 kwmaps = [(k, templater.parsestring(v, False))
131 kwmaps = [(k, templater.parsestring(v, False))
132 for (k, v) in kwmaps]
132 for (k, v) in kwmaps]
133 self.templates = dict(kwmaps)
133 self.templates = dict(kwmaps)
134 escaped = map(re.escape, self.templates.keys())
134 escaped = map(re.escape, self.templates.keys())
135 kwpat = r'\$(%s)(: [^$\n\r]*? )??\$' % '|'.join(escaped)
135 kwpat = r'\$(%s)(: [^$\n\r]*? )??\$' % '|'.join(escaped)
136 self.re_kw = re.compile(kwpat)
136 self.re_kw = re.compile(kwpat)
137
137
138 templatefilters.filters['utcdate'] = utcdate
138 templatefilters.filters['utcdate'] = utcdate
139 self.ct = cmdutil.changeset_templater(self.ui, self.repo,
139 self.ct = cmdutil.changeset_templater(self.ui, self.repo,
140 False, '', False)
140 False, '', False)
141
141
142 def getnode(self, path, fnode):
142 def getnode(self, path, fnode):
143 '''Derives changenode from file path and filenode.'''
143 '''Derives changenode from file path and filenode.'''
144 # used by kwfilelog.read and kwexpand
144 # used by kwfilelog.read and kwexpand
145 c = self.repo.filectx(path, fileid=fnode)
145 c = self.repo.filectx(path, fileid=fnode)
146 return c.node()
146 return c.node()
147
147
148 def substitute(self, data, path, node, subfunc):
148 def substitute(self, data, path, node, subfunc):
149 '''Replaces keywords in data with expanded template.'''
149 '''Replaces keywords in data with expanded template.'''
150 def kwsub(mobj):
150 def kwsub(mobj):
151 kw = mobj.group(1)
151 kw = mobj.group(1)
152 self.ct.use_template(self.templates[kw])
152 self.ct.use_template(self.templates[kw])
153 self.ui.pushbuffer()
153 self.ui.pushbuffer()
154 self.ct.show(changenode=node, root=self.repo.root, file=path)
154 self.ct.show(changenode=node, root=self.repo.root, file=path)
155 ekw = templatefilters.firstline(self.ui.popbuffer())
155 ekw = templatefilters.firstline(self.ui.popbuffer())
156 return '$%s: %s $' % (kw, ekw)
156 return '$%s: %s $' % (kw, ekw)
157 return subfunc(kwsub, data)
157 return subfunc(kwsub, data)
158
158
159 def expand(self, path, node, data):
159 def expand(self, path, node, data):
160 '''Returns data with keywords expanded.'''
160 '''Returns data with keywords expanded.'''
161 if not self.restrict and self.matcher(path) and not util.binary(data):
161 if not self.restrict and self.matcher(path) and not util.binary(data):
162 changenode = self.getnode(path, node)
162 changenode = self.getnode(path, node)
163 return self.substitute(data, path, changenode, self.re_kw.sub)
163 return self.substitute(data, path, changenode, self.re_kw.sub)
164 return data
164 return data
165
165
166 def iskwfile(self, path, flagfunc):
166 def iskwfile(self, path, flagfunc):
167 '''Returns true if path matches [keyword] pattern
167 '''Returns true if path matches [keyword] pattern
168 and is not a symbolic link.
168 and is not a symbolic link.
169 Caveat: localrepository._link fails on Windows.'''
169 Caveat: localrepository._link fails on Windows.'''
170 return self.matcher(path) and not 'l' in flagfunc(path)
170 return self.matcher(path) and not 'l' in flagfunc(path)
171
171
172 def overwrite(self, node, expand, files):
172 def overwrite(self, node, expand, files):
173 '''Overwrites selected files expanding/shrinking keywords.'''
173 '''Overwrites selected files expanding/shrinking keywords.'''
174 if node is not None: # commit
174 if node is not None: # commit
175 ctx = self.repo[node]
175 ctx = self.repo[node]
176 mf = ctx.manifest()
176 mf = ctx.manifest()
177 files = [f for f in ctx.files() if f in mf]
177 files = [f for f in ctx.files() if f in mf]
178 notify = self.ui.debug
178 notify = self.ui.debug
179 else: # kwexpand/kwshrink
179 else: # kwexpand/kwshrink
180 ctx = self.repo['.']
180 ctx = self.repo['.']
181 mf = ctx.manifest()
181 mf = ctx.manifest()
182 notify = self.ui.note
182 notify = self.ui.note
183 candidates = [f for f in files if self.iskwfile(f, ctx.flags)]
183 candidates = [f for f in files if self.iskwfile(f, ctx.flags)]
184 if candidates:
184 if candidates:
185 self.restrict = True # do not expand when reading
185 self.restrict = True # do not expand when reading
186 candidates.sort()
186 candidates.sort()
187 action = expand and 'expanding' or 'shrinking'
187 action = expand and 'expanding' or 'shrinking'
188 for f in candidates:
188 for f in candidates:
189 fp = self.repo.file(f)
189 fp = self.repo.file(f)
190 data = fp.read(mf[f])
190 data = fp.read(mf[f])
191 if util.binary(data):
191 if util.binary(data):
192 continue
192 continue
193 if expand:
193 if expand:
194 changenode = node or self.getnode(f, mf[f])
194 changenode = node or self.getnode(f, mf[f])
195 data, found = self.substitute(data, f, changenode,
195 data, found = self.substitute(data, f, changenode,
196 self.re_kw.subn)
196 self.re_kw.subn)
197 else:
197 else:
198 found = self.re_kw.search(data)
198 found = self.re_kw.search(data)
199 if found:
199 if found:
200 notify(_('overwriting %s %s keywords\n') % (f, action))
200 notify(_('overwriting %s %s keywords\n') % (f, action))
201 self.repo.wwrite(f, data, mf.flags(f))
201 self.repo.wwrite(f, data, mf.flags(f))
202 self.repo.dirstate.normal(f)
202 self.repo.dirstate.normal(f)
203 self.restrict = False
203 self.restrict = False
204
204
205 def shrinktext(self, text):
205 def shrinktext(self, text):
206 '''Unconditionally removes all keyword substitutions from text.'''
206 '''Unconditionally removes all keyword substitutions from text.'''
207 return self.re_kw.sub(r'$\1$', text)
207 return self.re_kw.sub(r'$\1$', text)
208
208
209 def shrink(self, fname, text):
209 def shrink(self, fname, text):
210 '''Returns text with all keyword substitutions removed.'''
210 '''Returns text with all keyword substitutions removed.'''
211 if self.matcher(fname) and not util.binary(text):
211 if self.matcher(fname) and not util.binary(text):
212 return self.shrinktext(text)
212 return self.shrinktext(text)
213 return text
213 return text
214
214
215 def shrinklines(self, fname, lines):
215 def shrinklines(self, fname, lines):
216 '''Returns lines with keyword substitutions removed.'''
216 '''Returns lines with keyword substitutions removed.'''
217 if self.matcher(fname):
217 if self.matcher(fname):
218 text = ''.join(lines)
218 text = ''.join(lines)
219 if not util.binary(text):
219 if not util.binary(text):
220 return self.shrinktext(text).splitlines(True)
220 return self.shrinktext(text).splitlines(True)
221 return lines
221 return lines
222
222
223 def wread(self, fname, data):
223 def wread(self, fname, data):
224 '''If in restricted mode returns data read from wdir with
224 '''If in restricted mode returns data read from wdir with
225 keyword substitutions removed.'''
225 keyword substitutions removed.'''
226 return self.restrict and self.shrink(fname, data) or data
226 return self.restrict and self.shrink(fname, data) or data
227
227
228 class kwfilelog(filelog.filelog):
228 class kwfilelog(filelog.filelog):
229 '''
229 '''
230 Subclass of filelog to hook into its read, add, cmp methods.
230 Subclass of filelog to hook into its read, add, cmp methods.
231 Keywords are "stored" unexpanded, and processed on reading.
231 Keywords are "stored" unexpanded, and processed on reading.
232 '''
232 '''
233 def __init__(self, opener, kwt, path):
233 def __init__(self, opener, kwt, path):
234 super(kwfilelog, self).__init__(opener, path)
234 super(kwfilelog, self).__init__(opener, path)
235 self.kwt = kwt
235 self.kwt = kwt
236 self.path = path
236 self.path = path
237
237
238 def read(self, node):
238 def read(self, node):
239 '''Expands keywords when reading filelog.'''
239 '''Expands keywords when reading filelog.'''
240 data = super(kwfilelog, self).read(node)
240 data = super(kwfilelog, self).read(node)
241 return self.kwt.expand(self.path, node, data)
241 return self.kwt.expand(self.path, node, data)
242
242
243 def add(self, text, meta, tr, link, p1=None, p2=None):
243 def add(self, text, meta, tr, link, p1=None, p2=None):
244 '''Removes keyword substitutions when adding to filelog.'''
244 '''Removes keyword substitutions when adding to filelog.'''
245 text = self.kwt.shrink(self.path, text)
245 text = self.kwt.shrink(self.path, text)
246 return super(kwfilelog, self).add(text, meta, tr, link, p1, p2)
246 return super(kwfilelog, self).add(text, meta, tr, link, p1, p2)
247
247
248 def cmp(self, node, text):
248 def cmp(self, node, text):
249 '''Removes keyword substitutions for comparison.'''
249 '''Removes keyword substitutions for comparison.'''
250 text = self.kwt.shrink(self.path, text)
250 text = self.kwt.shrink(self.path, text)
251 if self.renamed(node):
251 if self.renamed(node):
252 t2 = super(kwfilelog, self).read(node)
252 t2 = super(kwfilelog, self).read(node)
253 return t2 != text
253 return t2 != text
254 return revlog.revlog.cmp(self, node, text)
254 return revlog.revlog.cmp(self, node, text)
255
255
256 def _status(ui, repo, kwt, *pats, **opts):
256 def _status(ui, repo, kwt, *pats, **opts):
257 '''Bails out if [keyword] configuration is not active.
257 '''Bails out if [keyword] configuration is not active.
258 Returns status of working directory.'''
258 Returns status of working directory.'''
259 if kwt:
259 if kwt:
260 matcher = cmdutil.match(repo, pats, opts)
260 matcher = cmdutil.match(repo, pats, opts)
261 return repo.status(match=matcher, list_clean=True)
261 return repo.status(match=matcher, clean=True)
262 if ui.configitems('keyword'):
262 if ui.configitems('keyword'):
263 raise util.Abort(_('[keyword] patterns cannot match'))
263 raise util.Abort(_('[keyword] patterns cannot match'))
264 raise util.Abort(_('no [keyword] patterns configured'))
264 raise util.Abort(_('no [keyword] patterns configured'))
265
265
266 def _kwfwrite(ui, repo, expand, *pats, **opts):
266 def _kwfwrite(ui, repo, expand, *pats, **opts):
267 '''Selects files and passes them to kwtemplater.overwrite.'''
267 '''Selects files and passes them to kwtemplater.overwrite.'''
268 if repo.dirstate.parents()[1] != nullid:
268 if repo.dirstate.parents()[1] != nullid:
269 raise util.Abort(_('outstanding uncommitted merge'))
269 raise util.Abort(_('outstanding uncommitted merge'))
270 kwt = kwtools['templater']
270 kwt = kwtools['templater']
271 status = _status(ui, repo, kwt, *pats, **opts)
271 status = _status(ui, repo, kwt, *pats, **opts)
272 modified, added, removed, deleted, unknown, ignored, clean = status
272 modified, added, removed, deleted, unknown, ignored, clean = status
273 if modified or added or removed or deleted:
273 if modified or added or removed or deleted:
274 raise util.Abort(_('outstanding uncommitted changes'))
274 raise util.Abort(_('outstanding uncommitted changes'))
275 wlock = lock = None
275 wlock = lock = None
276 try:
276 try:
277 wlock = repo.wlock()
277 wlock = repo.wlock()
278 lock = repo.lock()
278 lock = repo.lock()
279 kwt.overwrite(None, expand, clean)
279 kwt.overwrite(None, expand, clean)
280 finally:
280 finally:
281 del wlock, lock
281 del wlock, lock
282
282
283
283
284 def demo(ui, repo, *args, **opts):
284 def demo(ui, repo, *args, **opts):
285 '''print [keywordmaps] configuration and an expansion example
285 '''print [keywordmaps] configuration and an expansion example
286
286
287 Show current, custom, or default keyword template maps
287 Show current, custom, or default keyword template maps
288 and their expansion.
288 and their expansion.
289
289
290 Extend current configuration by specifying maps as arguments
290 Extend current configuration by specifying maps as arguments
291 and optionally by reading from an additional hgrc file.
291 and optionally by reading from an additional hgrc file.
292
292
293 Override current keyword template maps with "default" option.
293 Override current keyword template maps with "default" option.
294 '''
294 '''
295 def demostatus(stat):
295 def demostatus(stat):
296 ui.status(_('\n\t%s\n') % stat)
296 ui.status(_('\n\t%s\n') % stat)
297
297
298 def demoitems(section, items):
298 def demoitems(section, items):
299 ui.write('[%s]\n' % section)
299 ui.write('[%s]\n' % section)
300 for k, v in items:
300 for k, v in items:
301 ui.write('%s = %s\n' % (k, v))
301 ui.write('%s = %s\n' % (k, v))
302
302
303 msg = 'hg keyword config and expansion example'
303 msg = 'hg keyword config and expansion example'
304 kwstatus = 'current'
304 kwstatus = 'current'
305 fn = 'demo.txt'
305 fn = 'demo.txt'
306 branchname = 'demobranch'
306 branchname = 'demobranch'
307 tmpdir = tempfile.mkdtemp('', 'kwdemo.')
307 tmpdir = tempfile.mkdtemp('', 'kwdemo.')
308 ui.note(_('creating temporary repo at %s\n') % tmpdir)
308 ui.note(_('creating temporary repo at %s\n') % tmpdir)
309 repo = localrepo.localrepository(ui, tmpdir, True)
309 repo = localrepo.localrepository(ui, tmpdir, True)
310 ui.setconfig('keyword', fn, '')
310 ui.setconfig('keyword', fn, '')
311 if args or opts.get('rcfile'):
311 if args or opts.get('rcfile'):
312 kwstatus = 'custom'
312 kwstatus = 'custom'
313 if opts.get('rcfile'):
313 if opts.get('rcfile'):
314 ui.readconfig(opts.get('rcfile'))
314 ui.readconfig(opts.get('rcfile'))
315 if opts.get('default'):
315 if opts.get('default'):
316 kwstatus = 'default'
316 kwstatus = 'default'
317 kwmaps = kwtemplater.templates
317 kwmaps = kwtemplater.templates
318 if ui.configitems('keywordmaps'):
318 if ui.configitems('keywordmaps'):
319 # override maps from optional rcfile
319 # override maps from optional rcfile
320 for k, v in kwmaps.iteritems():
320 for k, v in kwmaps.iteritems():
321 ui.setconfig('keywordmaps', k, v)
321 ui.setconfig('keywordmaps', k, v)
322 elif args:
322 elif args:
323 # simulate hgrc parsing
323 # simulate hgrc parsing
324 rcmaps = ['[keywordmaps]\n'] + [a + '\n' for a in args]
324 rcmaps = ['[keywordmaps]\n'] + [a + '\n' for a in args]
325 fp = repo.opener('hgrc', 'w')
325 fp = repo.opener('hgrc', 'w')
326 fp.writelines(rcmaps)
326 fp.writelines(rcmaps)
327 fp.close()
327 fp.close()
328 ui.readconfig(repo.join('hgrc'))
328 ui.readconfig(repo.join('hgrc'))
329 if not opts.get('default'):
329 if not opts.get('default'):
330 kwmaps = dict(ui.configitems('keywordmaps')) or kwtemplater.templates
330 kwmaps = dict(ui.configitems('keywordmaps')) or kwtemplater.templates
331 uisetup(ui)
331 uisetup(ui)
332 reposetup(ui, repo)
332 reposetup(ui, repo)
333 for k, v in ui.configitems('extensions'):
333 for k, v in ui.configitems('extensions'):
334 if k.endswith('keyword'):
334 if k.endswith('keyword'):
335 extension = '%s = %s' % (k, v)
335 extension = '%s = %s' % (k, v)
336 break
336 break
337 demostatus('config using %s keyword template maps' % kwstatus)
337 demostatus('config using %s keyword template maps' % kwstatus)
338 ui.write('[extensions]\n%s\n' % extension)
338 ui.write('[extensions]\n%s\n' % extension)
339 demoitems('keyword', ui.configitems('keyword'))
339 demoitems('keyword', ui.configitems('keyword'))
340 demoitems('keywordmaps', kwmaps.iteritems())
340 demoitems('keywordmaps', kwmaps.iteritems())
341 keywords = '$' + '$\n$'.join(kwmaps.keys()) + '$\n'
341 keywords = '$' + '$\n$'.join(kwmaps.keys()) + '$\n'
342 repo.wopener(fn, 'w').write(keywords)
342 repo.wopener(fn, 'w').write(keywords)
343 repo.add([fn])
343 repo.add([fn])
344 path = repo.wjoin(fn)
344 path = repo.wjoin(fn)
345 ui.note(_('\n%s keywords written to %s:\n') % (kwstatus, path))
345 ui.note(_('\n%s keywords written to %s:\n') % (kwstatus, path))
346 ui.note(keywords)
346 ui.note(keywords)
347 ui.note('\nhg -R "%s" branch "%s"\n' % (tmpdir, branchname))
347 ui.note('\nhg -R "%s" branch "%s"\n' % (tmpdir, branchname))
348 # silence branch command if not verbose
348 # silence branch command if not verbose
349 quiet = ui.quiet
349 quiet = ui.quiet
350 ui.quiet = not ui.verbose
350 ui.quiet = not ui.verbose
351 commands.branch(ui, repo, branchname)
351 commands.branch(ui, repo, branchname)
352 ui.quiet = quiet
352 ui.quiet = quiet
353 for name, cmd in ui.configitems('hooks'):
353 for name, cmd in ui.configitems('hooks'):
354 if name.split('.', 1)[0].find('commit') > -1:
354 if name.split('.', 1)[0].find('commit') > -1:
355 repo.ui.setconfig('hooks', name, '')
355 repo.ui.setconfig('hooks', name, '')
356 ui.note(_('unhooked all commit hooks\n'))
356 ui.note(_('unhooked all commit hooks\n'))
357 ui.note('hg -R "%s" ci -m "%s"\n' % (tmpdir, msg))
357 ui.note('hg -R "%s" ci -m "%s"\n' % (tmpdir, msg))
358 repo.commit(text=msg)
358 repo.commit(text=msg)
359 format = ui.verbose and ' in %s' % path or ''
359 format = ui.verbose and ' in %s' % path or ''
360 demostatus('%s keywords expanded%s' % (kwstatus, format))
360 demostatus('%s keywords expanded%s' % (kwstatus, format))
361 ui.write(repo.wread(fn))
361 ui.write(repo.wread(fn))
362 ui.debug(_('\nremoving temporary repo %s\n') % tmpdir)
362 ui.debug(_('\nremoving temporary repo %s\n') % tmpdir)
363 shutil.rmtree(tmpdir, ignore_errors=True)
363 shutil.rmtree(tmpdir, ignore_errors=True)
364
364
365 def expand(ui, repo, *pats, **opts):
365 def expand(ui, repo, *pats, **opts):
366 '''expand keywords in working directory
366 '''expand keywords in working directory
367
367
368 Run after (re)enabling keyword expansion.
368 Run after (re)enabling keyword expansion.
369
369
370 kwexpand refuses to run if given files contain local changes.
370 kwexpand refuses to run if given files contain local changes.
371 '''
371 '''
372 # 3rd argument sets expansion to True
372 # 3rd argument sets expansion to True
373 _kwfwrite(ui, repo, True, *pats, **opts)
373 _kwfwrite(ui, repo, True, *pats, **opts)
374
374
375 def files(ui, repo, *pats, **opts):
375 def files(ui, repo, *pats, **opts):
376 '''print files currently configured for keyword expansion
376 '''print files currently configured for keyword expansion
377
377
378 Crosscheck which files in working directory are potential targets for
378 Crosscheck which files in working directory are potential targets for
379 keyword expansion.
379 keyword expansion.
380 That is, files matched by [keyword] config patterns but not symlinks.
380 That is, files matched by [keyword] config patterns but not symlinks.
381 '''
381 '''
382 kwt = kwtools['templater']
382 kwt = kwtools['templater']
383 status = _status(ui, repo, kwt, *pats, **opts)
383 status = _status(ui, repo, kwt, *pats, **opts)
384 modified, added, removed, deleted, unknown, ignored, clean = status
384 modified, added, removed, deleted, unknown, ignored, clean = status
385 files = modified + added + clean
385 files = modified + added + clean
386 if opts.get('untracked'):
386 if opts.get('untracked'):
387 files += unknown
387 files += unknown
388 files.sort()
388 files.sort()
389 wctx = repo[None]
389 wctx = repo[None]
390 kwfiles = [f for f in files if kwt.iskwfile(f, wctx.flags)]
390 kwfiles = [f for f in files if kwt.iskwfile(f, wctx.flags)]
391 cwd = pats and repo.getcwd() or ''
391 cwd = pats and repo.getcwd() or ''
392 kwfstats = not opts.get('ignore') and (('K', kwfiles),) or ()
392 kwfstats = not opts.get('ignore') and (('K', kwfiles),) or ()
393 if opts.get('all') or opts.get('ignore'):
393 if opts.get('all') or opts.get('ignore'):
394 kwfstats += (('I', [f for f in files if f not in kwfiles]),)
394 kwfstats += (('I', [f for f in files if f not in kwfiles]),)
395 for char, filenames in kwfstats:
395 for char, filenames in kwfstats:
396 format = (opts.get('all') or ui.verbose) and '%s %%s\n' % char or '%s\n'
396 format = (opts.get('all') or ui.verbose) and '%s %%s\n' % char or '%s\n'
397 for f in filenames:
397 for f in filenames:
398 ui.write(format % repo.pathto(f, cwd))
398 ui.write(format % repo.pathto(f, cwd))
399
399
400 def shrink(ui, repo, *pats, **opts):
400 def shrink(ui, repo, *pats, **opts):
401 '''revert expanded keywords in working directory
401 '''revert expanded keywords in working directory
402
402
403 Run before changing/disabling active keywords
403 Run before changing/disabling active keywords
404 or if you experience problems with "hg import" or "hg merge".
404 or if you experience problems with "hg import" or "hg merge".
405
405
406 kwshrink refuses to run if given files contain local changes.
406 kwshrink refuses to run if given files contain local changes.
407 '''
407 '''
408 # 3rd argument sets expansion to False
408 # 3rd argument sets expansion to False
409 _kwfwrite(ui, repo, False, *pats, **opts)
409 _kwfwrite(ui, repo, False, *pats, **opts)
410
410
411
411
412 def uisetup(ui):
412 def uisetup(ui):
413 '''Collects [keyword] config in kwtools.
413 '''Collects [keyword] config in kwtools.
414 Monkeypatches dispatch._parse if needed.'''
414 Monkeypatches dispatch._parse if needed.'''
415
415
416 for pat, opt in ui.configitems('keyword'):
416 for pat, opt in ui.configitems('keyword'):
417 if opt != 'ignore':
417 if opt != 'ignore':
418 kwtools['inc'].append(pat)
418 kwtools['inc'].append(pat)
419 else:
419 else:
420 kwtools['exc'].append(pat)
420 kwtools['exc'].append(pat)
421
421
422 if kwtools['inc']:
422 if kwtools['inc']:
423 def kwdispatch_parse(ui, args):
423 def kwdispatch_parse(ui, args):
424 '''Monkeypatch dispatch._parse to obtain running hg command.'''
424 '''Monkeypatch dispatch._parse to obtain running hg command.'''
425 cmd, func, args, options, cmdoptions = dispatch_parse(ui, args)
425 cmd, func, args, options, cmdoptions = dispatch_parse(ui, args)
426 kwtools['hgcmd'] = cmd
426 kwtools['hgcmd'] = cmd
427 return cmd, func, args, options, cmdoptions
427 return cmd, func, args, options, cmdoptions
428
428
429 dispatch_parse = dispatch._parse
429 dispatch_parse = dispatch._parse
430 dispatch._parse = kwdispatch_parse
430 dispatch._parse = kwdispatch_parse
431
431
432 def reposetup(ui, repo):
432 def reposetup(ui, repo):
433 '''Sets up repo as kwrepo for keyword substitution.
433 '''Sets up repo as kwrepo for keyword substitution.
434 Overrides file method to return kwfilelog instead of filelog
434 Overrides file method to return kwfilelog instead of filelog
435 if file matches user configuration.
435 if file matches user configuration.
436 Wraps commit to overwrite configured files with updated
436 Wraps commit to overwrite configured files with updated
437 keyword substitutions.
437 keyword substitutions.
438 Monkeypatches patch and webcommands.'''
438 Monkeypatches patch and webcommands.'''
439
439
440 try:
440 try:
441 if (not repo.local() or not kwtools['inc']
441 if (not repo.local() or not kwtools['inc']
442 or kwtools['hgcmd'] in nokwcommands.split()
442 or kwtools['hgcmd'] in nokwcommands.split()
443 or '.hg' in util.splitpath(repo.root)
443 or '.hg' in util.splitpath(repo.root)
444 or repo._url.startswith('bundle:')):
444 or repo._url.startswith('bundle:')):
445 return
445 return
446 except AttributeError:
446 except AttributeError:
447 pass
447 pass
448
448
449 kwtools['templater'] = kwt = kwtemplater(ui, repo)
449 kwtools['templater'] = kwt = kwtemplater(ui, repo)
450
450
451 class kwrepo(repo.__class__):
451 class kwrepo(repo.__class__):
452 def file(self, f):
452 def file(self, f):
453 if f[0] == '/':
453 if f[0] == '/':
454 f = f[1:]
454 f = f[1:]
455 return kwfilelog(self.sopener, kwt, f)
455 return kwfilelog(self.sopener, kwt, f)
456
456
457 def wread(self, filename):
457 def wread(self, filename):
458 data = super(kwrepo, self).wread(filename)
458 data = super(kwrepo, self).wread(filename)
459 return kwt.wread(filename, data)
459 return kwt.wread(filename, data)
460
460
461 def commit(self, files=None, text='', user=None, date=None,
461 def commit(self, files=None, text='', user=None, date=None,
462 match=None, force=False, force_editor=False,
462 match=None, force=False, force_editor=False,
463 p1=None, p2=None, extra={}, empty_ok=False):
463 p1=None, p2=None, extra={}, empty_ok=False):
464 wlock = lock = None
464 wlock = lock = None
465 _p1 = _p2 = None
465 _p1 = _p2 = None
466 try:
466 try:
467 wlock = self.wlock()
467 wlock = self.wlock()
468 lock = self.lock()
468 lock = self.lock()
469 # store and postpone commit hooks
469 # store and postpone commit hooks
470 commithooks = {}
470 commithooks = {}
471 for name, cmd in ui.configitems('hooks'):
471 for name, cmd in ui.configitems('hooks'):
472 if name.split('.', 1)[0] == 'commit':
472 if name.split('.', 1)[0] == 'commit':
473 commithooks[name] = cmd
473 commithooks[name] = cmd
474 ui.setconfig('hooks', name, None)
474 ui.setconfig('hooks', name, None)
475 if commithooks:
475 if commithooks:
476 # store parents for commit hook environment
476 # store parents for commit hook environment
477 if p1 is None:
477 if p1 is None:
478 _p1, _p2 = repo.dirstate.parents()
478 _p1, _p2 = repo.dirstate.parents()
479 else:
479 else:
480 _p1, _p2 = p1, p2 or nullid
480 _p1, _p2 = p1, p2 or nullid
481 _p1 = hex(_p1)
481 _p1 = hex(_p1)
482 if _p2 == nullid:
482 if _p2 == nullid:
483 _p2 = ''
483 _p2 = ''
484 else:
484 else:
485 _p2 = hex(_p2)
485 _p2 = hex(_p2)
486
486
487 n = super(kwrepo, self).commit(files, text, user, date, match,
487 n = super(kwrepo, self).commit(files, text, user, date, match,
488 force, force_editor, p1, p2,
488 force, force_editor, p1, p2,
489 extra, empty_ok)
489 extra, empty_ok)
490
490
491 # restore commit hooks
491 # restore commit hooks
492 for name, cmd in commithooks.iteritems():
492 for name, cmd in commithooks.iteritems():
493 ui.setconfig('hooks', name, cmd)
493 ui.setconfig('hooks', name, cmd)
494 if n is not None:
494 if n is not None:
495 kwt.overwrite(n, True, None)
495 kwt.overwrite(n, True, None)
496 repo.hook('commit', node=n, parent1=_p1, parent2=_p2)
496 repo.hook('commit', node=n, parent1=_p1, parent2=_p2)
497 return n
497 return n
498 finally:
498 finally:
499 del wlock, lock
499 del wlock, lock
500
500
501 # monkeypatches
501 # monkeypatches
502 def kwpatchfile_init(self, ui, fname, missing=False):
502 def kwpatchfile_init(self, ui, fname, missing=False):
503 '''Monkeypatch/wrap patch.patchfile.__init__ to avoid
503 '''Monkeypatch/wrap patch.patchfile.__init__ to avoid
504 rejects or conflicts due to expanded keywords in working dir.'''
504 rejects or conflicts due to expanded keywords in working dir.'''
505 patchfile_init(self, ui, fname, missing)
505 patchfile_init(self, ui, fname, missing)
506 # shrink keywords read from working dir
506 # shrink keywords read from working dir
507 self.lines = kwt.shrinklines(self.fname, self.lines)
507 self.lines = kwt.shrinklines(self.fname, self.lines)
508
508
509 def kw_diff(repo, node1=None, node2=None, match=None,
509 def kw_diff(repo, node1=None, node2=None, match=None,
510 fp=None, changes=None, opts=None):
510 fp=None, changes=None, opts=None):
511 '''Monkeypatch patch.diff to avoid expansion except when
511 '''Monkeypatch patch.diff to avoid expansion except when
512 comparing against working dir.'''
512 comparing against working dir.'''
513 if node2 is not None:
513 if node2 is not None:
514 kwt.matcher = util.never
514 kwt.matcher = util.never
515 elif node1 is not None and node1 != repo['.'].node():
515 elif node1 is not None and node1 != repo['.'].node():
516 kwt.restrict = True
516 kwt.restrict = True
517 patch_diff(repo, node1, node2, match, fp, changes, opts)
517 patch_diff(repo, node1, node2, match, fp, changes, opts)
518
518
519 def kwweb_annotate(web, req, tmpl):
519 def kwweb_annotate(web, req, tmpl):
520 '''Wraps webcommands.annotate turning off keyword expansion.'''
520 '''Wraps webcommands.annotate turning off keyword expansion.'''
521 kwt.matcher = util.never
521 kwt.matcher = util.never
522 return webcommands_annotate(web, req, tmpl)
522 return webcommands_annotate(web, req, tmpl)
523
523
524 def kwweb_changeset(web, req, tmpl):
524 def kwweb_changeset(web, req, tmpl):
525 '''Wraps webcommands.changeset turning off keyword expansion.'''
525 '''Wraps webcommands.changeset turning off keyword expansion.'''
526 kwt.matcher = util.never
526 kwt.matcher = util.never
527 return webcommands_changeset(web, req, tmpl)
527 return webcommands_changeset(web, req, tmpl)
528
528
529 def kwweb_filediff(web, req, tmpl):
529 def kwweb_filediff(web, req, tmpl):
530 '''Wraps webcommands.filediff turning off keyword expansion.'''
530 '''Wraps webcommands.filediff turning off keyword expansion.'''
531 kwt.matcher = util.never
531 kwt.matcher = util.never
532 return webcommands_filediff(web, req, tmpl)
532 return webcommands_filediff(web, req, tmpl)
533
533
534 repo.__class__ = kwrepo
534 repo.__class__ = kwrepo
535
535
536 patchfile_init = patch.patchfile.__init__
536 patchfile_init = patch.patchfile.__init__
537 patch_diff = patch.diff
537 patch_diff = patch.diff
538 webcommands_annotate = webcommands.annotate
538 webcommands_annotate = webcommands.annotate
539 webcommands_changeset = webcommands.changeset
539 webcommands_changeset = webcommands.changeset
540 webcommands_filediff = webcommands.filediff
540 webcommands_filediff = webcommands.filediff
541
541
542 patch.patchfile.__init__ = kwpatchfile_init
542 patch.patchfile.__init__ = kwpatchfile_init
543 patch.diff = kw_diff
543 patch.diff = kw_diff
544 webcommands.annotate = kwweb_annotate
544 webcommands.annotate = kwweb_annotate
545 webcommands.changeset = webcommands.rev = kwweb_changeset
545 webcommands.changeset = webcommands.rev = kwweb_changeset
546 webcommands.filediff = webcommands.diff = kwweb_filediff
546 webcommands.filediff = webcommands.diff = kwweb_filediff
547
547
548
548
549 cmdtable = {
549 cmdtable = {
550 'kwdemo':
550 'kwdemo':
551 (demo,
551 (demo,
552 [('d', 'default', None, _('show default keyword template maps')),
552 [('d', 'default', None, _('show default keyword template maps')),
553 ('f', 'rcfile', [], _('read maps from rcfile'))],
553 ('f', 'rcfile', [], _('read maps from rcfile'))],
554 _('hg kwdemo [-d] [-f RCFILE] [TEMPLATEMAP]...')),
554 _('hg kwdemo [-d] [-f RCFILE] [TEMPLATEMAP]...')),
555 'kwexpand': (expand, commands.walkopts,
555 'kwexpand': (expand, commands.walkopts,
556 _('hg kwexpand [OPTION]... [FILE]...')),
556 _('hg kwexpand [OPTION]... [FILE]...')),
557 'kwfiles':
557 'kwfiles':
558 (files,
558 (files,
559 [('a', 'all', None, _('show keyword status flags of all files')),
559 [('a', 'all', None, _('show keyword status flags of all files')),
560 ('i', 'ignore', None, _('show files excluded from expansion')),
560 ('i', 'ignore', None, _('show files excluded from expansion')),
561 ('u', 'untracked', None, _('additionally show untracked files')),
561 ('u', 'untracked', None, _('additionally show untracked files')),
562 ] + commands.walkopts,
562 ] + commands.walkopts,
563 _('hg kwfiles [OPTION]... [FILE]...')),
563 _('hg kwfiles [OPTION]... [FILE]...')),
564 'kwshrink': (shrink, commands.walkopts,
564 'kwshrink': (shrink, commands.walkopts,
565 _('hg kwshrink [OPTION]... [FILE]...')),
565 _('hg kwshrink [OPTION]... [FILE]...')),
566 }
566 }
@@ -1,698 +1,699 b''
1 """
1 """
2 dirstate.py - working directory tracking for mercurial
2 dirstate.py - working directory tracking for mercurial
3
3
4 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5
5
6 This software may be used and distributed according to the terms
6 This software may be used and distributed according to the terms
7 of the GNU General Public License, incorporated herein by reference.
7 of the GNU General Public License, incorporated herein by reference.
8 """
8 """
9
9
10 from node import nullid
10 from node import nullid
11 from i18n import _
11 from i18n import _
12 import struct, os, bisect, stat, strutil, util, errno, ignore
12 import struct, os, bisect, stat, strutil, util, errno, ignore
13 import cStringIO, osutil, sys
13 import cStringIO, osutil, sys
14
14
15 _unknown = ('?', 0, 0, 0)
15 _unknown = ('?', 0, 0, 0)
16 _format = ">cllll"
16 _format = ">cllll"
17
17
18 class dirstate(object):
18 class dirstate(object):
19
19
20 def __init__(self, opener, ui, root):
20 def __init__(self, opener, ui, root):
21 self._opener = opener
21 self._opener = opener
22 self._root = root
22 self._root = root
23 self._dirty = False
23 self._dirty = False
24 self._dirtypl = False
24 self._dirtypl = False
25 self._ui = ui
25 self._ui = ui
26
26
27 def __getattr__(self, name):
27 def __getattr__(self, name):
28 if name == '_map':
28 if name == '_map':
29 self._read()
29 self._read()
30 return self._map
30 return self._map
31 elif name == '_copymap':
31 elif name == '_copymap':
32 self._read()
32 self._read()
33 return self._copymap
33 return self._copymap
34 elif name == '_foldmap':
34 elif name == '_foldmap':
35 _foldmap = {}
35 _foldmap = {}
36 for name in self._map:
36 for name in self._map:
37 norm = os.path.normcase(os.path.normpath(name))
37 norm = os.path.normcase(os.path.normpath(name))
38 _foldmap[norm] = name
38 _foldmap[norm] = name
39 self._foldmap = _foldmap
39 self._foldmap = _foldmap
40 return self._foldmap
40 return self._foldmap
41 elif name == '_branch':
41 elif name == '_branch':
42 try:
42 try:
43 self._branch = (self._opener("branch").read().strip()
43 self._branch = (self._opener("branch").read().strip()
44 or "default")
44 or "default")
45 except IOError:
45 except IOError:
46 self._branch = "default"
46 self._branch = "default"
47 return self._branch
47 return self._branch
48 elif name == '_pl':
48 elif name == '_pl':
49 self._pl = [nullid, nullid]
49 self._pl = [nullid, nullid]
50 try:
50 try:
51 st = self._opener("dirstate").read(40)
51 st = self._opener("dirstate").read(40)
52 if len(st) == 40:
52 if len(st) == 40:
53 self._pl = st[:20], st[20:40]
53 self._pl = st[:20], st[20:40]
54 except IOError, err:
54 except IOError, err:
55 if err.errno != errno.ENOENT: raise
55 if err.errno != errno.ENOENT: raise
56 return self._pl
56 return self._pl
57 elif name == '_dirs':
57 elif name == '_dirs':
58 self._dirs = {}
58 self._dirs = {}
59 for f in self._map:
59 for f in self._map:
60 if self[f] != 'r':
60 if self[f] != 'r':
61 self._incpath(f)
61 self._incpath(f)
62 return self._dirs
62 return self._dirs
63 elif name == '_ignore':
63 elif name == '_ignore':
64 files = [self._join('.hgignore')]
64 files = [self._join('.hgignore')]
65 for name, path in self._ui.configitems("ui"):
65 for name, path in self._ui.configitems("ui"):
66 if name == 'ignore' or name.startswith('ignore.'):
66 if name == 'ignore' or name.startswith('ignore.'):
67 files.append(os.path.expanduser(path))
67 files.append(os.path.expanduser(path))
68 self._ignore = ignore.ignore(self._root, files, self._ui.warn)
68 self._ignore = ignore.ignore(self._root, files, self._ui.warn)
69 return self._ignore
69 return self._ignore
70 elif name == '_slash':
70 elif name == '_slash':
71 self._slash = self._ui.configbool('ui', 'slash') and os.sep != '/'
71 self._slash = self._ui.configbool('ui', 'slash') and os.sep != '/'
72 return self._slash
72 return self._slash
73 elif name == '_checklink':
73 elif name == '_checklink':
74 self._checklink = util.checklink(self._root)
74 self._checklink = util.checklink(self._root)
75 return self._checklink
75 return self._checklink
76 elif name == '_checkexec':
76 elif name == '_checkexec':
77 self._checkexec = util.checkexec(self._root)
77 self._checkexec = util.checkexec(self._root)
78 return self._checkexec
78 return self._checkexec
79 elif name == '_checkcase':
79 elif name == '_checkcase':
80 self._checkcase = not util.checkcase(self._join('.hg'))
80 self._checkcase = not util.checkcase(self._join('.hg'))
81 return self._checkcase
81 return self._checkcase
82 elif name == 'normalize':
82 elif name == 'normalize':
83 if self._checkcase:
83 if self._checkcase:
84 self.normalize = self._normalize
84 self.normalize = self._normalize
85 else:
85 else:
86 self.normalize = lambda x: x
86 self.normalize = lambda x: x
87 return self.normalize
87 return self.normalize
88 else:
88 else:
89 raise AttributeError, name
89 raise AttributeError, name
90
90
91 def _join(self, f):
91 def _join(self, f):
92 return os.path.join(self._root, f)
92 return os.path.join(self._root, f)
93
93
94 def flagfunc(self, fallback):
94 def flagfunc(self, fallback):
95 if self._checklink:
95 if self._checklink:
96 if self._checkexec:
96 if self._checkexec:
97 def f(x):
97 def f(x):
98 p = os.path.join(self._root, x)
98 p = os.path.join(self._root, x)
99 if os.path.islink(p):
99 if os.path.islink(p):
100 return 'l'
100 return 'l'
101 if util.is_exec(p):
101 if util.is_exec(p):
102 return 'x'
102 return 'x'
103 return ''
103 return ''
104 return f
104 return f
105 def f(x):
105 def f(x):
106 if os.path.islink(os.path.join(self._root, x)):
106 if os.path.islink(os.path.join(self._root, x)):
107 return 'l'
107 return 'l'
108 if 'x' in fallback(x):
108 if 'x' in fallback(x):
109 return 'x'
109 return 'x'
110 return ''
110 return ''
111 return f
111 return f
112 if self._checkexec:
112 if self._checkexec:
113 def f(x):
113 def f(x):
114 if 'l' in fallback(x):
114 if 'l' in fallback(x):
115 return 'l'
115 return 'l'
116 if util.is_exec(os.path.join(self._root, x)):
116 if util.is_exec(os.path.join(self._root, x)):
117 return 'x'
117 return 'x'
118 return ''
118 return ''
119 return f
119 return f
120 return fallback
120 return fallback
121
121
122 def getcwd(self):
122 def getcwd(self):
123 cwd = os.getcwd()
123 cwd = os.getcwd()
124 if cwd == self._root: return ''
124 if cwd == self._root: return ''
125 # self._root ends with a path separator if self._root is '/' or 'C:\'
125 # self._root ends with a path separator if self._root is '/' or 'C:\'
126 rootsep = self._root
126 rootsep = self._root
127 if not util.endswithsep(rootsep):
127 if not util.endswithsep(rootsep):
128 rootsep += os.sep
128 rootsep += os.sep
129 if cwd.startswith(rootsep):
129 if cwd.startswith(rootsep):
130 return cwd[len(rootsep):]
130 return cwd[len(rootsep):]
131 else:
131 else:
132 # we're outside the repo. return an absolute path.
132 # we're outside the repo. return an absolute path.
133 return cwd
133 return cwd
134
134
135 def pathto(self, f, cwd=None):
135 def pathto(self, f, cwd=None):
136 if cwd is None:
136 if cwd is None:
137 cwd = self.getcwd()
137 cwd = self.getcwd()
138 path = util.pathto(self._root, cwd, f)
138 path = util.pathto(self._root, cwd, f)
139 if self._slash:
139 if self._slash:
140 return util.normpath(path)
140 return util.normpath(path)
141 return path
141 return path
142
142
143 def __getitem__(self, key):
143 def __getitem__(self, key):
144 ''' current states:
144 ''' current states:
145 n normal
145 n normal
146 m needs merging
146 m needs merging
147 r marked for removal
147 r marked for removal
148 a marked for addition
148 a marked for addition
149 ? not tracked'''
149 ? not tracked'''
150 return self._map.get(key, ("?",))[0]
150 return self._map.get(key, ("?",))[0]
151
151
152 def __contains__(self, key):
152 def __contains__(self, key):
153 return key in self._map
153 return key in self._map
154
154
155 def __iter__(self):
155 def __iter__(self):
156 a = self._map.keys()
156 a = self._map.keys()
157 a.sort()
157 a.sort()
158 for x in a:
158 for x in a:
159 yield x
159 yield x
160
160
161 def parents(self):
161 def parents(self):
162 return self._pl
162 return self._pl
163
163
164 def branch(self):
164 def branch(self):
165 return self._branch
165 return self._branch
166
166
167 def setparents(self, p1, p2=nullid):
167 def setparents(self, p1, p2=nullid):
168 self._dirty = self._dirtypl = True
168 self._dirty = self._dirtypl = True
169 self._pl = p1, p2
169 self._pl = p1, p2
170
170
171 def setbranch(self, branch):
171 def setbranch(self, branch):
172 self._branch = branch
172 self._branch = branch
173 self._opener("branch", "w").write(branch + '\n')
173 self._opener("branch", "w").write(branch + '\n')
174
174
175 def _read(self):
175 def _read(self):
176 self._map = {}
176 self._map = {}
177 self._copymap = {}
177 self._copymap = {}
178 if not self._dirtypl:
178 if not self._dirtypl:
179 self._pl = [nullid, nullid]
179 self._pl = [nullid, nullid]
180 try:
180 try:
181 st = self._opener("dirstate").read()
181 st = self._opener("dirstate").read()
182 except IOError, err:
182 except IOError, err:
183 if err.errno != errno.ENOENT: raise
183 if err.errno != errno.ENOENT: raise
184 return
184 return
185 if not st:
185 if not st:
186 return
186 return
187
187
188 if not self._dirtypl:
188 if not self._dirtypl:
189 self._pl = [st[:20], st[20: 40]]
189 self._pl = [st[:20], st[20: 40]]
190
190
191 # deref fields so they will be local in loop
191 # deref fields so they will be local in loop
192 dmap = self._map
192 dmap = self._map
193 copymap = self._copymap
193 copymap = self._copymap
194 unpack = struct.unpack
194 unpack = struct.unpack
195 e_size = struct.calcsize(_format)
195 e_size = struct.calcsize(_format)
196 pos1 = 40
196 pos1 = 40
197 l = len(st)
197 l = len(st)
198
198
199 # the inner loop
199 # the inner loop
200 while pos1 < l:
200 while pos1 < l:
201 pos2 = pos1 + e_size
201 pos2 = pos1 + e_size
202 e = unpack(">cllll", st[pos1:pos2]) # a literal here is faster
202 e = unpack(">cllll", st[pos1:pos2]) # a literal here is faster
203 pos1 = pos2 + e[4]
203 pos1 = pos2 + e[4]
204 f = st[pos2:pos1]
204 f = st[pos2:pos1]
205 if '\0' in f:
205 if '\0' in f:
206 f, c = f.split('\0')
206 f, c = f.split('\0')
207 copymap[f] = c
207 copymap[f] = c
208 dmap[f] = e # we hold onto e[4] because making a subtuple is slow
208 dmap[f] = e # we hold onto e[4] because making a subtuple is slow
209
209
210 def invalidate(self):
210 def invalidate(self):
211 for a in "_map _copymap _foldmap _branch _pl _dirs _ignore".split():
211 for a in "_map _copymap _foldmap _branch _pl _dirs _ignore".split():
212 if a in self.__dict__:
212 if a in self.__dict__:
213 delattr(self, a)
213 delattr(self, a)
214 self._dirty = False
214 self._dirty = False
215
215
216 def copy(self, source, dest):
216 def copy(self, source, dest):
217 if source == dest:
217 if source == dest:
218 return
218 return
219 self._dirty = True
219 self._dirty = True
220 self._copymap[dest] = source
220 self._copymap[dest] = source
221
221
222 def copied(self, file):
222 def copied(self, file):
223 return self._copymap.get(file, None)
223 return self._copymap.get(file, None)
224
224
225 def copies(self):
225 def copies(self):
226 return self._copymap
226 return self._copymap
227
227
228 def _incpath(self, path):
228 def _incpath(self, path):
229 c = path.rfind('/')
229 c = path.rfind('/')
230 if c >= 0:
230 if c >= 0:
231 dirs = self._dirs
231 dirs = self._dirs
232 base = path[:c]
232 base = path[:c]
233 if base not in dirs:
233 if base not in dirs:
234 self._incpath(base)
234 self._incpath(base)
235 dirs[base] = 1
235 dirs[base] = 1
236 else:
236 else:
237 dirs[base] += 1
237 dirs[base] += 1
238
238
239 def _decpath(self, path):
239 def _decpath(self, path):
240 c = path.rfind('/')
240 c = path.rfind('/')
241 if c >= 0:
241 if c >= 0:
242 base = path[:c]
242 base = path[:c]
243 dirs = self._dirs
243 dirs = self._dirs
244 if dirs[base] == 1:
244 if dirs[base] == 1:
245 del dirs[base]
245 del dirs[base]
246 self._decpath(base)
246 self._decpath(base)
247 else:
247 else:
248 dirs[base] -= 1
248 dirs[base] -= 1
249
249
250 def _incpathcheck(self, f):
250 def _incpathcheck(self, f):
251 if '\r' in f or '\n' in f:
251 if '\r' in f or '\n' in f:
252 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
252 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
253 % f)
253 % f)
254 # shadows
254 # shadows
255 if f in self._dirs:
255 if f in self._dirs:
256 raise util.Abort(_('directory %r already in dirstate') % f)
256 raise util.Abort(_('directory %r already in dirstate') % f)
257 for c in strutil.rfindall(f, '/'):
257 for c in strutil.rfindall(f, '/'):
258 d = f[:c]
258 d = f[:c]
259 if d in self._dirs:
259 if d in self._dirs:
260 break
260 break
261 if d in self._map and self[d] != 'r':
261 if d in self._map and self[d] != 'r':
262 raise util.Abort(_('file %r in dirstate clashes with %r') %
262 raise util.Abort(_('file %r in dirstate clashes with %r') %
263 (d, f))
263 (d, f))
264 self._incpath(f)
264 self._incpath(f)
265
265
266 def _changepath(self, f, newstate, relaxed=False):
266 def _changepath(self, f, newstate, relaxed=False):
267 # handle upcoming path changes
267 # handle upcoming path changes
268 oldstate = self[f]
268 oldstate = self[f]
269 if oldstate not in "?r" and newstate in "?r":
269 if oldstate not in "?r" and newstate in "?r":
270 if "_dirs" in self.__dict__:
270 if "_dirs" in self.__dict__:
271 self._decpath(f)
271 self._decpath(f)
272 return
272 return
273 if oldstate in "?r" and newstate not in "?r":
273 if oldstate in "?r" and newstate not in "?r":
274 if relaxed and oldstate == '?':
274 if relaxed and oldstate == '?':
275 # XXX
275 # XXX
276 # in relaxed mode we assume the caller knows
276 # in relaxed mode we assume the caller knows
277 # what it is doing, workaround for updating
277 # what it is doing, workaround for updating
278 # dir-to-file revisions
278 # dir-to-file revisions
279 if "_dirs" in self.__dict__:
279 if "_dirs" in self.__dict__:
280 self._incpath(f)
280 self._incpath(f)
281 return
281 return
282 self._incpathcheck(f)
282 self._incpathcheck(f)
283 return
283 return
284
284
285 def normal(self, f):
285 def normal(self, f):
286 'mark a file normal and clean'
286 'mark a file normal and clean'
287 self._dirty = True
287 self._dirty = True
288 self._changepath(f, 'n', True)
288 self._changepath(f, 'n', True)
289 s = os.lstat(self._join(f))
289 s = os.lstat(self._join(f))
290 self._map[f] = ('n', s.st_mode, s.st_size, s.st_mtime, 0)
290 self._map[f] = ('n', s.st_mode, s.st_size, s.st_mtime, 0)
291 if f in self._copymap:
291 if f in self._copymap:
292 del self._copymap[f]
292 del self._copymap[f]
293
293
294 def normallookup(self, f):
294 def normallookup(self, f):
295 'mark a file normal, but possibly dirty'
295 'mark a file normal, but possibly dirty'
296 if self._pl[1] != nullid and f in self._map:
296 if self._pl[1] != nullid and f in self._map:
297 # if there is a merge going on and the file was either
297 # if there is a merge going on and the file was either
298 # in state 'm' or dirty before being removed, restore that state.
298 # in state 'm' or dirty before being removed, restore that state.
299 entry = self._map[f]
299 entry = self._map[f]
300 if entry[0] == 'r' and entry[2] in (-1, -2):
300 if entry[0] == 'r' and entry[2] in (-1, -2):
301 source = self._copymap.get(f)
301 source = self._copymap.get(f)
302 if entry[2] == -1:
302 if entry[2] == -1:
303 self.merge(f)
303 self.merge(f)
304 elif entry[2] == -2:
304 elif entry[2] == -2:
305 self.normaldirty(f)
305 self.normaldirty(f)
306 if source:
306 if source:
307 self.copy(source, f)
307 self.copy(source, f)
308 return
308 return
309 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
309 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
310 return
310 return
311 self._dirty = True
311 self._dirty = True
312 self._changepath(f, 'n', True)
312 self._changepath(f, 'n', True)
313 self._map[f] = ('n', 0, -1, -1, 0)
313 self._map[f] = ('n', 0, -1, -1, 0)
314 if f in self._copymap:
314 if f in self._copymap:
315 del self._copymap[f]
315 del self._copymap[f]
316
316
317 def normaldirty(self, f):
317 def normaldirty(self, f):
318 'mark a file normal, but dirty'
318 'mark a file normal, but dirty'
319 self._dirty = True
319 self._dirty = True
320 self._changepath(f, 'n', True)
320 self._changepath(f, 'n', True)
321 self._map[f] = ('n', 0, -2, -1, 0)
321 self._map[f] = ('n', 0, -2, -1, 0)
322 if f in self._copymap:
322 if f in self._copymap:
323 del self._copymap[f]
323 del self._copymap[f]
324
324
325 def add(self, f):
325 def add(self, f):
326 'mark a file added'
326 'mark a file added'
327 self._dirty = True
327 self._dirty = True
328 self._changepath(f, 'a')
328 self._changepath(f, 'a')
329 self._map[f] = ('a', 0, -1, -1, 0)
329 self._map[f] = ('a', 0, -1, -1, 0)
330 if f in self._copymap:
330 if f in self._copymap:
331 del self._copymap[f]
331 del self._copymap[f]
332
332
333 def remove(self, f):
333 def remove(self, f):
334 'mark a file removed'
334 'mark a file removed'
335 self._dirty = True
335 self._dirty = True
336 self._changepath(f, 'r')
336 self._changepath(f, 'r')
337 size = 0
337 size = 0
338 if self._pl[1] != nullid and f in self._map:
338 if self._pl[1] != nullid and f in self._map:
339 entry = self._map[f]
339 entry = self._map[f]
340 if entry[0] == 'm':
340 if entry[0] == 'm':
341 size = -1
341 size = -1
342 elif entry[0] == 'n' and entry[2] == -2:
342 elif entry[0] == 'n' and entry[2] == -2:
343 size = -2
343 size = -2
344 self._map[f] = ('r', 0, size, 0, 0)
344 self._map[f] = ('r', 0, size, 0, 0)
345 if size == 0 and f in self._copymap:
345 if size == 0 and f in self._copymap:
346 del self._copymap[f]
346 del self._copymap[f]
347
347
348 def merge(self, f):
348 def merge(self, f):
349 'mark a file merged'
349 'mark a file merged'
350 self._dirty = True
350 self._dirty = True
351 s = os.lstat(self._join(f))
351 s = os.lstat(self._join(f))
352 self._changepath(f, 'm', True)
352 self._changepath(f, 'm', True)
353 self._map[f] = ('m', s.st_mode, s.st_size, s.st_mtime, 0)
353 self._map[f] = ('m', s.st_mode, s.st_size, s.st_mtime, 0)
354 if f in self._copymap:
354 if f in self._copymap:
355 del self._copymap[f]
355 del self._copymap[f]
356
356
357 def forget(self, f):
357 def forget(self, f):
358 'forget a file'
358 'forget a file'
359 self._dirty = True
359 self._dirty = True
360 try:
360 try:
361 self._changepath(f, '?')
361 self._changepath(f, '?')
362 del self._map[f]
362 del self._map[f]
363 except KeyError:
363 except KeyError:
364 self._ui.warn(_("not in dirstate: %s\n") % f)
364 self._ui.warn(_("not in dirstate: %s\n") % f)
365
365
366 def _normalize(self, path):
366 def _normalize(self, path):
367 normpath = os.path.normcase(os.path.normpath(path))
367 normpath = os.path.normcase(os.path.normpath(path))
368 if normpath in self._foldmap:
368 if normpath in self._foldmap:
369 return self._foldmap[normpath]
369 return self._foldmap[normpath]
370 elif os.path.exists(path):
370 elif os.path.exists(path):
371 self._foldmap[normpath] = util.fspath(path, self._root)
371 self._foldmap[normpath] = util.fspath(path, self._root)
372 return self._foldmap[normpath]
372 return self._foldmap[normpath]
373 else:
373 else:
374 return path
374 return path
375
375
376 def clear(self):
376 def clear(self):
377 self._map = {}
377 self._map = {}
378 if "_dirs" in self.__dict__:
378 if "_dirs" in self.__dict__:
379 delattr(self, "_dirs");
379 delattr(self, "_dirs");
380 self._copymap = {}
380 self._copymap = {}
381 self._pl = [nullid, nullid]
381 self._pl = [nullid, nullid]
382 self._dirty = True
382 self._dirty = True
383
383
384 def rebuild(self, parent, files):
384 def rebuild(self, parent, files):
385 self.clear()
385 self.clear()
386 for f in files:
386 for f in files:
387 if 'x' in files.flags(f):
387 if 'x' in files.flags(f):
388 self._map[f] = ('n', 0777, -1, 0, 0)
388 self._map[f] = ('n', 0777, -1, 0, 0)
389 else:
389 else:
390 self._map[f] = ('n', 0666, -1, 0, 0)
390 self._map[f] = ('n', 0666, -1, 0, 0)
391 self._pl = (parent, nullid)
391 self._pl = (parent, nullid)
392 self._dirty = True
392 self._dirty = True
393
393
394 def write(self):
394 def write(self):
395 if not self._dirty:
395 if not self._dirty:
396 return
396 return
397 st = self._opener("dirstate", "w", atomictemp=True)
397 st = self._opener("dirstate", "w", atomictemp=True)
398
398
399 try:
399 try:
400 gran = int(self._ui.config('dirstate', 'granularity', 1))
400 gran = int(self._ui.config('dirstate', 'granularity', 1))
401 except ValueError:
401 except ValueError:
402 gran = 1
402 gran = 1
403 limit = sys.maxint
403 limit = sys.maxint
404 if gran > 0:
404 if gran > 0:
405 limit = util.fstat(st).st_mtime - gran
405 limit = util.fstat(st).st_mtime - gran
406
406
407 cs = cStringIO.StringIO()
407 cs = cStringIO.StringIO()
408 copymap = self._copymap
408 copymap = self._copymap
409 pack = struct.pack
409 pack = struct.pack
410 write = cs.write
410 write = cs.write
411 write("".join(self._pl))
411 write("".join(self._pl))
412 for f, e in self._map.iteritems():
412 for f, e in self._map.iteritems():
413 if f in copymap:
413 if f in copymap:
414 f = "%s\0%s" % (f, copymap[f])
414 f = "%s\0%s" % (f, copymap[f])
415 if e[3] > limit and e[0] == 'n':
415 if e[3] > limit and e[0] == 'n':
416 e = (e[0], 0, -1, -1, 0)
416 e = (e[0], 0, -1, -1, 0)
417 e = pack(_format, e[0], e[1], e[2], e[3], len(f))
417 e = pack(_format, e[0], e[1], e[2], e[3], len(f))
418 write(e)
418 write(e)
419 write(f)
419 write(f)
420 st.write(cs.getvalue())
420 st.write(cs.getvalue())
421 st.rename()
421 st.rename()
422 self._dirty = self._dirtypl = False
422 self._dirty = self._dirtypl = False
423
423
424 def _filter(self, files):
424 def _filter(self, files):
425 ret = {}
425 ret = {}
426 unknown = []
426 unknown = []
427
427
428 for x in files:
428 for x in files:
429 if x == '.':
429 if x == '.':
430 return self._map.copy()
430 return self._map.copy()
431 if x not in self._map:
431 if x not in self._map:
432 unknown.append(x)
432 unknown.append(x)
433 else:
433 else:
434 ret[x] = self._map[x]
434 ret[x] = self._map[x]
435
435
436 if not unknown:
436 if not unknown:
437 return ret
437 return ret
438
438
439 b = self._map.keys()
439 b = self._map.keys()
440 b.sort()
440 b.sort()
441 blen = len(b)
441 blen = len(b)
442
442
443 for x in unknown:
443 for x in unknown:
444 bs = bisect.bisect(b, "%s%s" % (x, '/'))
444 bs = bisect.bisect(b, "%s%s" % (x, '/'))
445 while bs < blen:
445 while bs < blen:
446 s = b[bs]
446 s = b[bs]
447 if len(s) > len(x) and s.startswith(x):
447 if len(s) > len(x) and s.startswith(x):
448 ret[s] = self._map[s]
448 ret[s] = self._map[s]
449 else:
449 else:
450 break
450 break
451 bs += 1
451 bs += 1
452 return ret
452 return ret
453
453
454 def _supported(self, f, mode, verbose=False):
454 def _supported(self, f, mode, verbose=False):
455 if stat.S_ISREG(mode) or stat.S_ISLNK(mode):
455 if stat.S_ISREG(mode) or stat.S_ISLNK(mode):
456 return True
456 return True
457 if verbose:
457 if verbose:
458 kind = 'unknown'
458 kind = 'unknown'
459 if stat.S_ISCHR(mode): kind = _('character device')
459 if stat.S_ISCHR(mode): kind = _('character device')
460 elif stat.S_ISBLK(mode): kind = _('block device')
460 elif stat.S_ISBLK(mode): kind = _('block device')
461 elif stat.S_ISFIFO(mode): kind = _('fifo')
461 elif stat.S_ISFIFO(mode): kind = _('fifo')
462 elif stat.S_ISSOCK(mode): kind = _('socket')
462 elif stat.S_ISSOCK(mode): kind = _('socket')
463 elif stat.S_ISDIR(mode): kind = _('directory')
463 elif stat.S_ISDIR(mode): kind = _('directory')
464 self._ui.warn(_('%s: unsupported file type (type is %s)\n')
464 self._ui.warn(_('%s: unsupported file type (type is %s)\n')
465 % (self.pathto(f), kind))
465 % (self.pathto(f), kind))
466 return False
466 return False
467
467
468 def _dirignore(self, f):
468 def _dirignore(self, f):
469 if f == '.':
469 if f == '.':
470 return False
470 return False
471 if self._ignore(f):
471 if self._ignore(f):
472 return True
472 return True
473 for c in strutil.findall(f, '/'):
473 for c in strutil.findall(f, '/'):
474 if self._ignore(f[:c]):
474 if self._ignore(f[:c]):
475 return True
475 return True
476 return False
476 return False
477
477
478 def walk(self, match):
478 def walk(self, match):
479 # filter out the src and stat
479 # filter out the src and stat
480 for src, f, st in self.statwalk(match):
480 for src, f, st in self.statwalk(match):
481 yield f
481 yield f
482
482
483 def statwalk(self, match, unknown=True, ignored=False):
483 def statwalk(self, match, unknown=True, ignored=False):
484 '''
484 '''
485 walk recursively through the directory tree, finding all files
485 walk recursively through the directory tree, finding all files
486 matched by the match function
486 matched by the match function
487
487
488 results are yielded in a tuple (src, filename, st), where src
488 results are yielded in a tuple (src, filename, st), where src
489 is one of:
489 is one of:
490 'f' the file was found in the directory tree
490 'f' the file was found in the directory tree
491 'm' the file was only in the dirstate and not in the tree
491 'm' the file was only in the dirstate and not in the tree
492
492
493 and st is the stat result if the file was found in the directory.
493 and st is the stat result if the file was found in the directory.
494 '''
494 '''
495
495
496 def fwarn(f, msg):
496 def fwarn(f, msg):
497 self._ui.warn('%s: %s\n' % (self.pathto(ff), msg))
497 self._ui.warn('%s: %s\n' % (self.pathto(ff), msg))
498 return False
498 return False
499 badfn = fwarn
499 badfn = fwarn
500 if hasattr(match, 'bad'):
500 if hasattr(match, 'bad'):
501 badfn = match.bad
501 badfn = match.bad
502
502
503 # walk all files by default
503 # walk all files by default
504 files = match.files()
504 files = match.files()
505 if not files:
505 if not files:
506 files = ['.']
506 files = ['.']
507 dc = self._map.copy()
507 dc = self._map.copy()
508 else:
508 else:
509 files = util.unique(files)
509 files = util.unique(files)
510 dc = self._filter(files)
510 dc = self._filter(files)
511
511
512 def imatch(file_):
512 def imatch(file_):
513 if file_ not in dc and self._ignore(file_):
513 if file_ not in dc and self._ignore(file_):
514 return False
514 return False
515 return match(file_)
515 return match(file_)
516
516
517 # TODO: don't walk unknown directories if unknown and ignored are False
517 # TODO: don't walk unknown directories if unknown and ignored are False
518 ignore = self._ignore
518 ignore = self._ignore
519 dirignore = self._dirignore
519 dirignore = self._dirignore
520 if ignored:
520 if ignored:
521 imatch = match
521 imatch = match
522 ignore = util.never
522 ignore = util.never
523 dirignore = util.never
523 dirignore = util.never
524
524
525 # self._root may end with a path separator when self._root == '/'
525 # self._root may end with a path separator when self._root == '/'
526 common_prefix_len = len(self._root)
526 common_prefix_len = len(self._root)
527 if not util.endswithsep(self._root):
527 if not util.endswithsep(self._root):
528 common_prefix_len += 1
528 common_prefix_len += 1
529
529
530 normpath = util.normpath
530 normpath = util.normpath
531 listdir = osutil.listdir
531 listdir = osutil.listdir
532 lstat = os.lstat
532 lstat = os.lstat
533 bisect_left = bisect.bisect_left
533 bisect_left = bisect.bisect_left
534 isdir = os.path.isdir
534 isdir = os.path.isdir
535 pconvert = util.pconvert
535 pconvert = util.pconvert
536 join = os.path.join
536 join = os.path.join
537 s_isdir = stat.S_ISDIR
537 s_isdir = stat.S_ISDIR
538 supported = self._supported
538 supported = self._supported
539 _join = self._join
539 _join = self._join
540 known = {'.hg': 1}
540 known = {'.hg': 1}
541
541
542 # recursion free walker, faster than os.walk.
542 # recursion free walker, faster than os.walk.
543 def findfiles(s):
543 def findfiles(s):
544 work = [s]
544 work = [s]
545 wadd = work.append
545 wadd = work.append
546 found = []
546 found = []
547 add = found.append
547 add = found.append
548 if hasattr(match, 'dir'):
548 if hasattr(match, 'dir'):
549 match.dir(normpath(s[common_prefix_len:]))
549 match.dir(normpath(s[common_prefix_len:]))
550 while work:
550 while work:
551 top = work.pop()
551 top = work.pop()
552 entries = listdir(top, stat=True)
552 entries = listdir(top, stat=True)
553 # nd is the top of the repository dir tree
553 # nd is the top of the repository dir tree
554 nd = normpath(top[common_prefix_len:])
554 nd = normpath(top[common_prefix_len:])
555 if nd == '.':
555 if nd == '.':
556 nd = ''
556 nd = ''
557 else:
557 else:
558 # do not recurse into a repo contained in this
558 # do not recurse into a repo contained in this
559 # one. use bisect to find .hg directory so speed
559 # one. use bisect to find .hg directory so speed
560 # is good on big directory.
560 # is good on big directory.
561 names = [e[0] for e in entries]
561 names = [e[0] for e in entries]
562 hg = bisect_left(names, '.hg')
562 hg = bisect_left(names, '.hg')
563 if hg < len(names) and names[hg] == '.hg':
563 if hg < len(names) and names[hg] == '.hg':
564 if isdir(join(top, '.hg')):
564 if isdir(join(top, '.hg')):
565 continue
565 continue
566 for f, kind, st in entries:
566 for f, kind, st in entries:
567 np = pconvert(join(nd, f))
567 np = pconvert(join(nd, f))
568 if np in known:
568 if np in known:
569 continue
569 continue
570 known[np] = 1
570 known[np] = 1
571 p = join(top, f)
571 p = join(top, f)
572 # don't trip over symlinks
572 # don't trip over symlinks
573 if kind == stat.S_IFDIR:
573 if kind == stat.S_IFDIR:
574 if not ignore(np):
574 if not ignore(np):
575 wadd(p)
575 wadd(p)
576 if hasattr(match, 'dir'):
576 if hasattr(match, 'dir'):
577 match.dir(np)
577 match.dir(np)
578 if np in dc and match(np):
578 if np in dc and match(np):
579 add((np, 'm', st))
579 add((np, 'm', st))
580 elif imatch(np):
580 elif imatch(np):
581 if supported(np, st.st_mode):
581 if supported(np, st.st_mode):
582 add((np, 'f', st))
582 add((np, 'f', st))
583 elif np in dc:
583 elif np in dc:
584 add((np, 'm', st))
584 add((np, 'm', st))
585 found.sort()
585 found.sort()
586 return found
586 return found
587
587
588 # step one, find all files that match our criteria
588 # step one, find all files that match our criteria
589 files.sort()
589 files.sort()
590 for ff in files:
590 for ff in files:
591 nf = normpath(ff)
591 nf = normpath(ff)
592 f = _join(ff)
592 f = _join(ff)
593 try:
593 try:
594 st = lstat(f)
594 st = lstat(f)
595 except OSError, inst:
595 except OSError, inst:
596 found = False
596 found = False
597 for fn in dc:
597 for fn in dc:
598 if nf == fn or (fn.startswith(nf) and fn[len(nf)] == '/'):
598 if nf == fn or (fn.startswith(nf) and fn[len(nf)] == '/'):
599 found = True
599 found = True
600 break
600 break
601 if not found:
601 if not found:
602 if inst.errno != errno.ENOENT:
602 if inst.errno != errno.ENOENT:
603 fwarn(ff, inst.strerror)
603 fwarn(ff, inst.strerror)
604 elif badfn(ff, inst.strerror) and imatch(nf):
604 elif badfn(ff, inst.strerror) and imatch(nf):
605 yield 'f', ff, None
605 yield 'f', ff, None
606 continue
606 continue
607 if s_isdir(st.st_mode):
607 if s_isdir(st.st_mode):
608 if not dirignore(nf):
608 if not dirignore(nf):
609 for f, src, st in findfiles(f):
609 for f, src, st in findfiles(f):
610 yield src, f, st
610 yield src, f, st
611 else:
611 else:
612 if nf in known:
612 if nf in known:
613 continue
613 continue
614 known[nf] = 1
614 known[nf] = 1
615 if match(nf):
615 if match(nf):
616 if supported(ff, st.st_mode, verbose=True):
616 if supported(ff, st.st_mode, verbose=True):
617 yield 'f', self.normalize(nf), st
617 yield 'f', self.normalize(nf), st
618 elif ff in dc:
618 elif ff in dc:
619 yield 'm', nf, st
619 yield 'm', nf, st
620
620
621 # step two run through anything left in the dc hash and yield
621 # step two run through anything left in the dc hash and yield
622 # if we haven't already seen it
622 # if we haven't already seen it
623 ks = dc.keys()
623 ks = dc.keys()
624 ks.sort()
624 ks.sort()
625 for k in ks:
625 for k in ks:
626 if k in known:
626 if k in known:
627 continue
627 continue
628 known[k] = 1
628 known[k] = 1
629 if imatch(k):
629 if imatch(k):
630 yield 'm', k, None
630 yield 'm', k, None
631
631
632 def status(self, match, list_ignored, list_clean, list_unknown):
632 def status(self, match, ignored, clean, unknown):
633 listignored, listclean, listunknown = ignored, clean, unknown
634
633 lookup, modified, added, unknown, ignored = [], [], [], [], []
635 lookup, modified, added, unknown, ignored = [], [], [], [], []
634 removed, deleted, clean = [], [], []
636 removed, deleted, clean = [], [], []
635
637
636 _join = self._join
638 _join = self._join
637 lstat = os.lstat
639 lstat = os.lstat
638 cmap = self._copymap
640 cmap = self._copymap
639 dmap = self._map
641 dmap = self._map
640 ladd = lookup.append
642 ladd = lookup.append
641 madd = modified.append
643 madd = modified.append
642 aadd = added.append
644 aadd = added.append
643 uadd = unknown.append
645 uadd = unknown.append
644 iadd = ignored.append
646 iadd = ignored.append
645 radd = removed.append
647 radd = removed.append
646 dadd = deleted.append
648 dadd = deleted.append
647 cadd = clean.append
649 cadd = clean.append
648
650
649 for src, fn, st in self.statwalk(match, unknown=list_unknown,
651 for src, fn, st in self.statwalk(match, listunknown, listignored):
650 ignored=list_ignored):
651 if fn not in dmap:
652 if fn not in dmap:
652 if (list_ignored or match.exact(fn)) and self._dirignore(fn):
653 if (listignored or match.exact(fn)) and self._dirignore(fn):
653 if list_ignored:
654 if listignored:
654 iadd(fn)
655 iadd(fn)
655 elif list_unknown:
656 elif listunknown:
656 uadd(fn)
657 uadd(fn)
657 continue
658 continue
658
659
659 state, mode, size, time, foo = dmap[fn]
660 state, mode, size, time, foo = dmap[fn]
660
661
661 if src == 'm':
662 if src == 'm':
662 nonexistent = True
663 nonexistent = True
663 if not st:
664 if not st:
664 try:
665 try:
665 st = lstat(_join(fn))
666 st = lstat(_join(fn))
666 except OSError, inst:
667 except OSError, inst:
667 if inst.errno not in (errno.ENOENT, errno.ENOTDIR):
668 if inst.errno not in (errno.ENOENT, errno.ENOTDIR):
668 raise
669 raise
669 st = None
670 st = None
670 # We need to re-check that it is a valid file
671 # We need to re-check that it is a valid file
671 if st and self._supported(fn, st.st_mode):
672 if st and self._supported(fn, st.st_mode):
672 nonexistent = False
673 nonexistent = False
673 if nonexistent and state in "nma":
674 if nonexistent and state in "nma":
674 dadd(fn)
675 dadd(fn)
675 continue
676 continue
676 # check the common case first
677 # check the common case first
677 if state == 'n':
678 if state == 'n':
678 if not st:
679 if not st:
679 st = lstat(_join(fn))
680 st = lstat(_join(fn))
680 if (size >= 0 and
681 if (size >= 0 and
681 (size != st.st_size
682 (size != st.st_size
682 or ((mode ^ st.st_mode) & 0100 and self._checkexec))
683 or ((mode ^ st.st_mode) & 0100 and self._checkexec))
683 or size == -2
684 or size == -2
684 or fn in self._copymap):
685 or fn in self._copymap):
685 madd(fn)
686 madd(fn)
686 elif time != int(st.st_mtime):
687 elif time != int(st.st_mtime):
687 ladd(fn)
688 ladd(fn)
688 elif list_clean:
689 elif listclean:
689 cadd(fn)
690 cadd(fn)
690 elif state == 'm':
691 elif state == 'm':
691 madd(fn)
692 madd(fn)
692 elif state == 'a':
693 elif state == 'a':
693 aadd(fn)
694 aadd(fn)
694 elif state == 'r':
695 elif state == 'r':
695 radd(fn)
696 radd(fn)
696
697
697 return (lookup, modified, added, removed, deleted, unknown, ignored,
698 return (lookup, modified, added, removed, deleted, unknown, ignored,
698 clean)
699 clean)
@@ -1,2136 +1,2137 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context, weakref
11 import changelog, dirstate, filelog, manifest, context, weakref
12 import lock, transaction, stat, errno, ui
12 import lock, transaction, stat, errno, ui
13 import os, revlog, time, util, extensions, hook, inspect
13 import os, revlog, time, util, extensions, hook, inspect
14 import match as match_
14 import match as match_
15
15
16 class localrepository(repo.repository):
16 class localrepository(repo.repository):
17 capabilities = util.set(('lookup', 'changegroupsubset'))
17 capabilities = util.set(('lookup', 'changegroupsubset'))
18 supported = ('revlogv1', 'store')
18 supported = ('revlogv1', 'store')
19
19
20 def __init__(self, parentui, path=None, create=0):
20 def __init__(self, parentui, path=None, create=0):
21 repo.repository.__init__(self)
21 repo.repository.__init__(self)
22 self.root = os.path.realpath(path)
22 self.root = os.path.realpath(path)
23 self.path = os.path.join(self.root, ".hg")
23 self.path = os.path.join(self.root, ".hg")
24 self.origroot = path
24 self.origroot = path
25 self.opener = util.opener(self.path)
25 self.opener = util.opener(self.path)
26 self.wopener = util.opener(self.root)
26 self.wopener = util.opener(self.root)
27
27
28 if not os.path.isdir(self.path):
28 if not os.path.isdir(self.path):
29 if create:
29 if create:
30 if not os.path.exists(path):
30 if not os.path.exists(path):
31 os.mkdir(path)
31 os.mkdir(path)
32 os.mkdir(self.path)
32 os.mkdir(self.path)
33 requirements = ["revlogv1"]
33 requirements = ["revlogv1"]
34 if parentui.configbool('format', 'usestore', True):
34 if parentui.configbool('format', 'usestore', True):
35 os.mkdir(os.path.join(self.path, "store"))
35 os.mkdir(os.path.join(self.path, "store"))
36 requirements.append("store")
36 requirements.append("store")
37 # create an invalid changelog
37 # create an invalid changelog
38 self.opener("00changelog.i", "a").write(
38 self.opener("00changelog.i", "a").write(
39 '\0\0\0\2' # represents revlogv2
39 '\0\0\0\2' # represents revlogv2
40 ' dummy changelog to prevent using the old repo layout'
40 ' dummy changelog to prevent using the old repo layout'
41 )
41 )
42 reqfile = self.opener("requires", "w")
42 reqfile = self.opener("requires", "w")
43 for r in requirements:
43 for r in requirements:
44 reqfile.write("%s\n" % r)
44 reqfile.write("%s\n" % r)
45 reqfile.close()
45 reqfile.close()
46 else:
46 else:
47 raise repo.RepoError(_("repository %s not found") % path)
47 raise repo.RepoError(_("repository %s not found") % path)
48 elif create:
48 elif create:
49 raise repo.RepoError(_("repository %s already exists") % path)
49 raise repo.RepoError(_("repository %s already exists") % path)
50 else:
50 else:
51 # find requirements
51 # find requirements
52 try:
52 try:
53 requirements = self.opener("requires").read().splitlines()
53 requirements = self.opener("requires").read().splitlines()
54 except IOError, inst:
54 except IOError, inst:
55 if inst.errno != errno.ENOENT:
55 if inst.errno != errno.ENOENT:
56 raise
56 raise
57 requirements = []
57 requirements = []
58 # check them
58 # check them
59 for r in requirements:
59 for r in requirements:
60 if r not in self.supported:
60 if r not in self.supported:
61 raise repo.RepoError(_("requirement '%s' not supported") % r)
61 raise repo.RepoError(_("requirement '%s' not supported") % r)
62
62
63 # setup store
63 # setup store
64 if "store" in requirements:
64 if "store" in requirements:
65 self.encodefn = util.encodefilename
65 self.encodefn = util.encodefilename
66 self.decodefn = util.decodefilename
66 self.decodefn = util.decodefilename
67 self.spath = os.path.join(self.path, "store")
67 self.spath = os.path.join(self.path, "store")
68 else:
68 else:
69 self.encodefn = lambda x: x
69 self.encodefn = lambda x: x
70 self.decodefn = lambda x: x
70 self.decodefn = lambda x: x
71 self.spath = self.path
71 self.spath = self.path
72
72
73 try:
73 try:
74 # files in .hg/ will be created using this mode
74 # files in .hg/ will be created using this mode
75 mode = os.stat(self.spath).st_mode
75 mode = os.stat(self.spath).st_mode
76 # avoid some useless chmods
76 # avoid some useless chmods
77 if (0777 & ~util._umask) == (0777 & mode):
77 if (0777 & ~util._umask) == (0777 & mode):
78 mode = None
78 mode = None
79 except OSError:
79 except OSError:
80 mode = None
80 mode = None
81
81
82 self._createmode = mode
82 self._createmode = mode
83 self.opener.createmode = mode
83 self.opener.createmode = mode
84 sopener = util.opener(self.spath)
84 sopener = util.opener(self.spath)
85 sopener.createmode = mode
85 sopener.createmode = mode
86 self.sopener = util.encodedopener(sopener, self.encodefn)
86 self.sopener = util.encodedopener(sopener, self.encodefn)
87
87
88 self.ui = ui.ui(parentui=parentui)
88 self.ui = ui.ui(parentui=parentui)
89 try:
89 try:
90 self.ui.readconfig(self.join("hgrc"), self.root)
90 self.ui.readconfig(self.join("hgrc"), self.root)
91 extensions.loadall(self.ui)
91 extensions.loadall(self.ui)
92 except IOError:
92 except IOError:
93 pass
93 pass
94
94
95 self.tagscache = None
95 self.tagscache = None
96 self._tagstypecache = None
96 self._tagstypecache = None
97 self.branchcache = None
97 self.branchcache = None
98 self._ubranchcache = None # UTF-8 version of branchcache
98 self._ubranchcache = None # UTF-8 version of branchcache
99 self._branchcachetip = None
99 self._branchcachetip = None
100 self.nodetagscache = None
100 self.nodetagscache = None
101 self.filterpats = {}
101 self.filterpats = {}
102 self._datafilters = {}
102 self._datafilters = {}
103 self._transref = self._lockref = self._wlockref = None
103 self._transref = self._lockref = self._wlockref = None
104
104
105 def __getattr__(self, name):
105 def __getattr__(self, name):
106 if name == 'changelog':
106 if name == 'changelog':
107 self.changelog = changelog.changelog(self.sopener)
107 self.changelog = changelog.changelog(self.sopener)
108 self.sopener.defversion = self.changelog.version
108 self.sopener.defversion = self.changelog.version
109 return self.changelog
109 return self.changelog
110 if name == 'manifest':
110 if name == 'manifest':
111 self.changelog
111 self.changelog
112 self.manifest = manifest.manifest(self.sopener)
112 self.manifest = manifest.manifest(self.sopener)
113 return self.manifest
113 return self.manifest
114 if name == 'dirstate':
114 if name == 'dirstate':
115 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
115 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
116 return self.dirstate
116 return self.dirstate
117 else:
117 else:
118 raise AttributeError, name
118 raise AttributeError, name
119
119
120 def __getitem__(self, changeid):
120 def __getitem__(self, changeid):
121 if changeid == None:
121 if changeid == None:
122 return context.workingctx(self)
122 return context.workingctx(self)
123 return context.changectx(self, changeid)
123 return context.changectx(self, changeid)
124
124
125 def __nonzero__(self):
125 def __nonzero__(self):
126 return True
126 return True
127
127
128 def __len__(self):
128 def __len__(self):
129 return len(self.changelog)
129 return len(self.changelog)
130
130
131 def __iter__(self):
131 def __iter__(self):
132 for i in xrange(len(self)):
132 for i in xrange(len(self)):
133 yield i
133 yield i
134
134
135 def url(self):
135 def url(self):
136 return 'file:' + self.root
136 return 'file:' + self.root
137
137
138 def hook(self, name, throw=False, **args):
138 def hook(self, name, throw=False, **args):
139 return hook.hook(self.ui, self, name, throw, **args)
139 return hook.hook(self.ui, self, name, throw, **args)
140
140
141 tag_disallowed = ':\r\n'
141 tag_disallowed = ':\r\n'
142
142
143 def _tag(self, names, node, message, local, user, date, parent=None,
143 def _tag(self, names, node, message, local, user, date, parent=None,
144 extra={}):
144 extra={}):
145 use_dirstate = parent is None
145 use_dirstate = parent is None
146
146
147 if isinstance(names, str):
147 if isinstance(names, str):
148 allchars = names
148 allchars = names
149 names = (names,)
149 names = (names,)
150 else:
150 else:
151 allchars = ''.join(names)
151 allchars = ''.join(names)
152 for c in self.tag_disallowed:
152 for c in self.tag_disallowed:
153 if c in allchars:
153 if c in allchars:
154 raise util.Abort(_('%r cannot be used in a tag name') % c)
154 raise util.Abort(_('%r cannot be used in a tag name') % c)
155
155
156 for name in names:
156 for name in names:
157 self.hook('pretag', throw=True, node=hex(node), tag=name,
157 self.hook('pretag', throw=True, node=hex(node), tag=name,
158 local=local)
158 local=local)
159
159
160 def writetags(fp, names, munge, prevtags):
160 def writetags(fp, names, munge, prevtags):
161 fp.seek(0, 2)
161 fp.seek(0, 2)
162 if prevtags and prevtags[-1] != '\n':
162 if prevtags and prevtags[-1] != '\n':
163 fp.write('\n')
163 fp.write('\n')
164 for name in names:
164 for name in names:
165 m = munge and munge(name) or name
165 m = munge and munge(name) or name
166 if self._tagstypecache and name in self._tagstypecache:
166 if self._tagstypecache and name in self._tagstypecache:
167 old = self.tagscache.get(name, nullid)
167 old = self.tagscache.get(name, nullid)
168 fp.write('%s %s\n' % (hex(old), m))
168 fp.write('%s %s\n' % (hex(old), m))
169 fp.write('%s %s\n' % (hex(node), m))
169 fp.write('%s %s\n' % (hex(node), m))
170 fp.close()
170 fp.close()
171
171
172 prevtags = ''
172 prevtags = ''
173 if local:
173 if local:
174 try:
174 try:
175 fp = self.opener('localtags', 'r+')
175 fp = self.opener('localtags', 'r+')
176 except IOError, err:
176 except IOError, err:
177 fp = self.opener('localtags', 'a')
177 fp = self.opener('localtags', 'a')
178 else:
178 else:
179 prevtags = fp.read()
179 prevtags = fp.read()
180
180
181 # local tags are stored in the current charset
181 # local tags are stored in the current charset
182 writetags(fp, names, None, prevtags)
182 writetags(fp, names, None, prevtags)
183 for name in names:
183 for name in names:
184 self.hook('tag', node=hex(node), tag=name, local=local)
184 self.hook('tag', node=hex(node), tag=name, local=local)
185 return
185 return
186
186
187 if use_dirstate:
187 if use_dirstate:
188 try:
188 try:
189 fp = self.wfile('.hgtags', 'rb+')
189 fp = self.wfile('.hgtags', 'rb+')
190 except IOError, err:
190 except IOError, err:
191 fp = self.wfile('.hgtags', 'ab')
191 fp = self.wfile('.hgtags', 'ab')
192 else:
192 else:
193 prevtags = fp.read()
193 prevtags = fp.read()
194 else:
194 else:
195 try:
195 try:
196 prevtags = self.filectx('.hgtags', parent).data()
196 prevtags = self.filectx('.hgtags', parent).data()
197 except revlog.LookupError:
197 except revlog.LookupError:
198 pass
198 pass
199 fp = self.wfile('.hgtags', 'wb')
199 fp = self.wfile('.hgtags', 'wb')
200 if prevtags:
200 if prevtags:
201 fp.write(prevtags)
201 fp.write(prevtags)
202
202
203 # committed tags are stored in UTF-8
203 # committed tags are stored in UTF-8
204 writetags(fp, names, util.fromlocal, prevtags)
204 writetags(fp, names, util.fromlocal, prevtags)
205
205
206 if use_dirstate and '.hgtags' not in self.dirstate:
206 if use_dirstate and '.hgtags' not in self.dirstate:
207 self.add(['.hgtags'])
207 self.add(['.hgtags'])
208
208
209 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
209 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
210 extra=extra)
210 extra=extra)
211
211
212 for name in names:
212 for name in names:
213 self.hook('tag', node=hex(node), tag=name, local=local)
213 self.hook('tag', node=hex(node), tag=name, local=local)
214
214
215 return tagnode
215 return tagnode
216
216
def tag(self, names, node, message, local, user, date):
    '''Attach one or more symbolic names to a revision.

    names may be a single string or a list of strings.

    When local is True the tags are recorded in the uncommitted,
    per-repository localtags file; otherwise .hgtags is updated and a
    changeset recording the change is committed (using message, user
    and date).'''
    # Refuse to run while .hgtags itself is dirty in the working copy
    # (modified/added/removed/deleted/unknown): a tag commit would
    # silently fold in those unrelated edits.
    if any('.hgtags' in changes for changes in self.status()[:5]):
        raise util.Abort(_('working copy of .hgtags is changed '
                           '(please commit .hgtags manually)'))

    self._tag(names, node, message, local, user, date)
244
244
def tags(self):
    '''return a mapping of tag to node

    Also populates self._tagstypecache (tag name -> "global"/"local")
    as a side effect.  Results are cached until invalidate().'''
    # serve from cache when already computed
    if self.tagscache:
        return self.tagscache

    globaltags = {}
    tagtypes = {}

    def readtags(lines, fn, tagtype):
        # Parse one tags file ("<hex node> <name>" per line) and merge
        # it into globaltags/tagtypes.  fn is only used in warnings.
        filetags = {}
        count = 0

        def warn(msg):
            self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

        for l in lines:
            count += 1
            if not l:
                continue
            s = l.split(" ", 1)
            if len(s) != 2:
                warn(_("cannot parse entry"))
                continue
            node, key = s
            key = util.tolocal(key.strip()) # stored in UTF-8
            try:
                bin_n = bin(node)
            except TypeError:
                warn(_("node '%s' is not well formed") % node)
                continue
            if bin_n not in self.changelog.nodemap:
                warn(_("tag '%s' refers to unknown node") % key)
                continue

            # h accumulates the older nodes this entry supersedes
            # within the same file (later lines win)
            h = []
            if key in filetags:
                n, h = filetags[key]
                h.append(n)
            filetags[key] = (bin_n, h)

        for k, nh in filetags.items():
            if k not in globaltags:
                globaltags[k] = nh
                tagtypes[k] = tagtype
                continue

            # we prefer the global tag if:
            # it supercedes us OR
            # mutual supercedes and it has a higher rank
            # otherwise we win because we're tip-most
            an, ah = nh
            bn, bh = globaltags[k]
            if (bn != an and an in bh and
                (bn not in ah or len(bh) > len(ah))):
                an = bn
            ah.extend([n for n in bh if n not in ah])
            globaltags[k] = an, ah
            tagtypes[k] = tagtype

    # read the tags file from each head, ending with the tip
    f = None
    for rev, node, fnode in self._hgtagsnodes():
        # reuse the previous filectx where possible to share its revlog
        f = (f and f.filectx(fnode) or
             self.filectx('.hgtags', fileid=fnode))
        readtags(f.data().splitlines(), f, "global")

    try:
        data = util.fromlocal(self.opener("localtags").read())
        # localtags are stored in the local character set
        # while the internal tag table is stored in UTF-8
        readtags(data.splitlines(), "localtags", "local")
    except IOError:
        # no localtags file: nothing to merge
        pass

    # assemble the final caches; tags pointing at nullid are deletions
    # and are kept out of tagscache (but their type is still recorded)
    self.tagscache = {}
    self._tagstypecache = {}
    for k,nh in globaltags.items():
        n = nh[0]
        if n != nullid:
            self.tagscache[k] = n
        self._tagstypecache[k] = tagtypes[k]
    self.tagscache['tip'] = self.changelog.tip()
    return self.tagscache
328
328
def tagtype(self, tagname):
    '''Return the type of the given tag: 'local' for a local tag,
    'global' for a global tag, or None when the tag does not exist.'''
    # tags() fills self._tagstypecache as a side effect
    self.tags()
    return self._tagstypecache.get(tagname)
341
341
342 def _hgtagsnodes(self):
342 def _hgtagsnodes(self):
343 heads = self.heads()
343 heads = self.heads()
344 heads.reverse()
344 heads.reverse()
345 last = {}
345 last = {}
346 ret = []
346 ret = []
347 for node in heads:
347 for node in heads:
348 c = self[node]
348 c = self[node]
349 rev = c.rev()
349 rev = c.rev()
350 try:
350 try:
351 fnode = c.filenode('.hgtags')
351 fnode = c.filenode('.hgtags')
352 except revlog.LookupError:
352 except revlog.LookupError:
353 continue
353 continue
354 ret.append((rev, node, fnode))
354 ret.append((rev, node, fnode))
355 if fnode in last:
355 if fnode in last:
356 ret[last[fnode]] = None
356 ret[last[fnode]] = None
357 last[fnode] = len(ret) - 1
357 last[fnode] = len(ret) - 1
358 return [item for item in ret if item]
358 return [item for item in ret if item]
359
359
def tagslist(self):
    '''return a list of (tag, node) pairs ordered by revision

    Tags whose node cannot be resolved in the changelog sort to the
    beginning of the list.'''
    l = []
    for t, n in self.tags().items():
        try:
            r = self.changelog.rev(n)
        except Exception:
            # was a bare "except:", which also swallowed
            # KeyboardInterrupt/SystemExit; narrowed to Exception,
            # matching _readbranchcache's explicit interrupt re-raise
            r = -2 # sort to the beginning of the list if unknown
        l.append((r, t, n))
    l.sort()
    return [(t, n) for r, t, n in l]
371
371
def nodetags(self, node):
    '''return the tags associated with a node'''
    # lazily build the reverse (node -> tag names) index
    if not self.nodetagscache:
        index = {}
        for tagname, n in self.tags().items():
            index.setdefault(n, []).append(tagname)
        self.nodetagscache = index
    return self.nodetagscache.get(node, [])
379
379
380 def _branchtags(self, partial, lrev):
380 def _branchtags(self, partial, lrev):
381 tiprev = len(self) - 1
381 tiprev = len(self) - 1
382 if lrev != tiprev:
382 if lrev != tiprev:
383 self._updatebranchcache(partial, lrev+1, tiprev+1)
383 self._updatebranchcache(partial, lrev+1, tiprev+1)
384 self._writebranchcache(partial, self.changelog.tip(), tiprev)
384 self._writebranchcache(partial, self.changelog.tip(), tiprev)
385
385
386 return partial
386 return partial
387
387
def branchtags(self):
    '''Return the branch name -> node map, with locally-encoded keys.

    The result is cached keyed on the changelog tip; any new commit
    forces a rebuild, which is incremental when the previous tip is
    still known to the changelog.'''
    tip = self.changelog.tip()
    # fast path: nothing changed since the cache was built
    if self.branchcache is not None and self._branchcachetip == tip:
        return self.branchcache

    oldtip = self._branchcachetip
    self._branchcachetip = tip
    if self.branchcache is None:
        self.branchcache = {} # avoid recursion in changectx
    else:
        self.branchcache.clear() # keep using the same dict
    if oldtip is None or oldtip not in self.changelog.nodemap:
        # no usable in-memory state (first call, or the old tip was
        # stripped): fall back to the on-disk branch cache
        partial, last, lrev = self._readbranchcache()
    else:
        # incremental update starting from the previously cached tip
        lrev = self.changelog.rev(oldtip)
        partial = self._ubranchcache

    self._branchtags(partial, lrev)

    # the branch cache is stored on disk as UTF-8, but in the local
    # charset internally
    for k, v in partial.items():
        self.branchcache[util.tolocal(k)] = v
    # keep the UTF-8 keyed map for the next incremental update
    self._ubranchcache = partial
    return self.branchcache
413
413
def _readbranchcache(self):
    '''Load the on-disk branch cache.

    Returns (partial, last, lrev): partial maps branch label -> node,
    and last/lrev identify the tip the cache was valid for.  Any
    problem (missing or corrupt file, stale tip) yields an empty map
    anchored at the null revision.'''
    partial = {}
    try:
        f = self.opener("branch.cache")
        lines = f.read().split('\n')
        f.close()
    except (IOError, OSError):
        # no cache file: start from scratch
        return {}, nullid, nullrev

    try:
        # first line is "<hex tip> <tiprev>"; the rest are
        # "<hex node> <branch label>" entries
        last, lrev = lines.pop(0).split(" ", 1)
        last, lrev = bin(last), int(lrev)
        if lrev >= len(self) or self[lrev].node() != last:
            # invalidate the cache
            raise ValueError('invalidating branch cache (tip differs)')
        for l in lines:
            if not l: continue
            node, label = l.split(" ", 1)
            partial[label.strip()] = bin(node)
    except (KeyboardInterrupt, util.SignalInterrupt):
        # never swallow user interrupts
        raise
    except Exception, inst:
        # any parse error just discards the cache (it is advisory)
        if self.ui.debugflag:
            self.ui.warn(str(inst), '\n')
        partial, last, lrev = {}, nullid, nullrev
    return partial, last, lrev
440
440
441 def _writebranchcache(self, branches, tip, tiprev):
441 def _writebranchcache(self, branches, tip, tiprev):
442 try:
442 try:
443 f = self.opener("branch.cache", "w", atomictemp=True)
443 f = self.opener("branch.cache", "w", atomictemp=True)
444 f.write("%s %s\n" % (hex(tip), tiprev))
444 f.write("%s %s\n" % (hex(tip), tiprev))
445 for label, node in branches.iteritems():
445 for label, node in branches.iteritems():
446 f.write("%s %s\n" % (hex(node), label))
446 f.write("%s %s\n" % (hex(node), label))
447 f.rename()
447 f.rename()
448 except (IOError, OSError):
448 except (IOError, OSError):
449 pass
449 pass
450
450
def _updatebranchcache(self, partial, start, end):
    '''Fold revisions [start, end) into the branch -> node map.

    Later revisions overwrite earlier ones, so each branch ends up
    mapped to its most recent node within the range.'''
    for rev in xrange(start, end):
        ctx = self[rev]
        partial[ctx.branch()] = ctx.node()
456
456
def lookup(self, key):
    '''Resolve a revision key to a binary changelog node.

    Resolution order: the '.' and 'null' shortcuts, an exact
    changelog match, a tag name, a branch name, then an unambiguous
    node-prefix match.  Raises RepoError when nothing matches (a raw
    20-byte key is hexlified for the error message).'''
    if key == '.':
        # first parent of the working directory
        return self.dirstate.parents()[0]
    elif key == 'null':
        return nullid
    n = self.changelog._match(key)
    if n:
        return n
    if key in self.tags():
        return self.tags()[key]
    if key in self.branchtags():
        return self.branchtags()[key]
    n = self.changelog._partialmatch(key)
    if n:
        return n
    try:
        # a 20-byte key may be a raw binary node: render it readably
        if len(key) == 20:
            key = hex(key)
    except:
        pass
    raise repo.RepoError(_("unknown revision '%s'") % key)
478
478
def local(self):
    '''Always True: this repository class operates on local storage.'''
    return True
481
481
def join(self, f):
    '''Join f onto the repository metadata path (self.path).'''
    return os.path.join(self.path, f)
484
484
def sjoin(self, f):
    '''Join f onto the store path, applying the store filename encoding.'''
    encoded = self.encodefn(f)
    return os.path.join(self.spath, encoded)
488
488
def wjoin(self, f):
    '''Join f onto the working-directory root.'''
    return os.path.join(self.root, f)
491
491
def rjoin(self, f):
    '''Join f onto the root after util.pconvert path conversion.'''
    return os.path.join(self.root, util.pconvert(f))
494
494
def file(self, f):
    '''Return the filelog for path f.

    A leading '/' is tolerated and stripped so both spellings of a
    store path resolve to the same filelog.'''
    # startswith instead of f[0] == '/': the latter raised IndexError
    # on an empty path
    if f.startswith('/'):
        f = f[1:]
    return filelog.filelog(self.sopener, f)
499
499
def changectx(self, changeid):
    '''Return the changectx for changeid (delegates to __getitem__).'''
    return self[changeid]
502
502
def parents(self, changeid=None):
    '''get list of changectxs for parents of changeid'''
    ctx = self[changeid]
    return ctx.parents()
506
506
def filectx(self, path, changeid=None, fileid=None):
    """changeid can be a changeset revision, node, or tag.
    fileid can be a file revision or node."""
    # thin delegation to the context module's filectx constructor
    return context.filectx(self, path, changeid, fileid)
511
511
def getcwd(self):
    '''Delegate to dirstate.getcwd().'''
    return self.dirstate.getcwd()
514
514
def pathto(self, f, cwd=None):
    '''Delegate path formatting to dirstate.pathto.'''
    return self.dirstate.pathto(f, cwd)
517
517
def wfile(self, f, mode='r'):
    '''Open file f from the working directory via wopener.'''
    return self.wopener(f, mode)
520
520
521 def _link(self, f):
521 def _link(self, f):
522 return os.path.islink(self.wjoin(f))
522 return os.path.islink(self.wjoin(f))
523
523
def _filter(self, filter, filename, data):
    '''Run data through the configured filter chain for filename.

    filter is a config section name (the callers use "encode" and
    "decode"); each entry there maps a file pattern to either a
    registered data filter (see adddatafilter) or, failing that, a
    shell command handed to util.filter.  Compiled matchers are
    memoized in self.filterpats; the first matching pattern wins.'''
    if filter not in self.filterpats:
        # compile and cache the (matcher, fn, params) list once
        l = []
        for pat, cmd in self.ui.configitems(filter):
            mf = util.matcher(self.root, "", [pat], [], [])[1]
            fn = None
            params = cmd
            # a command starting with a registered filter name selects
            # that filter; the remainder becomes its parameter string
            for name, filterfn in self._datafilters.iteritems():
                if cmd.startswith(name):
                    fn = filterfn
                    params = cmd[len(name):].lstrip()
                    break
            if not fn:
                # no registered filter matched: treat cmd as a shell
                # command piped through util.filter
                fn = lambda s, c, **kwargs: util.filter(s, c)
            # Wrap old filters not supporting keyword arguments
            if not inspect.getargspec(fn)[2]:
                oldfn = fn
                fn = lambda s, c, **kwargs: oldfn(s, c)
            l.append((mf, fn, params))
        self.filterpats[filter] = l

    for mf, fn, cmd in self.filterpats[filter]:
        if mf(filename):
            self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
            data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
            break

    return data
552
552
def adddatafilter(self, name, filter):
    '''Register a named data-filter callable for _filter dispatch.'''
    self._datafilters[name] = filter
555
555
def wread(self, filename):
    '''Read a working-directory file (or its symlink target) and
    pass the bytes through the "encode" filter chain.'''
    if self._link(filename):
        raw = os.readlink(self.wjoin(filename))
    else:
        raw = self.wopener(filename, 'r').read()
    return self._filter("encode", filename, raw)
562
562
def wwrite(self, filename, data, flags):
    '''Write data to a working-directory file after running the
    "decode" filter chain, then apply the given flags.'''
    data = self._filter("decode", filename, data)
    target = self.wjoin(filename)
    try:
        # remove first so a fresh file replaces links/old modes
        os.unlink(target)
    except OSError:
        pass
    self.wopener(filename, 'w').write(data)
    util.set_flags(target, flags)
571
571
def wwritedata(self, filename, data):
    '''Return data as it would be written to the working directory
    (i.e. after the "decode" filter chain), without writing.'''
    return self._filter("decode", filename, data)
574
574
def transaction(self):
    '''Open a new store transaction, or nest into a live one.

    Refuses to start while an abandoned journal exists, snapshots
    the dirstate and branch for rollback, and arranges for the
    journal files to be renamed to undo.* on successful close.'''
    # nest into an existing transaction when one is still alive
    if self._transref and self._transref():
        return self._transref().nest()

    # abort here if the journal already exists
    if os.path.exists(self.sjoin("journal")):
        raise repo.RepoError(_("journal already exists - run hg recover"))

    # save dirstate for rollback
    try:
        ds = self.opener("dirstate").read()
    except IOError:
        ds = ""
    self.opener("journal.dirstate", "w").write(ds)
    self.opener("journal.branch", "w").write(self.dirstate.branch())

    # on close, journal.* files become undo.* so rollback() can work
    renames = [(self.sjoin("journal"), self.sjoin("undo")),
               (self.join("journal.dirstate"), self.join("undo.dirstate")),
               (self.join("journal.branch"), self.join("undo.branch"))]
    tr = transaction.transaction(self.ui.warn, self.sopener,
                                 self.sjoin("journal"),
                                 aftertrans(renames),
                                 self._createmode)
    # weakref: we must not keep an abandoned transaction alive
    self._transref = weakref.ref(tr)
    return tr
600
600
def recover(self):
    '''Replay an interrupted transaction's journal, if one exists.

    Returns True when a journal was rolled back, False otherwise.
    Holds the store lock for the duration.'''
    l = self.lock()
    try:
        if os.path.exists(self.sjoin("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("journal"))
            # in-memory caches no longer match the store
            self.invalidate()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
    finally:
        # dropping the reference releases the lock
        del l
614
614
def rollback(self):
    '''Undo the last transaction, restoring dirstate and branch.'''
    wlock = lock = None
    try:
        wlock = self.wlock()
        lock = self.lock()
        if os.path.exists(self.sjoin("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            try:
                branch = self.opener("undo.branch").read()
                self.dirstate.setbranch(branch)
            except IOError:
                # restoring the branch is best effort: warn, keep going
                self.ui.warn(_("Named branch could not be reset, "
                               "current branch still is: %s\n")
                             % util.tolocal(self.dirstate.branch()))
            # drop caches that may reference rolled-back data
            self.invalidate()
            self.dirstate.invalidate()
        else:
            self.ui.warn(_("no rollback information available\n"))
    finally:
        # dropping the references releases both locks
        del lock, wlock
637
637
def invalidate(self):
    '''Drop all in-memory caches so they are re-read on next access.'''
    # changelog/manifest are lazily (re)created; remove only if present
    for attr in ("changelog", "manifest"):
        if attr in self.__dict__:
            delattr(self, attr)
    self.tagscache = None
    self._tagstypecache = None
    self.nodetagscache = None
    self.branchcache = None
    self._ubranchcache = None
    self._branchcachetip = None
648
648
def _lock(self, lockname, wait, releasefn, acquirefn, desc):
    '''Acquire the named lock and return it.

    A non-blocking attempt is made first; when it is held and wait is
    true, we retry with the configured ui.timeout (default 600s)
    instead of failing.  acquirefn, if given, runs once the lock is
    held.  releasefn is passed through to the lock object.'''
    try:
        l = lock.lock(lockname, 0, releasefn, desc=desc)
    except lock.LockHeld, inst:
        if not wait:
            raise
        self.ui.warn(_("waiting for lock on %s held by %r\n") %
                     (desc, inst.locker))
        # default to 600 seconds timeout
        l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                      releasefn, desc=desc)
    if acquirefn:
        acquirefn()
    return l
663
663
def lock(self, wait=True):
    '''Return the store lock, reusing a still-live one when possible.'''
    existing = self._lockref and self._lockref()
    if existing:
        return existing

    l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                   _('repository %s') % self.origroot)
    # weak reference: the lock is released when callers drop theirs
    self._lockref = weakref.ref(l)
    return l
672
672
def wlock(self, wait=True):
    '''Return the working-directory lock, reusing a live one if any.

    On release the dirstate is written; on acquisition it is
    invalidated so stale state is re-read.'''
    existing = self._wlockref and self._wlockref()
    if existing:
        return existing

    l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                   self.dirstate.invalidate, _('working directory of %s') %
                   self.origroot)
    # weak reference: the lock is released when callers drop theirs
    self._wlockref = weakref.ref(l)
    return l
682
682
def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
    """
    commit an individual file as part of a larger transaction

    Records copy/rename metadata in meta, collapses the parents when
    one is an ancestor of the other, and returns the new filelog node
    (or the existing parent node when the file is unmodified and has
    no metadata).  Files actually committed are appended to
    changelist.
    """

    fn = fctx.path()
    t = fctx.data()
    fl = self.file(fn)
    # parent filelog nodes from each manifest (nullid when absent)
    fp1 = manifest1.get(fn, nullid)
    fp2 = manifest2.get(fn, nullid)

    meta = {}
    cp = fctx.renamed()
    if cp and cp[0] != fn:
        cp = cp[0]
        # Mark the new revision of this file as a copy of another
        # file.  This copy data will effectively act as a parent
        # of this new revision.  If this is a merge, the first
        # parent will be the nullid (meaning "look up the copy data")
        # and the second one will be the other parent.  For example:
        #
        # 0 --- 1 --- 3   rev1 changes file foo
        #   \       /     rev2 renames foo to bar and changes it
        #    \- 2 -/      rev3 should have bar with all changes and
        #                      should record that bar descends from
        #                      bar in rev2 and foo in rev1
        #
        # this allows this merge to succeed:
        #
        # 0 --- 1 --- 3   rev4 reverts the content change from rev2
        #   \       /     merging rev3 and rev4 should use bar@rev2
        #    \- 2 --- 4        as the merge base
        #
        meta["copy"] = cp
        if not manifest2: # not a branch merge
            meta["copyrev"] = hex(manifest1[cp])
            fp2 = nullid
        elif fp2 != nullid: # copied on remote side
            meta["copyrev"] = hex(manifest1[cp])
        elif fp1 != nullid: # copied on local side, reversed
            meta["copyrev"] = hex(manifest2[cp])
            fp2 = fp1
        elif cp in manifest2: # directory rename on local side
            meta["copyrev"] = hex(manifest2[cp])
        else: # directory rename on remote side
            meta["copyrev"] = hex(manifest1[cp])
        self.ui.debug(_(" %s: copy %s:%s\n") %
                      (fn, cp, meta["copyrev"]))
        fp1 = nullid
    elif fp2 != nullid:
        # is one parent an ancestor of the other?
        fpa = fl.ancestor(fp1, fp2)
        if fpa == fp1:
            fp1, fp2 = fp2, nullid
        elif fpa == fp2:
            fp2 = nullid

    # is the file unmodified from the parent? report existing entry
    if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
        return fp1

    changelist.append(fn)
    return fl.add(t, meta, tr, linkrev, fp1, fp2)
746
746
def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
    """Commit an explicit list of files without consulting the dirstate.

    If p1 is not given, both parents default to the current dirstate
    parents.  Delegates to commit() with empty_ok=True so an empty
    message/changeset is permitted.
    """
    if p1 is None:
        p1, p2 = self.dirstate.parents()
    return self.commit(files=files, text=text, user=user, date=date,
                       p1=p1, p2=p2, extra=extra, empty_ok=True)
752
752
def commit(self, files=None, text="", user=None, date=None,
           match=None, force=False, force_editor=False,
           p1=None, p2=None, extra={}, empty_ok=False):
    """Commit changes to the repository.

    If files is given, commit exactly those files (they must be
    tracked); otherwise commit everything reported changed by
    status(match=...).  When p1 is supplied we are in rawcommit mode
    and the dirstate is bypassed.  Returns the new changeset node (via
    _commitctx) or None if nothing changed.
    """
    wlock = lock = None
    if files:
        files = util.unique(files)
    try:
        wlock = self.wlock()
        lock = self.lock()
        use_dirstate = (p1 is None) # not rawcommit

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True

            # a partial commit of a merge would record only half the
            # merge's file changes, so forbid it
            if (not force and p2 != nullid and
                (match and (match.files() or match.anypats()))):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            if files:
                # classify the explicit file list by dirstate status
                modified, removed = [], []
                for f in files:
                    s = self.dirstate[f]
                    if s in 'nma':
                        modified.append(f)
                    elif s == 'r':
                        removed.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
                changes = [modified, [], removed, [], []]
            else:
                changes = self.status(match=match)
        else:
            p1, p2 = p1, p2 or nullid
            # only move the dirstate if it already points at p1
            update_dirstate = (self.dirstate.parents()[0] == p1)
            changes = [files, [], [], [], []]

        wctx = context.workingctx(self, (p1, p2), text, user, date,
                                  extra, changes)
        return self._commitctx(wctx, force, force_editor, empty_ok,
                               use_dirstate, update_dirstate)
    finally:
        del lock, wlock
797
797
def commitctx(self, ctx):
    """Commit a pre-built (memory) context, ignoring the dirstate.

    Forces the commit and allows it to be empty; the dirstate is
    neither consulted nor updated.  Returns the new changeset node.
    """
    wlock = lock = None
    try:
        wlock = self.wlock()
        lock = self.lock()
        return self._commitctx(ctx, force=True, force_editor=False,
                               empty_ok=True, use_dirstate=False,
                               update_dirstate=False)
    finally:
        del lock, wlock
808
808
def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
               use_dirstate=True, update_dirstate=True):
    """Internal commit engine shared by commit(), rawcommit() and
    commitctx().

    Checks the files of wctx into their filelogs, builds and stores the
    new manifest, runs the editor if a message is needed, writes the
    changelog entry inside a transaction and fires the pre/post commit
    hooks.  Returns the new changeset node, or None if nothing changed.
    """
    tr = None
    valid = 0 # don't save the dirstate if this isn't set
    try:
        commit = wctx.modified() + wctx.added()
        remove = wctx.removed()
        extra = wctx.extra().copy()
        branchname = extra['branch']
        user = wctx.user()
        text = wctx.description()

        p1, p2 = [p.node() for p in wctx.parents()]
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            oldname = c1[5].get("branch") # stored in UTF-8
            # nothing to do if no files changed, no merge in progress
            # and the branch name is unchanged
            if (not commit and not remove and not force and p2 == nullid
                and branchname == oldname):
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        tr = self.transaction()
        # hand filelogs a weak proxy so dropping tr in the finally
        # clause actually releases (and aborts) the transaction
        trp = weakref.proxy(tr)

        # check in files
        new = {}
        changed = []
        linkrev = len(self)
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                fctx = wctx.filectx(f)
                newflags = fctx.flags()
                new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
                if ((not changed or changed[-1] != f) and
                    m2.get(f) != new[f]):
                    # mention the file in the changelog if some
                    # flag changed, even if there was no content
                    # change.
                    if m1.flags(f) != newflags:
                        changed.append(f)
                m1.set(f, newflags)
                if use_dirstate:
                    self.dirstate.normal(f)

            except (OSError, IOError):
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    # rawcommit mode: an unreadable file is treated
                    # as removed instead of aborting
                    remove.append(f)

        # update manifest
        m1.update(new)
        remove.sort()
        removed = []

        for f in remove:
            if f in m1:
                del m1[f]
                removed.append(f)
            elif f in m2:
                # only in the second parent's manifest: still record
                # the removal in the changelog
                removed.append(f)
        mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                               (new, removed))

        # add changeset
        if (not empty_ok and not text) or force_editor:
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            edittext.append(_("HG: Enter commit message."
                              " Lines beginning with 'HG:' are removed."))
            edittext.append("HG: --")
            edittext.append("HG: user: %s" % user)
            if p2 != nullid:
                edittext.append("HG: branch merge")
            if branchname:
                edittext.append("HG: branch '%s'" % util.tolocal(branchname))
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in removed])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # normalize the message: strip trailing whitespace and
        # leading blank lines
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines and use_dirstate:
            raise util.Abort(_("empty commit message"))
        text = '\n'.join(lines)

        n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                               user, wctx.date(), extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        if self.branchcache:
            self.branchtags()

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
        if use_dirstate:
            for f in removed:
                self.dirstate.forget(f)
        valid = 1 # our dirstate updates are complete

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
    finally:
        if not valid: # don't save our updated dirstate
            self.dirstate.invalidate()
        del tr
940
940
def walk(self, match, node=None):
    '''
    walk recursively through the directory tree or a given
    changeset, finding all files matched by the match
    function

    Yields matching file names.  With a node, walks that revision's
    manifest and reports (via match.bad) requested files missing from
    it; without one, walks the dirstate.
    '''

    if node:
        fdict = dict.fromkeys(match.files())
        # for dirstate.walk, files=['.'] means "walk the whole tree".
        # follow that here, too
        fdict.pop('.', None)
        mdict = self.manifest.read(self.changelog.read(node)[0])
        mfiles = mdict.keys()
        mfiles.sort()
        for fn in mfiles:
            for ffn in fdict:
                # match if the file is the exact name or a directory
                if ffn == fn or fn.startswith("%s/" % ffn):
                    del fdict[ffn]
                    break
            if match(fn):
                yield fn
        # whatever is left in fdict was asked for but not found in
        # the manifest
        ffiles = fdict.keys()
        ffiles.sort()
        for fn in ffiles:
            if match.bad(fn, 'No such file in rev ' + short(node)) \
               and match(fn):
                yield fn
    else:
        for fn in self.dirstate.walk(match):
            yield fn
973
973
def status(self, node1=None, node2=None, match=None,
           ignored=False, clean=False, unknown=True):
    """return status of files between two nodes or node and working directory

    If node1 is None, use the first dirstate parent instead.
    If node2 is None, compare node1 with working directory.

    Returns a 7-tuple of sorted file lists:
    (modified, added, removed, deleted, unknown, ignored, clean).
    The ignored/clean/unknown flags control whether those
    (potentially expensive) lists are populated.
    """

    def fcmp(fn, getnode):
        # full content comparison of working file vs stored revision
        t1 = self.wread(fn)
        return self.file(fn).cmp(getnode(fn), t1)

    def mfmatches(node):
        # manifest of `node`, restricted to files accepted by `match`
        change = self.changelog.read(node)
        mf = self.manifest.read(change[0]).copy()
        for fn in mf.keys():
            if not match(fn):
                del mf[fn]
        return mf

    if not match:
        match = match_.always(self.root, self.getcwd())

    # keep the flags under distinct names; the parameter names are
    # about to be reused for the result lists
    listignored, listclean, listunknown = ignored, clean, unknown
    modified, added, removed, deleted, unknown = [], [], [], [], []
    ignored, clean = [], []

    compareworking = False
    if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
        compareworking = True

    if not compareworking:
        # read the manifest from node1 before the manifest from node2,
        # so that we'll hit the manifest cache if we're going through
        # all the revisions in parent->child order.
        mf1 = mfmatches(node1)

    # are we comparing the working directory?
    if not node2:
        (lookup, modified, added, removed, deleted, unknown,
         ignored, clean) = self.dirstate.status(match, listignored,
                                                listclean, listunknown)
        # are we comparing working dir against its parent?
        if compareworking:
            if lookup:
                fixup = []
                # do a full compare of any files that might have changed
                ctx = self['.']
                ff = self.dirstate.flagfunc(ctx.flags)
                for f in lookup:
                    if (f not in ctx or ff(f) != ctx.flags(f)
                        or ctx[f].cmp(self.wread(f))):
                        modified.append(f)
                    else:
                        fixup.append(f)
                        if listclean:
                            clean.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    wlock = None
                    try:
                        try:
                            # best effort: skip the fixup if someone
                            # else holds the wlock
                            wlock = self.wlock(False)
                        except lock.LockException:
                            pass
                        if wlock:
                            for f in fixup:
                                self.dirstate.normal(f)
                    finally:
                        del wlock
        else:
            # we are comparing working dir against non-parent
            # generate a pseudo-manifest for the working dir
            # XXX: create it in dirstate.py ?
            mf2 = mfmatches(self.dirstate.parents()[0])
            ff = self.dirstate.flagfunc(mf2.flags)
            for f in lookup + modified + added:
                mf2[f] = ""
                mf2.set(f, ff(f))
            for f in removed:
                if f in mf2:
                    del mf2[f]

    else:
        # we are comparing two revisions
        mf2 = mfmatches(node2)

    if not compareworking:
        # flush lists from dirstate before comparing manifests
        modified, added, clean = [], [], []

        # make sure to sort the files so we talk to the disk in a
        # reasonable order
        mf2keys = mf2.keys()
        mf2keys.sort()
        getnode = lambda fn: mf1.get(fn, nullid)
        for fn in mf2keys:
            if fn in mf1:
                if (mf1.flags(fn) != mf2.flags(fn) or
                    (mf1[fn] != mf2[fn] and
                     (mf2[fn] != "" or fcmp(fn, getnode)))):
                    modified.append(fn)
                elif listclean:
                    clean.append(fn)
                del mf1[fn]
            else:
                added.append(fn)

        # anything still in mf1 is gone from mf2
        removed = mf1.keys()

    # sort and return results:
    for l in modified, added, removed, deleted, unknown, ignored, clean:
        l.sort()
    return (modified, added, removed, deleted, unknown, ignored, clean)
1089
def add(self, list):
    """Schedule the given files for addition to the dirstate.

    Warns about (and skips) missing files, unsupported file types and
    files already tracked.  Returns the list of rejected names.
    """
    wlock = self.wlock()
    try:
        rejected = []
        for f in list:
            p = self.wjoin(f)
            try:
                st = os.lstat(p)
            # NOTE(review): bare except also swallows e.g.
            # KeyboardInterrupt; OSError is what lstat raises
            except:
                self.ui.warn(_("%s does not exist!\n") % f)
                rejected.append(f)
                continue
            if st.st_size > 10000000:
                self.ui.warn(_("%s: files over 10MB may cause memory and"
                               " performance problems\n"
                               "(use 'hg revert %s' to unadd the file)\n")
                             % (f, f))
            if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                self.ui.warn(_("%s not added: only files and symlinks "
                               "supported currently\n") % f)
                rejected.append(p)
            elif self.dirstate[f] in 'amn':
                self.ui.warn(_("%s already tracked!\n") % f)
            elif self.dirstate[f] == 'r':
                # re-adding a file scheduled for removal: keep it
                self.dirstate.normallookup(f)
            else:
                self.dirstate.add(f)
        return rejected
    finally:
        del wlock
1119
1120
def forget(self, list):
    """Undo a pending add for each file in list.

    Files not in the 'added' state are left alone with a warning.
    """
    wlock = self.wlock()
    try:
        for f in list:
            if self.dirstate[f] != 'a':
                self.ui.warn(_("%s not added!\n") % f)
            else:
                self.dirstate.forget(f)
    finally:
        del wlock
1130
1131
1131 def remove(self, list, unlink=False):
1132 def remove(self, list, unlink=False):
1132 wlock = None
1133 wlock = None
1133 try:
1134 try:
1134 if unlink:
1135 if unlink:
1135 for f in list:
1136 for f in list:
1136 try:
1137 try:
1137 util.unlink(self.wjoin(f))
1138 util.unlink(self.wjoin(f))
1138 except OSError, inst:
1139 except OSError, inst:
1139 if inst.errno != errno.ENOENT:
1140 if inst.errno != errno.ENOENT:
1140 raise
1141 raise
1141 wlock = self.wlock()
1142 wlock = self.wlock()
1142 for f in list:
1143 for f in list:
1143 if unlink and os.path.exists(self.wjoin(f)):
1144 if unlink and os.path.exists(self.wjoin(f)):
1144 self.ui.warn(_("%s still exists!\n") % f)
1145 self.ui.warn(_("%s still exists!\n") % f)
1145 elif self.dirstate[f] == 'a':
1146 elif self.dirstate[f] == 'a':
1146 self.dirstate.forget(f)
1147 self.dirstate.forget(f)
1147 elif f not in self.dirstate:
1148 elif f not in self.dirstate:
1148 self.ui.warn(_("%s not tracked!\n") % f)
1149 self.ui.warn(_("%s not tracked!\n") % f)
1149 else:
1150 else:
1150 self.dirstate.remove(f)
1151 self.dirstate.remove(f)
1151 finally:
1152 finally:
1152 del wlock
1153 del wlock
1153
1154
def undelete(self, list):
    """Restore files scheduled for removal from their parent revision.

    The file content is taken from whichever dirstate parent's
    manifest contains it, written back to the working directory, and
    the dirstate entry is reset to normal.
    """
    wlock = None
    try:
        manifests = [self.manifest.read(self.changelog.read(p)[0])
                     for p in self.dirstate.parents() if p != nullid]
        wlock = self.wlock()
        for f in list:
            if self.dirstate[f] != 'r':
                self.ui.warn("%s not removed!\n" % f)
            else:
                # prefer the first parent's copy if present
                m = f in manifests[0] and manifests[0] or manifests[1]
                t = self.file(f).read(m[f])
                self.wwrite(f, t, m.flags(f))
                self.dirstate.normal(f)
    finally:
        del wlock
1170
1171
def copy(self, source, dest):
    """Record that dest is a copy of source in the dirstate.

    dest must already exist in the working directory and be a regular
    file or symlink; otherwise a warning is issued and nothing is
    recorded.  dest is added to the dirstate if not yet tracked.
    """
    wlock = None
    try:
        p = self.wjoin(dest)
        if not (os.path.exists(p) or os.path.islink(p)):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not (os.path.isfile(p) or os.path.islink(p)):
            self.ui.warn(_("copy failed: %s is not a file or a "
                           "symbolic link\n") % dest)
        else:
            wlock = self.wlock()
            if dest not in self.dirstate:
                self.dirstate.add(dest)
            self.dirstate.copy(source, dest)
    finally:
        del wlock
1187
1188
def heads(self, start=None):
    """Return repository head nodes, sorted by descending revision.

    If start is given, only heads reachable from it are considered.
    """
    heads = self.changelog.heads(start)
    # sort the output in rev descending order
    # (decorate with negated rev, sort, undecorate)
    heads = [(-self.changelog.rev(h), h) for h in heads]
    heads.sort()
    return [n for (r, n) in heads]
1194
1195
def branchheads(self, branch=None, start=None):
    """Return the head nodes of the named branch (default: the
    working directory's branch), optionally limited to descendants
    of start.  Returns [] for an unknown branch.
    """
    if branch is None:
        branch = self[None].branch()
    branches = self.branchtags()
    if branch not in branches:
        return []
    # The basic algorithm is this:
    #
    # Start from the branch tip since there are no later revisions that can
    # possibly be in this branch, and the tip is a guaranteed head.
    #
    # Remember the tip's parents as the first ancestors, since these by
    # definition are not heads.
    #
    # Step backwards from the brach tip through all the revisions. We are
    # guaranteed by the rules of Mercurial that we will now be visiting the
    # nodes in reverse topological order (children before parents).
    #
    # If a revision is one of the ancestors of a head then we can toss it
    # out of the ancestors set (we've already found it and won't be
    # visiting it again) and put its parents in the ancestors set.
    #
    # Otherwise, if a revision is in the branch it's another head, since it
    # wasn't in the ancestor list of an existing head. So add it to the
    # head list, and add its parents to the ancestor list.
    #
    # If it is not in the branch ignore it.
    #
    # Once we have a list of heads, use nodesbetween to filter out all the
    # heads that cannot be reached from startrev. There may be a more
    # efficient way to do this as part of the previous algorithm.

    set = util.set
    heads = [self.changelog.rev(branches[branch])]
    # Don't care if ancestors contains nullrev or not.
    ancestors = set(self.changelog.parentrevs(heads[0]))
    for rev in xrange(heads[0] - 1, nullrev, -1):
        if rev in ancestors:
            ancestors.update(self.changelog.parentrevs(rev))
            ancestors.remove(rev)
        elif self[rev].branch() == branch:
            heads.append(rev)
            ancestors.update(self.changelog.parentrevs(rev))
    heads = [self.changelog.node(rev) for rev in heads]
    if start is not None:
        heads = self.changelog.nodesbetween([start], heads)[2]
    return heads
1242
1243
def branches(self, nodes):
    """For each node, follow first parents back to the previous
    branch point (a merge or the root) and return a list of
    (head, branch-tip, parent1, parent2) tuples.

    With no nodes given, starts from the changelog tip.
    """
    if not nodes:
        nodes = [self.changelog.tip()]
    b = []
    for n in nodes:
        t = n
        while 1:
            p = self.changelog.parents(n)
            # stop at a merge (two real parents) or at the root
            if p[1] != nullid or p[0] == nullid:
                b.append((t, n, p[0], p[1]))
                break
            n = p[0]
    return b
1256
1257
def between(self, pairs):
    """For each (top, bottom) pair, walk first parents from top down
    to bottom and return the nodes at exponentially growing distances
    (1, 2, 4, ...) from top.  Returns one list per pair.
    """
    r = []

    for top, bottom in pairs:
        n, l, i = top, [], 0
        f = 1

        while n != bottom:
            p = self.changelog.parents(n)[0]
            if i == f:
                # record this node and double the next sample distance
                l.append(n)
                f = f * 2
            n = p
            i += 1

        r.append(l)

    return r
1275
1276
1276 def findincoming(self, remote, base=None, heads=None, force=False):
1277 def findincoming(self, remote, base=None, heads=None, force=False):
1277 """Return list of roots of the subsets of missing nodes from remote
1278 """Return list of roots of the subsets of missing nodes from remote
1278
1279
1279 If base dict is specified, assume that these nodes and their parents
1280 If base dict is specified, assume that these nodes and their parents
1280 exist on the remote side and that no child of a node of base exists
1281 exist on the remote side and that no child of a node of base exists
1281 in both remote and self.
1282 in both remote and self.
1282 Furthermore base will be updated to include the nodes that exists
1283 Furthermore base will be updated to include the nodes that exists
1283 in self and remote but no children exists in self and remote.
1284 in self and remote but no children exists in self and remote.
1284 If a list of heads is specified, return only nodes which are heads
1285 If a list of heads is specified, return only nodes which are heads
1285 or ancestors of these heads.
1286 or ancestors of these heads.
1286
1287
1287 All the ancestors of base are in self and in remote.
1288 All the ancestors of base are in self and in remote.
1288 All the descendants of the list returned are missing in self.
1289 All the descendants of the list returned are missing in self.
1289 (and so we know that the rest of the nodes are missing in remote, see
1290 (and so we know that the rest of the nodes are missing in remote, see
1290 outgoing)
1291 outgoing)
1291 """
1292 """
1292 m = self.changelog.nodemap
1293 m = self.changelog.nodemap
1293 search = []
1294 search = []
1294 fetch = {}
1295 fetch = {}
1295 seen = {}
1296 seen = {}
1296 seenbranch = {}
1297 seenbranch = {}
1297 if base == None:
1298 if base == None:
1298 base = {}
1299 base = {}
1299
1300
1300 if not heads:
1301 if not heads:
1301 heads = remote.heads()
1302 heads = remote.heads()
1302
1303
1303 if self.changelog.tip() == nullid:
1304 if self.changelog.tip() == nullid:
1304 base[nullid] = 1
1305 base[nullid] = 1
1305 if heads != [nullid]:
1306 if heads != [nullid]:
1306 return [nullid]
1307 return [nullid]
1307 return []
1308 return []
1308
1309
1309 # assume we're closer to the tip than the root
1310 # assume we're closer to the tip than the root
1310 # and start by examining the heads
1311 # and start by examining the heads
1311 self.ui.status(_("searching for changes\n"))
1312 self.ui.status(_("searching for changes\n"))
1312
1313
1313 unknown = []
1314 unknown = []
1314 for h in heads:
1315 for h in heads:
1315 if h not in m:
1316 if h not in m:
1316 unknown.append(h)
1317 unknown.append(h)
1317 else:
1318 else:
1318 base[h] = 1
1319 base[h] = 1
1319
1320
1320 if not unknown:
1321 if not unknown:
1321 return []
1322 return []
1322
1323
1323 req = dict.fromkeys(unknown)
1324 req = dict.fromkeys(unknown)
1324 reqcnt = 0
1325 reqcnt = 0
1325
1326
1326 # search through remote branches
1327 # search through remote branches
1327 # a 'branch' here is a linear segment of history, with four parts:
1328 # a 'branch' here is a linear segment of history, with four parts:
1328 # head, root, first parent, second parent
1329 # head, root, first parent, second parent
1329 # (a branch always has two parents (or none) by definition)
1330 # (a branch always has two parents (or none) by definition)
1330 unknown = remote.branches(unknown)
1331 unknown = remote.branches(unknown)
1331 while unknown:
1332 while unknown:
1332 r = []
1333 r = []
1333 while unknown:
1334 while unknown:
1334 n = unknown.pop(0)
1335 n = unknown.pop(0)
1335 if n[0] in seen:
1336 if n[0] in seen:
1336 continue
1337 continue
1337
1338
1338 self.ui.debug(_("examining %s:%s\n")
1339 self.ui.debug(_("examining %s:%s\n")
1339 % (short(n[0]), short(n[1])))
1340 % (short(n[0]), short(n[1])))
1340 if n[0] == nullid: # found the end of the branch
1341 if n[0] == nullid: # found the end of the branch
1341 pass
1342 pass
1342 elif n in seenbranch:
1343 elif n in seenbranch:
1343 self.ui.debug(_("branch already found\n"))
1344 self.ui.debug(_("branch already found\n"))
1344 continue
1345 continue
1345 elif n[1] and n[1] in m: # do we know the base?
1346 elif n[1] and n[1] in m: # do we know the base?
1346 self.ui.debug(_("found incomplete branch %s:%s\n")
1347 self.ui.debug(_("found incomplete branch %s:%s\n")
1347 % (short(n[0]), short(n[1])))
1348 % (short(n[0]), short(n[1])))
1348 search.append(n) # schedule branch range for scanning
1349 search.append(n) # schedule branch range for scanning
1349 seenbranch[n] = 1
1350 seenbranch[n] = 1
1350 else:
1351 else:
1351 if n[1] not in seen and n[1] not in fetch:
1352 if n[1] not in seen and n[1] not in fetch:
1352 if n[2] in m and n[3] in m:
1353 if n[2] in m and n[3] in m:
1353 self.ui.debug(_("found new changeset %s\n") %
1354 self.ui.debug(_("found new changeset %s\n") %
1354 short(n[1]))
1355 short(n[1]))
1355 fetch[n[1]] = 1 # earliest unknown
1356 fetch[n[1]] = 1 # earliest unknown
1356 for p in n[2:4]:
1357 for p in n[2:4]:
1357 if p in m:
1358 if p in m:
1358 base[p] = 1 # latest known
1359 base[p] = 1 # latest known
1359
1360
1360 for p in n[2:4]:
1361 for p in n[2:4]:
1361 if p not in req and p not in m:
1362 if p not in req and p not in m:
1362 r.append(p)
1363 r.append(p)
1363 req[p] = 1
1364 req[p] = 1
1364 seen[n[0]] = 1
1365 seen[n[0]] = 1
1365
1366
1366 if r:
1367 if r:
1367 reqcnt += 1
1368 reqcnt += 1
1368 self.ui.debug(_("request %d: %s\n") %
1369 self.ui.debug(_("request %d: %s\n") %
1369 (reqcnt, " ".join(map(short, r))))
1370 (reqcnt, " ".join(map(short, r))))
1370 for p in xrange(0, len(r), 10):
1371 for p in xrange(0, len(r), 10):
1371 for b in remote.branches(r[p:p+10]):
1372 for b in remote.branches(r[p:p+10]):
1372 self.ui.debug(_("received %s:%s\n") %
1373 self.ui.debug(_("received %s:%s\n") %
1373 (short(b[0]), short(b[1])))
1374 (short(b[0]), short(b[1])))
1374 unknown.append(b)
1375 unknown.append(b)
1375
1376
1376 # do binary search on the branches we found
1377 # do binary search on the branches we found
1377 while search:
1378 while search:
1378 n = search.pop(0)
1379 n = search.pop(0)
1379 reqcnt += 1
1380 reqcnt += 1
1380 l = remote.between([(n[0], n[1])])[0]
1381 l = remote.between([(n[0], n[1])])[0]
1381 l.append(n[1])
1382 l.append(n[1])
1382 p = n[0]
1383 p = n[0]
1383 f = 1
1384 f = 1
1384 for i in l:
1385 for i in l:
1385 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1386 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1386 if i in m:
1387 if i in m:
1387 if f <= 2:
1388 if f <= 2:
1388 self.ui.debug(_("found new branch changeset %s\n") %
1389 self.ui.debug(_("found new branch changeset %s\n") %
1389 short(p))
1390 short(p))
1390 fetch[p] = 1
1391 fetch[p] = 1
1391 base[i] = 1
1392 base[i] = 1
1392 else:
1393 else:
1393 self.ui.debug(_("narrowed branch search to %s:%s\n")
1394 self.ui.debug(_("narrowed branch search to %s:%s\n")
1394 % (short(p), short(i)))
1395 % (short(p), short(i)))
1395 search.append((p, i))
1396 search.append((p, i))
1396 break
1397 break
1397 p, f = i, f * 2
1398 p, f = i, f * 2
1398
1399
1399 # sanity check our fetch list
1400 # sanity check our fetch list
1400 for f in fetch.keys():
1401 for f in fetch.keys():
1401 if f in m:
1402 if f in m:
1402 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1403 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1403
1404
1404 if base.keys() == [nullid]:
1405 if base.keys() == [nullid]:
1405 if force:
1406 if force:
1406 self.ui.warn(_("warning: repository is unrelated\n"))
1407 self.ui.warn(_("warning: repository is unrelated\n"))
1407 else:
1408 else:
1408 raise util.Abort(_("repository is unrelated"))
1409 raise util.Abort(_("repository is unrelated"))
1409
1410
1410 self.ui.debug(_("found new changesets starting at ") +
1411 self.ui.debug(_("found new changesets starting at ") +
1411 " ".join([short(f) for f in fetch]) + "\n")
1412 " ".join([short(f) for f in fetch]) + "\n")
1412
1413
1413 self.ui.debug(_("%d total queries\n") % reqcnt)
1414 self.ui.debug(_("%d total queries\n") % reqcnt)
1414
1415
1415 return fetch.keys()
1416 return fetch.keys()
1416
1417
1417 def findoutgoing(self, remote, base=None, heads=None, force=False):
1418 def findoutgoing(self, remote, base=None, heads=None, force=False):
1418 """Return list of nodes that are roots of subsets not in remote
1419 """Return list of nodes that are roots of subsets not in remote
1419
1420
1420 If base dict is specified, assume that these nodes and their parents
1421 If base dict is specified, assume that these nodes and their parents
1421 exist on the remote side.
1422 exist on the remote side.
1422 If a list of heads is specified, return only nodes which are heads
1423 If a list of heads is specified, return only nodes which are heads
1423 or ancestors of these heads, and return a second element which
1424 or ancestors of these heads, and return a second element which
1424 contains all remote heads which get new children.
1425 contains all remote heads which get new children.
1425 """
1426 """
1426 if base == None:
1427 if base == None:
1427 base = {}
1428 base = {}
1428 self.findincoming(remote, base, heads, force=force)
1429 self.findincoming(remote, base, heads, force=force)
1429
1430
1430 self.ui.debug(_("common changesets up to ")
1431 self.ui.debug(_("common changesets up to ")
1431 + " ".join(map(short, base.keys())) + "\n")
1432 + " ".join(map(short, base.keys())) + "\n")
1432
1433
1433 remain = dict.fromkeys(self.changelog.nodemap)
1434 remain = dict.fromkeys(self.changelog.nodemap)
1434
1435
1435 # prune everything remote has from the tree
1436 # prune everything remote has from the tree
1436 del remain[nullid]
1437 del remain[nullid]
1437 remove = base.keys()
1438 remove = base.keys()
1438 while remove:
1439 while remove:
1439 n = remove.pop(0)
1440 n = remove.pop(0)
1440 if n in remain:
1441 if n in remain:
1441 del remain[n]
1442 del remain[n]
1442 for p in self.changelog.parents(n):
1443 for p in self.changelog.parents(n):
1443 remove.append(p)
1444 remove.append(p)
1444
1445
1445 # find every node whose parents have been pruned
1446 # find every node whose parents have been pruned
1446 subset = []
1447 subset = []
1447 # find every remote head that will get new children
1448 # find every remote head that will get new children
1448 updated_heads = {}
1449 updated_heads = {}
1449 for n in remain:
1450 for n in remain:
1450 p1, p2 = self.changelog.parents(n)
1451 p1, p2 = self.changelog.parents(n)
1451 if p1 not in remain and p2 not in remain:
1452 if p1 not in remain and p2 not in remain:
1452 subset.append(n)
1453 subset.append(n)
1453 if heads:
1454 if heads:
1454 if p1 in heads:
1455 if p1 in heads:
1455 updated_heads[p1] = True
1456 updated_heads[p1] = True
1456 if p2 in heads:
1457 if p2 in heads:
1457 updated_heads[p2] = True
1458 updated_heads[p2] = True
1458
1459
1459 # this is the set of all roots we have to push
1460 # this is the set of all roots we have to push
1460 if heads:
1461 if heads:
1461 return subset, updated_heads.keys()
1462 return subset, updated_heads.keys()
1462 else:
1463 else:
1463 return subset
1464 return subset
1464
1465
1465 def pull(self, remote, heads=None, force=False):
1466 def pull(self, remote, heads=None, force=False):
1466 lock = self.lock()
1467 lock = self.lock()
1467 try:
1468 try:
1468 fetch = self.findincoming(remote, heads=heads, force=force)
1469 fetch = self.findincoming(remote, heads=heads, force=force)
1469 if fetch == [nullid]:
1470 if fetch == [nullid]:
1470 self.ui.status(_("requesting all changes\n"))
1471 self.ui.status(_("requesting all changes\n"))
1471
1472
1472 if not fetch:
1473 if not fetch:
1473 self.ui.status(_("no changes found\n"))
1474 self.ui.status(_("no changes found\n"))
1474 return 0
1475 return 0
1475
1476
1476 if heads is None:
1477 if heads is None:
1477 cg = remote.changegroup(fetch, 'pull')
1478 cg = remote.changegroup(fetch, 'pull')
1478 else:
1479 else:
1479 if 'changegroupsubset' not in remote.capabilities:
1480 if 'changegroupsubset' not in remote.capabilities:
1480 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1481 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1481 cg = remote.changegroupsubset(fetch, heads, 'pull')
1482 cg = remote.changegroupsubset(fetch, heads, 'pull')
1482 return self.addchangegroup(cg, 'pull', remote.url())
1483 return self.addchangegroup(cg, 'pull', remote.url())
1483 finally:
1484 finally:
1484 del lock
1485 del lock
1485
1486
1486 def push(self, remote, force=False, revs=None):
1487 def push(self, remote, force=False, revs=None):
1487 # there are two ways to push to remote repo:
1488 # there are two ways to push to remote repo:
1488 #
1489 #
1489 # addchangegroup assumes local user can lock remote
1490 # addchangegroup assumes local user can lock remote
1490 # repo (local filesystem, old ssh servers).
1491 # repo (local filesystem, old ssh servers).
1491 #
1492 #
1492 # unbundle assumes local user cannot lock remote repo (new ssh
1493 # unbundle assumes local user cannot lock remote repo (new ssh
1493 # servers, http servers).
1494 # servers, http servers).
1494
1495
1495 if remote.capable('unbundle'):
1496 if remote.capable('unbundle'):
1496 return self.push_unbundle(remote, force, revs)
1497 return self.push_unbundle(remote, force, revs)
1497 return self.push_addchangegroup(remote, force, revs)
1498 return self.push_addchangegroup(remote, force, revs)
1498
1499
1499 def prepush(self, remote, force, revs):
1500 def prepush(self, remote, force, revs):
1500 base = {}
1501 base = {}
1501 remote_heads = remote.heads()
1502 remote_heads = remote.heads()
1502 inc = self.findincoming(remote, base, remote_heads, force=force)
1503 inc = self.findincoming(remote, base, remote_heads, force=force)
1503
1504
1504 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1505 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1505 if revs is not None:
1506 if revs is not None:
1506 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1507 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1507 else:
1508 else:
1508 bases, heads = update, self.changelog.heads()
1509 bases, heads = update, self.changelog.heads()
1509
1510
1510 if not bases:
1511 if not bases:
1511 self.ui.status(_("no changes found\n"))
1512 self.ui.status(_("no changes found\n"))
1512 return None, 1
1513 return None, 1
1513 elif not force:
1514 elif not force:
1514 # check if we're creating new remote heads
1515 # check if we're creating new remote heads
1515 # to be a remote head after push, node must be either
1516 # to be a remote head after push, node must be either
1516 # - unknown locally
1517 # - unknown locally
1517 # - a local outgoing head descended from update
1518 # - a local outgoing head descended from update
1518 # - a remote head that's known locally and not
1519 # - a remote head that's known locally and not
1519 # ancestral to an outgoing head
1520 # ancestral to an outgoing head
1520
1521
1521 warn = 0
1522 warn = 0
1522
1523
1523 if remote_heads == [nullid]:
1524 if remote_heads == [nullid]:
1524 warn = 0
1525 warn = 0
1525 elif not revs and len(heads) > len(remote_heads):
1526 elif not revs and len(heads) > len(remote_heads):
1526 warn = 1
1527 warn = 1
1527 else:
1528 else:
1528 newheads = list(heads)
1529 newheads = list(heads)
1529 for r in remote_heads:
1530 for r in remote_heads:
1530 if r in self.changelog.nodemap:
1531 if r in self.changelog.nodemap:
1531 desc = self.changelog.heads(r, heads)
1532 desc = self.changelog.heads(r, heads)
1532 l = [h for h in heads if h in desc]
1533 l = [h for h in heads if h in desc]
1533 if not l:
1534 if not l:
1534 newheads.append(r)
1535 newheads.append(r)
1535 else:
1536 else:
1536 newheads.append(r)
1537 newheads.append(r)
1537 if len(newheads) > len(remote_heads):
1538 if len(newheads) > len(remote_heads):
1538 warn = 1
1539 warn = 1
1539
1540
1540 if warn:
1541 if warn:
1541 self.ui.warn(_("abort: push creates new remote heads!\n"))
1542 self.ui.warn(_("abort: push creates new remote heads!\n"))
1542 self.ui.status(_("(did you forget to merge?"
1543 self.ui.status(_("(did you forget to merge?"
1543 " use push -f to force)\n"))
1544 " use push -f to force)\n"))
1544 return None, 0
1545 return None, 0
1545 elif inc:
1546 elif inc:
1546 self.ui.warn(_("note: unsynced remote changes!\n"))
1547 self.ui.warn(_("note: unsynced remote changes!\n"))
1547
1548
1548
1549
1549 if revs is None:
1550 if revs is None:
1550 cg = self.changegroup(update, 'push')
1551 cg = self.changegroup(update, 'push')
1551 else:
1552 else:
1552 cg = self.changegroupsubset(update, revs, 'push')
1553 cg = self.changegroupsubset(update, revs, 'push')
1553 return cg, remote_heads
1554 return cg, remote_heads
1554
1555
1555 def push_addchangegroup(self, remote, force, revs):
1556 def push_addchangegroup(self, remote, force, revs):
1556 lock = remote.lock()
1557 lock = remote.lock()
1557 try:
1558 try:
1558 ret = self.prepush(remote, force, revs)
1559 ret = self.prepush(remote, force, revs)
1559 if ret[0] is not None:
1560 if ret[0] is not None:
1560 cg, remote_heads = ret
1561 cg, remote_heads = ret
1561 return remote.addchangegroup(cg, 'push', self.url())
1562 return remote.addchangegroup(cg, 'push', self.url())
1562 return ret[1]
1563 return ret[1]
1563 finally:
1564 finally:
1564 del lock
1565 del lock
1565
1566
1566 def push_unbundle(self, remote, force, revs):
1567 def push_unbundle(self, remote, force, revs):
1567 # local repo finds heads on server, finds out what revs it
1568 # local repo finds heads on server, finds out what revs it
1568 # must push. once revs transferred, if server finds it has
1569 # must push. once revs transferred, if server finds it has
1569 # different heads (someone else won commit/push race), server
1570 # different heads (someone else won commit/push race), server
1570 # aborts.
1571 # aborts.
1571
1572
1572 ret = self.prepush(remote, force, revs)
1573 ret = self.prepush(remote, force, revs)
1573 if ret[0] is not None:
1574 if ret[0] is not None:
1574 cg, remote_heads = ret
1575 cg, remote_heads = ret
1575 if force: remote_heads = ['force']
1576 if force: remote_heads = ['force']
1576 return remote.unbundle(cg, remote_heads, 'push')
1577 return remote.unbundle(cg, remote_heads, 'push')
1577 return ret[1]
1578 return ret[1]
1578
1579
1579 def changegroupinfo(self, nodes, source):
1580 def changegroupinfo(self, nodes, source):
1580 if self.ui.verbose or source == 'bundle':
1581 if self.ui.verbose or source == 'bundle':
1581 self.ui.status(_("%d changesets found\n") % len(nodes))
1582 self.ui.status(_("%d changesets found\n") % len(nodes))
1582 if self.ui.debugflag:
1583 if self.ui.debugflag:
1583 self.ui.debug(_("List of changesets:\n"))
1584 self.ui.debug(_("List of changesets:\n"))
1584 for node in nodes:
1585 for node in nodes:
1585 self.ui.debug("%s\n" % hex(node))
1586 self.ui.debug("%s\n" % hex(node))
1586
1587
1587 def changegroupsubset(self, bases, heads, source, extranodes=None):
1588 def changegroupsubset(self, bases, heads, source, extranodes=None):
1588 """This function generates a changegroup consisting of all the nodes
1589 """This function generates a changegroup consisting of all the nodes
1589 that are descendents of any of the bases, and ancestors of any of
1590 that are descendents of any of the bases, and ancestors of any of
1590 the heads.
1591 the heads.
1591
1592
1592 It is fairly complex as determining which filenodes and which
1593 It is fairly complex as determining which filenodes and which
1593 manifest nodes need to be included for the changeset to be complete
1594 manifest nodes need to be included for the changeset to be complete
1594 is non-trivial.
1595 is non-trivial.
1595
1596
1596 Another wrinkle is doing the reverse, figuring out which changeset in
1597 Another wrinkle is doing the reverse, figuring out which changeset in
1597 the changegroup a particular filenode or manifestnode belongs to.
1598 the changegroup a particular filenode or manifestnode belongs to.
1598
1599
1599 The caller can specify some nodes that must be included in the
1600 The caller can specify some nodes that must be included in the
1600 changegroup using the extranodes argument. It should be a dict
1601 changegroup using the extranodes argument. It should be a dict
1601 where the keys are the filenames (or 1 for the manifest), and the
1602 where the keys are the filenames (or 1 for the manifest), and the
1602 values are lists of (node, linknode) tuples, where node is a wanted
1603 values are lists of (node, linknode) tuples, where node is a wanted
1603 node and linknode is the changelog node that should be transmitted as
1604 node and linknode is the changelog node that should be transmitted as
1604 the linkrev.
1605 the linkrev.
1605 """
1606 """
1606
1607
1607 self.hook('preoutgoing', throw=True, source=source)
1608 self.hook('preoutgoing', throw=True, source=source)
1608
1609
1609 # Set up some initial variables
1610 # Set up some initial variables
1610 # Make it easy to refer to self.changelog
1611 # Make it easy to refer to self.changelog
1611 cl = self.changelog
1612 cl = self.changelog
1612 # msng is short for missing - compute the list of changesets in this
1613 # msng is short for missing - compute the list of changesets in this
1613 # changegroup.
1614 # changegroup.
1614 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1615 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1615 self.changegroupinfo(msng_cl_lst, source)
1616 self.changegroupinfo(msng_cl_lst, source)
1616 # Some bases may turn out to be superfluous, and some heads may be
1617 # Some bases may turn out to be superfluous, and some heads may be
1617 # too. nodesbetween will return the minimal set of bases and heads
1618 # too. nodesbetween will return the minimal set of bases and heads
1618 # necessary to re-create the changegroup.
1619 # necessary to re-create the changegroup.
1619
1620
1620 # Known heads are the list of heads that it is assumed the recipient
1621 # Known heads are the list of heads that it is assumed the recipient
1621 # of this changegroup will know about.
1622 # of this changegroup will know about.
1622 knownheads = {}
1623 knownheads = {}
1623 # We assume that all parents of bases are known heads.
1624 # We assume that all parents of bases are known heads.
1624 for n in bases:
1625 for n in bases:
1625 for p in cl.parents(n):
1626 for p in cl.parents(n):
1626 if p != nullid:
1627 if p != nullid:
1627 knownheads[p] = 1
1628 knownheads[p] = 1
1628 knownheads = knownheads.keys()
1629 knownheads = knownheads.keys()
1629 if knownheads:
1630 if knownheads:
1630 # Now that we know what heads are known, we can compute which
1631 # Now that we know what heads are known, we can compute which
1631 # changesets are known. The recipient must know about all
1632 # changesets are known. The recipient must know about all
1632 # changesets required to reach the known heads from the null
1633 # changesets required to reach the known heads from the null
1633 # changeset.
1634 # changeset.
1634 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1635 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1635 junk = None
1636 junk = None
1636 # Transform the list into an ersatz set.
1637 # Transform the list into an ersatz set.
1637 has_cl_set = dict.fromkeys(has_cl_set)
1638 has_cl_set = dict.fromkeys(has_cl_set)
1638 else:
1639 else:
1639 # If there were no known heads, the recipient cannot be assumed to
1640 # If there were no known heads, the recipient cannot be assumed to
1640 # know about any changesets.
1641 # know about any changesets.
1641 has_cl_set = {}
1642 has_cl_set = {}
1642
1643
1643 # Make it easy to refer to self.manifest
1644 # Make it easy to refer to self.manifest
1644 mnfst = self.manifest
1645 mnfst = self.manifest
1645 # We don't know which manifests are missing yet
1646 # We don't know which manifests are missing yet
1646 msng_mnfst_set = {}
1647 msng_mnfst_set = {}
1647 # Nor do we know which filenodes are missing.
1648 # Nor do we know which filenodes are missing.
1648 msng_filenode_set = {}
1649 msng_filenode_set = {}
1649
1650
1650 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1651 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1651 junk = None
1652 junk = None
1652
1653
1653 # A changeset always belongs to itself, so the changenode lookup
1654 # A changeset always belongs to itself, so the changenode lookup
1654 # function for a changenode is identity.
1655 # function for a changenode is identity.
1655 def identity(x):
1656 def identity(x):
1656 return x
1657 return x
1657
1658
1658 # A function generating function. Sets up an environment for the
1659 # A function generating function. Sets up an environment for the
1659 # inner function.
1660 # inner function.
1660 def cmp_by_rev_func(revlog):
1661 def cmp_by_rev_func(revlog):
1661 # Compare two nodes by their revision number in the environment's
1662 # Compare two nodes by their revision number in the environment's
1662 # revision history. Since the revision number both represents the
1663 # revision history. Since the revision number both represents the
1663 # most efficient order to read the nodes in, and represents a
1664 # most efficient order to read the nodes in, and represents a
1664 # topological sorting of the nodes, this function is often useful.
1665 # topological sorting of the nodes, this function is often useful.
1665 def cmp_by_rev(a, b):
1666 def cmp_by_rev(a, b):
1666 return cmp(revlog.rev(a), revlog.rev(b))
1667 return cmp(revlog.rev(a), revlog.rev(b))
1667 return cmp_by_rev
1668 return cmp_by_rev
1668
1669
1669 # If we determine that a particular file or manifest node must be a
1670 # If we determine that a particular file or manifest node must be a
1670 # node that the recipient of the changegroup will already have, we can
1671 # node that the recipient of the changegroup will already have, we can
1671 # also assume the recipient will have all the parents. This function
1672 # also assume the recipient will have all the parents. This function
1672 # prunes them from the set of missing nodes.
1673 # prunes them from the set of missing nodes.
1673 def prune_parents(revlog, hasset, msngset):
1674 def prune_parents(revlog, hasset, msngset):
1674 haslst = hasset.keys()
1675 haslst = hasset.keys()
1675 haslst.sort(cmp_by_rev_func(revlog))
1676 haslst.sort(cmp_by_rev_func(revlog))
1676 for node in haslst:
1677 for node in haslst:
1677 parentlst = [p for p in revlog.parents(node) if p != nullid]
1678 parentlst = [p for p in revlog.parents(node) if p != nullid]
1678 while parentlst:
1679 while parentlst:
1679 n = parentlst.pop()
1680 n = parentlst.pop()
1680 if n not in hasset:
1681 if n not in hasset:
1681 hasset[n] = 1
1682 hasset[n] = 1
1682 p = [p for p in revlog.parents(n) if p != nullid]
1683 p = [p for p in revlog.parents(n) if p != nullid]
1683 parentlst.extend(p)
1684 parentlst.extend(p)
1684 for n in hasset:
1685 for n in hasset:
1685 msngset.pop(n, None)
1686 msngset.pop(n, None)
1686
1687
1687 # This is a function generating function used to set up an environment
1688 # This is a function generating function used to set up an environment
1688 # for the inner function to execute in.
1689 # for the inner function to execute in.
1689 def manifest_and_file_collector(changedfileset):
1690 def manifest_and_file_collector(changedfileset):
1690 # This is an information gathering function that gathers
1691 # This is an information gathering function that gathers
1691 # information from each changeset node that goes out as part of
1692 # information from each changeset node that goes out as part of
1692 # the changegroup. The information gathered is a list of which
1693 # the changegroup. The information gathered is a list of which
1693 # manifest nodes are potentially required (the recipient may
1694 # manifest nodes are potentially required (the recipient may
1694 # already have them) and total list of all files which were
1695 # already have them) and total list of all files which were
1695 # changed in any changeset in the changegroup.
1696 # changed in any changeset in the changegroup.
1696 #
1697 #
1697 # We also remember the first changenode we saw any manifest
1698 # We also remember the first changenode we saw any manifest
1698 # referenced by so we can later determine which changenode 'owns'
1699 # referenced by so we can later determine which changenode 'owns'
1699 # the manifest.
1700 # the manifest.
1700 def collect_manifests_and_files(clnode):
1701 def collect_manifests_and_files(clnode):
1701 c = cl.read(clnode)
1702 c = cl.read(clnode)
1702 for f in c[3]:
1703 for f in c[3]:
1703 # This is to make sure we only have one instance of each
1704 # This is to make sure we only have one instance of each
1704 # filename string for each filename.
1705 # filename string for each filename.
1705 changedfileset.setdefault(f, f)
1706 changedfileset.setdefault(f, f)
1706 msng_mnfst_set.setdefault(c[0], clnode)
1707 msng_mnfst_set.setdefault(c[0], clnode)
1707 return collect_manifests_and_files
1708 return collect_manifests_and_files
1708
1709
1709 # Figure out which manifest nodes (of the ones we think might be part
1710 # Figure out which manifest nodes (of the ones we think might be part
1710 # of the changegroup) the recipient must know about and remove them
1711 # of the changegroup) the recipient must know about and remove them
1711 # from the changegroup.
1712 # from the changegroup.
1712 def prune_manifests():
1713 def prune_manifests():
1713 has_mnfst_set = {}
1714 has_mnfst_set = {}
1714 for n in msng_mnfst_set:
1715 for n in msng_mnfst_set:
1715 # If a 'missing' manifest thinks it belongs to a changenode
1716 # If a 'missing' manifest thinks it belongs to a changenode
1716 # the recipient is assumed to have, obviously the recipient
1717 # the recipient is assumed to have, obviously the recipient
1717 # must have that manifest.
1718 # must have that manifest.
1718 linknode = cl.node(mnfst.linkrev(n))
1719 linknode = cl.node(mnfst.linkrev(n))
1719 if linknode in has_cl_set:
1720 if linknode in has_cl_set:
1720 has_mnfst_set[n] = 1
1721 has_mnfst_set[n] = 1
1721 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1722 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1722
1723
1723 # Use the information collected in collect_manifests_and_files to say
1724 # Use the information collected in collect_manifests_and_files to say
1724 # which changenode any manifestnode belongs to.
1725 # which changenode any manifestnode belongs to.
1725 def lookup_manifest_link(mnfstnode):
1726 def lookup_manifest_link(mnfstnode):
1726 return msng_mnfst_set[mnfstnode]
1727 return msng_mnfst_set[mnfstnode]
1727
1728
1728 # A function generating function that sets up the initial environment
1729 # A function generating function that sets up the initial environment
1729 # the inner function.
1730 # the inner function.
1730 def filenode_collector(changedfiles):
1731 def filenode_collector(changedfiles):
1731 next_rev = [0]
1732 next_rev = [0]
1732 # This gathers information from each manifestnode included in the
1733 # This gathers information from each manifestnode included in the
1733 # changegroup about which filenodes the manifest node references
1734 # changegroup about which filenodes the manifest node references
1734 # so we can include those in the changegroup too.
1735 # so we can include those in the changegroup too.
1735 #
1736 #
1736 # It also remembers which changenode each filenode belongs to. It
1737 # It also remembers which changenode each filenode belongs to. It
1737 # does this by assuming the a filenode belongs to the changenode
1738 # does this by assuming the a filenode belongs to the changenode
1738 # the first manifest that references it belongs to.
1739 # the first manifest that references it belongs to.
1739 def collect_msng_filenodes(mnfstnode):
1740 def collect_msng_filenodes(mnfstnode):
1740 r = mnfst.rev(mnfstnode)
1741 r = mnfst.rev(mnfstnode)
1741 if r == next_rev[0]:
1742 if r == next_rev[0]:
1742 # If the last rev we looked at was the one just previous,
1743 # If the last rev we looked at was the one just previous,
1743 # we only need to see a diff.
1744 # we only need to see a diff.
1744 deltamf = mnfst.readdelta(mnfstnode)
1745 deltamf = mnfst.readdelta(mnfstnode)
1745 # For each line in the delta
1746 # For each line in the delta
1746 for f, fnode in deltamf.items():
1747 for f, fnode in deltamf.items():
1747 f = changedfiles.get(f, None)
1748 f = changedfiles.get(f, None)
1748 # And if the file is in the list of files we care
1749 # And if the file is in the list of files we care
1749 # about.
1750 # about.
1750 if f is not None:
1751 if f is not None:
1751 # Get the changenode this manifest belongs to
1752 # Get the changenode this manifest belongs to
1752 clnode = msng_mnfst_set[mnfstnode]
1753 clnode = msng_mnfst_set[mnfstnode]
1753 # Create the set of filenodes for the file if
1754 # Create the set of filenodes for the file if
1754 # there isn't one already.
1755 # there isn't one already.
1755 ndset = msng_filenode_set.setdefault(f, {})
1756 ndset = msng_filenode_set.setdefault(f, {})
1756 # And set the filenode's changelog node to the
1757 # And set the filenode's changelog node to the
1757 # manifest's if it hasn't been set already.
1758 # manifest's if it hasn't been set already.
1758 ndset.setdefault(fnode, clnode)
1759 ndset.setdefault(fnode, clnode)
1759 else:
1760 else:
1760 # Otherwise we need a full manifest.
1761 # Otherwise we need a full manifest.
1761 m = mnfst.read(mnfstnode)
1762 m = mnfst.read(mnfstnode)
1762 # For every file in we care about.
1763 # For every file in we care about.
1763 for f in changedfiles:
1764 for f in changedfiles:
1764 fnode = m.get(f, None)
1765 fnode = m.get(f, None)
1765 # If it's in the manifest
1766 # If it's in the manifest
1766 if fnode is not None:
1767 if fnode is not None:
1767 # See comments above.
1768 # See comments above.
1768 clnode = msng_mnfst_set[mnfstnode]
1769 clnode = msng_mnfst_set[mnfstnode]
1769 ndset = msng_filenode_set.setdefault(f, {})
1770 ndset = msng_filenode_set.setdefault(f, {})
1770 ndset.setdefault(fnode, clnode)
1771 ndset.setdefault(fnode, clnode)
1771 # Remember the revision we hope to see next.
1772 # Remember the revision we hope to see next.
1772 next_rev[0] = r + 1
1773 next_rev[0] = r + 1
1773 return collect_msng_filenodes
1774 return collect_msng_filenodes
1774
1775
1775 # We have a list of filenodes we think we need for a file, lets remove
1776 # We have a list of filenodes we think we need for a file, lets remove
1776 # all those we now the recipient must have.
1777 # all those we now the recipient must have.
1777 def prune_filenodes(f, filerevlog):
1778 def prune_filenodes(f, filerevlog):
1778 msngset = msng_filenode_set[f]
1779 msngset = msng_filenode_set[f]
1779 hasset = {}
1780 hasset = {}
1780 # If a 'missing' filenode thinks it belongs to a changenode we
1781 # If a 'missing' filenode thinks it belongs to a changenode we
1781 # assume the recipient must have, then the recipient must have
1782 # assume the recipient must have, then the recipient must have
1782 # that filenode.
1783 # that filenode.
1783 for n in msngset:
1784 for n in msngset:
1784 clnode = cl.node(filerevlog.linkrev(n))
1785 clnode = cl.node(filerevlog.linkrev(n))
1785 if clnode in has_cl_set:
1786 if clnode in has_cl_set:
1786 hasset[n] = 1
1787 hasset[n] = 1
1787 prune_parents(filerevlog, hasset, msngset)
1788 prune_parents(filerevlog, hasset, msngset)
1788
1789
1789 # A function generator function that sets up the a context for the
1790 # A function generator function that sets up the a context for the
1790 # inner function.
1791 # inner function.
1791 def lookup_filenode_link_func(fname):
1792 def lookup_filenode_link_func(fname):
1792 msngset = msng_filenode_set[fname]
1793 msngset = msng_filenode_set[fname]
1793 # Lookup the changenode the filenode belongs to.
1794 # Lookup the changenode the filenode belongs to.
1794 def lookup_filenode_link(fnode):
1795 def lookup_filenode_link(fnode):
1795 return msngset[fnode]
1796 return msngset[fnode]
1796 return lookup_filenode_link
1797 return lookup_filenode_link
1797
1798
1798 # Add the nodes that were explicitly requested.
1799 # Add the nodes that were explicitly requested.
1799 def add_extra_nodes(name, nodes):
1800 def add_extra_nodes(name, nodes):
1800 if not extranodes or name not in extranodes:
1801 if not extranodes or name not in extranodes:
1801 return
1802 return
1802
1803
1803 for node, linknode in extranodes[name]:
1804 for node, linknode in extranodes[name]:
1804 if node not in nodes:
1805 if node not in nodes:
1805 nodes[node] = linknode
1806 nodes[node] = linknode
1806
1807
1807 # Now that we have all theses utility functions to help out and
1808 # Now that we have all theses utility functions to help out and
1808 # logically divide up the task, generate the group.
1809 # logically divide up the task, generate the group.
1809 def gengroup():
1810 def gengroup():
1810 # The set of changed files starts empty.
1811 # The set of changed files starts empty.
1811 changedfiles = {}
1812 changedfiles = {}
1812 # Create a changenode group generator that will call our functions
1813 # Create a changenode group generator that will call our functions
1813 # back to lookup the owning changenode and collect information.
1814 # back to lookup the owning changenode and collect information.
1814 group = cl.group(msng_cl_lst, identity,
1815 group = cl.group(msng_cl_lst, identity,
1815 manifest_and_file_collector(changedfiles))
1816 manifest_and_file_collector(changedfiles))
1816 for chnk in group:
1817 for chnk in group:
1817 yield chnk
1818 yield chnk
1818
1819
1819 # The list of manifests has been collected by the generator
1820 # The list of manifests has been collected by the generator
1820 # calling our functions back.
1821 # calling our functions back.
1821 prune_manifests()
1822 prune_manifests()
1822 add_extra_nodes(1, msng_mnfst_set)
1823 add_extra_nodes(1, msng_mnfst_set)
1823 msng_mnfst_lst = msng_mnfst_set.keys()
1824 msng_mnfst_lst = msng_mnfst_set.keys()
1824 # Sort the manifestnodes by revision number.
1825 # Sort the manifestnodes by revision number.
1825 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1826 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1826 # Create a generator for the manifestnodes that calls our lookup
1827 # Create a generator for the manifestnodes that calls our lookup
1827 # and data collection functions back.
1828 # and data collection functions back.
1828 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1829 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1829 filenode_collector(changedfiles))
1830 filenode_collector(changedfiles))
1830 for chnk in group:
1831 for chnk in group:
1831 yield chnk
1832 yield chnk
1832
1833
1833 # These are no longer needed, dereference and toss the memory for
1834 # These are no longer needed, dereference and toss the memory for
1834 # them.
1835 # them.
1835 msng_mnfst_lst = None
1836 msng_mnfst_lst = None
1836 msng_mnfst_set.clear()
1837 msng_mnfst_set.clear()
1837
1838
1838 if extranodes:
1839 if extranodes:
1839 for fname in extranodes:
1840 for fname in extranodes:
1840 if isinstance(fname, int):
1841 if isinstance(fname, int):
1841 continue
1842 continue
1842 add_extra_nodes(fname,
1843 add_extra_nodes(fname,
1843 msng_filenode_set.setdefault(fname, {}))
1844 msng_filenode_set.setdefault(fname, {}))
1844 changedfiles[fname] = 1
1845 changedfiles[fname] = 1
1845 changedfiles = changedfiles.keys()
1846 changedfiles = changedfiles.keys()
1846 changedfiles.sort()
1847 changedfiles.sort()
1847 # Go through all our files in order sorted by name.
1848 # Go through all our files in order sorted by name.
1848 for fname in changedfiles:
1849 for fname in changedfiles:
1849 filerevlog = self.file(fname)
1850 filerevlog = self.file(fname)
1850 if not len(filerevlog):
1851 if not len(filerevlog):
1851 raise util.Abort(_("empty or missing revlog for %s") % fname)
1852 raise util.Abort(_("empty or missing revlog for %s") % fname)
1852 # Toss out the filenodes that the recipient isn't really
1853 # Toss out the filenodes that the recipient isn't really
1853 # missing.
1854 # missing.
1854 if fname in msng_filenode_set:
1855 if fname in msng_filenode_set:
1855 prune_filenodes(fname, filerevlog)
1856 prune_filenodes(fname, filerevlog)
1856 msng_filenode_lst = msng_filenode_set[fname].keys()
1857 msng_filenode_lst = msng_filenode_set[fname].keys()
1857 else:
1858 else:
1858 msng_filenode_lst = []
1859 msng_filenode_lst = []
1859 # If any filenodes are left, generate the group for them,
1860 # If any filenodes are left, generate the group for them,
1860 # otherwise don't bother.
1861 # otherwise don't bother.
1861 if len(msng_filenode_lst) > 0:
1862 if len(msng_filenode_lst) > 0:
1862 yield changegroup.chunkheader(len(fname))
1863 yield changegroup.chunkheader(len(fname))
1863 yield fname
1864 yield fname
1864 # Sort the filenodes by their revision #
1865 # Sort the filenodes by their revision #
1865 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1866 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1866 # Create a group generator and only pass in a changenode
1867 # Create a group generator and only pass in a changenode
1867 # lookup function as we need to collect no information
1868 # lookup function as we need to collect no information
1868 # from filenodes.
1869 # from filenodes.
1869 group = filerevlog.group(msng_filenode_lst,
1870 group = filerevlog.group(msng_filenode_lst,
1870 lookup_filenode_link_func(fname))
1871 lookup_filenode_link_func(fname))
1871 for chnk in group:
1872 for chnk in group:
1872 yield chnk
1873 yield chnk
1873 if fname in msng_filenode_set:
1874 if fname in msng_filenode_set:
1874 # Don't need this anymore, toss it to free memory.
1875 # Don't need this anymore, toss it to free memory.
1875 del msng_filenode_set[fname]
1876 del msng_filenode_set[fname]
1876 # Signal that no more groups are left.
1877 # Signal that no more groups are left.
1877 yield changegroup.closechunk()
1878 yield changegroup.closechunk()
1878
1879
1879 if msng_cl_lst:
1880 if msng_cl_lst:
1880 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1881 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1881
1882
1882 return util.chunkbuffer(gengroup())
1883 return util.chunkbuffer(gengroup())
1883
1884
def changegroup(self, basenodes, source):
    """Generate a changegroup of all nodes that we have that a recipient
    doesn't.

    This is much easier than the previous function as we can assume that
    the recipient has any changenode we aren't sending them."""

    self.hook('preoutgoing', throw=True, source=source)

    cl = self.changelog
    # every changeset descending from basenodes is outgoing
    nodes = cl.nodesbetween(basenodes, None)[0]
    # changelog revision numbers being sent; used to filter linkrevs below
    revset = dict.fromkeys([cl.rev(n) for n in nodes])
    self.changegroupinfo(nodes, source)

    def passthrough(x):
        # a changelog node is its own link node
        return x

    def outgoing_nodes(log):
        # yield the nodes of a revlog whose linkrev lands in the outgoing set
        for rev in log:
            node = log.node(rev)
            if log.linkrev(node) in revset:
                yield node

    def file_collector(fileset):
        # side-effect callback: record every file touched by a changeset
        def collect(clnode):
            for fname in cl.read(clnode)[3]:
                fileset[fname] = 1
        return collect

    def linknode_func(revlog):
        # map a node of *revlog* back to the changelog node it belongs to
        def linknode(node):
            return cl.node(revlog.linkrev(node))
        return linknode

    def gengroup():
        # changelog group first; collecting changed files as we go
        changedfiles = {}
        for chunk in cl.group(nodes, passthrough,
                              file_collector(changedfiles)):
            yield chunk
        changedfiles = changedfiles.keys()
        changedfiles.sort()

        # then the manifest group
        mnfst = self.manifest
        for chunk in mnfst.group(outgoing_nodes(mnfst),
                                 linknode_func(mnfst)):
            yield chunk

        # finally one group per changed file, in sorted name order
        for fname in changedfiles:
            filerevlog = self.file(fname)
            if not len(filerevlog):
                raise util.Abort(_("empty or missing revlog for %s") % fname)
            nodelist = list(outgoing_nodes(filerevlog))
            if nodelist:
                yield changegroup.chunkheader(len(fname))
                yield fname
                for chunk in filerevlog.group(nodelist,
                                              linknode_func(filerevlog)):
                    yield chunk

        # an empty chunk signals the end of the stream
        yield changegroup.closechunk()

    if nodes:
        self.hook('outgoing', node=hex(nodes[0]), source=source)

    return util.chunkbuffer(gengroup())
1953
1954
def addchangegroup(self, source, srctype, url, emptyok=False):
    """add changegroup to repo.

    return values:
    - nothing changed or no source: 0
    - more heads than before: 1+added heads (2..n)
    - less heads than before: -1-removed heads (-2..-n)
    - number of heads stays the same: 1
    """
    def onchangeset(x):
        # debug callback; a freshly received changeset always lands at tip
        self.ui.debug(_("add changeset %s\n") % short(x))
        return len(cl)

    def revnum(x):
        return cl.rev(x)

    if not source:
        return 0

    self.hook('prechangegroup', throw=True, source=srctype, url=url)

    changesets = files = revisions = 0

    # write changelog data to temp files so concurrent readers will not see
    # inconsistent view
    cl = self.changelog
    cl.delayupdate()
    oldheads = len(cl.heads())

    tr = self.transaction()
    try:
        trproxy = weakref.proxy(tr)

        # the changeset group comes first in the stream
        self.ui.status(_("adding changesets\n"))
        tip_before = len(cl) - 1
        chunks = changegroup.chunkiter(source)
        if cl.addgroup(chunks, onchangeset, trproxy) is None and not emptyok:
            raise util.Abort(_("received changelog group is empty"))
        tip_after = len(cl) - 1
        changesets = tip_after - tip_before

        # then the manifest group
        self.ui.status(_("adding manifests\n"))
        chunks = changegroup.chunkiter(source)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifest.addgroup(chunks, revnum, trproxy)

        # finally, one group per changed file
        self.ui.status(_("adding file changes\n"))
        while True:
            fname = changegroup.getchunk(source)
            if not fname:
                break
            self.ui.debug(_("adding %s revisions\n") % fname)
            flog = self.file(fname)
            oldsize = len(flog)
            chunks = changegroup.chunkiter(source)
            if flog.addgroup(chunks, revnum, trproxy) is None:
                raise util.Abort(_("received file revlog group is empty"))
            revisions += len(flog) - oldsize
            files += 1

        # make changelog see real files again
        cl.finalize(trproxy)

        newheads = len(self.changelog.heads())
        headsmsg = ""
        if oldheads and newheads != oldheads:
            headsmsg = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, headsmsg))

        if changesets > 0:
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(tip_before + 1)),
                      source=srctype, url=url)

        tr.close()
    finally:
        # drop the last strong reference so the transaction destructor runs
        del tr

    if changesets > 0:
        # forcefully update the branch cache
        self.ui.debug(_("updating the branch cache\n"))
        self.branchtags()
        self.hook("changegroup",
                  node=hex(self.changelog.node(tip_before + 1)),
                  source=srctype, url=url)

        for i in xrange(tip_before + 1, tip_after + 1):
            self.hook("incoming", node=hex(self.changelog.node(i)),
                      source=srctype, url=url)

    # never return 0 here:
    if newheads < oldheads:
        return newheads - oldheads - 1
    else:
        return newheads - oldheads + 1
2056
2057
2057
2058
def stream_in(self, remote):
    """Clone by copying raw store files streamed from *remote*.

    Protocol: a numeric status line, then a "<file count> <byte count>"
    line, then for each file a header line of the file name, a NUL byte
    and the size, followed by exactly that many bytes of data.

    Returns len(self.heads()) + 1, so callers always see a nonzero
    "heads changed" style result.  Raises util.Abort for a
    server-reported error code and util.UnexpectedOutput for a
    malformed response.
    """
    fp = remote.stream_out()
    l = fp.readline()
    try:
        resp = int(l)
    except ValueError:
        raise util.UnexpectedOutput(
            _('Unexpected response from remote server:'), l)
    if resp == 1:
        raise util.Abort(_('operation forbidden by server'))
    elif resp == 2:
        raise util.Abort(_('locking the remote repository failed'))
    elif resp != 0:
        raise util.Abort(_('the server sent an unknown error code'))
    self.ui.status(_('streaming all changes\n'))
    l = fp.readline()
    try:
        total_files, total_bytes = map(int, l.split(' ', 1))
    except (ValueError, TypeError):
        raise util.UnexpectedOutput(
            _('Unexpected response from remote server:'), l)
    self.ui.status(_('%d files to transfer, %s of data\n') %
                   (total_files, util.bytecount(total_bytes)))
    start = time.time()
    for i in xrange(total_files):
        # XXX doesn't support '\n' or '\r' in filenames
        l = fp.readline()
        try:
            name, size = l.split('\0', 1)
            size = int(size)
        # BUG FIX: this read "except ValueError, TypeError:" -- legacy
        # comma syntax that catches only ValueError and rebinds the name
        # TypeError; a TypeError here escaped as a raw traceback instead
        # of the intended UnexpectedOutput.
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
        ofp = self.sopener(name, 'w')
        for chunk in util.filechunkiter(fp, limit=size):
            ofp.write(chunk)
        ofp.close()
    elapsed = time.time() - start
    # guard against zero/negative wall-clock deltas before dividing
    if elapsed <= 0:
        elapsed = 0.001
    self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                   (util.bytecount(total_bytes), elapsed,
                    util.bytecount(total_bytes / elapsed)))
    self.invalidate()
    return len(self.heads()) + 1
2104
2105
def clone(self, remote, heads=None, stream=False):
    '''clone remote repository.

    keyword arguments:
    heads: list of revs to clone (forces use of pull)
    stream: use streaming clone if possible'''

    # BUG FIX: the default used to be the mutable literal [], which is
    # shared across all calls; use the None sentinel instead.  Behavior
    # for existing callers is unchanged.
    if heads is None:
        heads = []

    # now, all clients that can request uncompressed clones can
    # read repo formats supported by all servers that can serve
    # them.

    # if revlog format changes, client will have to check version
    # and format flags on "stream" capability, and use
    # uncompressed only if compatible.

    if stream and not heads and remote.capable('stream'):
        return self.stream_in(remote)
    return self.pull(remote, heads)
2123
2124
2124 # used to avoid circular references so destructors work
2125 # used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callable that performs the recorded (src, dest) renames.

    The pairs are copied into plain tuples up front so the returned
    closure holds no reference back to the transaction (avoids circular
    references so destructors work).
    """
    pending = [tuple(pair) for pair in files]
    def renameall():
        for src, dest in pending:
            util.rename(src, dest)
    return renameall
2131
2132
def instance(ui, path, create):
    """Open (or create) the local repository at *path*.

    Strips a leading 'file:' scheme before handing the path over.
    """
    repopath = util.drop_scheme('file', path)
    return localrepository(ui, repopath, create)
2134
2135
def islocal(path):
    """Repositories of this type are always local; *path* is ignored."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now